metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "next_occultation.py",
"repo_name": "rodluger/planetplanet",
"repo_path": "planetplanet_extracted/planetplanet-master/scripts/next_occultation.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
next_occultation.py |github|
----------------------------
Compute the time of the next occultation of a given planet and plot its light curve.
.. plot::
:align: center
from scripts import next_occultation
next_occultation._test()
This is a **double** occultation of `c`, as `b` goes into retrograde halfway through
the event! The duration is 157 minutes, or nearly 3 hours (!)
.. role:: raw-html(raw)
:format: html
.. |github| replace:: :raw-html:`<a href = "https://github.com/rodluger/planetplanet/blob/master/scripts/next_occultation.py"><i class="fa fa-github" aria-hidden="true"></i></a>`
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from planetplanet import Trappist1
from planetplanet.constants import *
import matplotlib.pyplot as pl
import numpy as np
def _test():
    """Entry point invoked by the documentation's ``.. plot::`` directive."""
    plot()
def plot():
    """
    Find the longest of the next ten occultations of TRAPPIST-1 `c` by `b`
    and plot its light curve.
    """
    # Instantiate the TRAPPIST-1 system.
    system = Trappist1(sample = True, nbody = True, seed = 1234)

    # Get the next 10 occultations of c.
    # The integration starts on May 26, 2017. The ephemerides aren't really
    # accurate given the transit times from Gillon et al. (2017), which are
    # from October 2016, but that's ok for this example.
    times, _, durations = system.next_occultation(system.c,
                                                  occultors = system.b,
                                                  noccultations = 10,
                                                  tstart = 7900., tend = 8000.)

    # Pick the event with the longest duration.
    t_event = times[np.argmax(durations)]

    # Plot the light curve to check it out. We must re-instantiate the
    # system (same seed) since the integration above already carried us
    # past the occultation, and integrate from the same `tstart` so we
    # reproduce the exact same orbital solution.
    system = Trappist1(sample = True, nbody = True, seed = 1234)

    # Light curve up to the event plus a little bit.
    grid = np.arange(7900., t_event + 0.1, MINUTE)
    system.compute(grid)

    # Now plot just the occultation.
    system.plot_occultation('c', t_event)
    pl.show()
# Generate the plot when run directly as a script.
if __name__ == '__main__':
    plot()
|
rodlugerREPO_NAMEplanetplanetPATH_START.@planetplanet_extracted@planetplanet-master@scripts@next_occultation.py@.PATH_END.py
|
{
"filename": "flat_mirror.py",
"repo_name": "mtalapinto/moes",
"repo_path": "feros/optics/flat_mirror.py",
"type": "Python"
}
|
import numpy as np
from . import transform
def flat_out(H, DC, T):
    """
    Reflect a bundle of rays off a flat mirror.

    Parameters
    ----------
    H : array of shape (n, 3)
        Ray origin points.
    DC : array of shape (n, 3)
        Ray direction cosines.
    T : rotation parameters describing the mirror orientation, as
        understood by ``transform.transform`` / ``transform.transform2``.

    Returns
    -------
    (H_out, DC_out) : reflected ray origins and direction cosines.
    """
    n_rays = len(DC)
    DC_out = np.zeros([n_rays, 3])

    # Rotate the directions into the mirror's local frame.
    DC = transform.transform(DC, -T)
    # NOTE(review): all three columns of this result are overwritten
    # below, so the transformed H is effectively discarded; also the
    # right-hand sides below use the *untransformed* H. Verify whether
    # H_out was meant on the RHS instead.
    H_out = transform.transform(H, -T)

    # Propagate ray origins onto the local z = 0 (mirror) plane.
    for ax in (0, 1):
        H_out[:, ax] = H[:, ax] - (DC[:, ax] / DC[:, 2]) * (H[:, 2])
    H_out[:, 2] = 0.

    # Mirror default normal: +z in the local frame, rotated to global.
    n0 = np.zeros([n_rays, 3])
    n0[:, 2] = 1
    n = transform.transform(n0, T)

    # Specular reflection: d' = d - 2 (d . n) n
    cosi = DC[:, 0] * n[:, 0] + DC[:, 1] * n[:, 1] + DC[:, 2] * n[:, 2]
    for ax in range(3):
        DC_out[:, ax] = DC[:, ax] - 2 * cosi * n[:, ax]

    # Back to the global frame.
    DC_out = transform.transform2(DC_out, T)
    H_out = transform.transform2(H_out, T)

    # Re-propagate origins along the reflected directions.
    for ax in (0, 1):
        H_out[:, ax] = H[:, ax] - (DC_out[:, ax] / DC_out[:, 2]) * (H_out[:, 2])
    H_out[:, 2] = 0.
    return H_out, DC_out
|
mtalapintoREPO_NAMEmoesPATH_START.@feros@optics@flat_mirror.py@.PATH_END.py
|
{
"filename": "loader.py",
"repo_name": "ML4GW/amplfi",
"repo_path": "amplfi_extracted/amplfi-main/amplfi/train/data/waveforms/loader.py",
"type": "Python"
}
|
from pathlib import Path
import h5py
import torch
from .sampler import WaveformSampler
def x_per_y(x, y):
    """Return the number of size-``y`` groups needed to cover ``x`` items
    (ceiling division)."""
    quotient, _ = divmod(x - 1, y)
    return int(quotient) + 1
class WaveformLoader(WaveformSampler):
    """
    Torch module for loading waveforms from disk,
    performing train/val/test split, and sampling
    them during training.

    The last ``val_frac`` fraction of the waveforms in the file is
    reserved for validation; the rest is split across devices for
    training.

    TODO: modify this to sample waveforms from disk, taking
    an index sampler object so that DDP training can sample
    different waveforms for each device.

    Args:
        waveform_file:
            Path to the HDF5 file containing the waveforms
        val_frac:
            Fraction of waveforms to use for validation
    """

    def __init__(
        self,
        *args,
        waveform_file: Path,
        val_frac: float,
        **kwargs,
    ) -> None:
        super().__init__(*args, **kwargs)
        self.val_frac = val_frac
        self.waveform_file = waveform_file
        # Only the dataset length is read here; the waveform data itself
        # is loaded lazily by `load_signals`.
        with h5py.File(waveform_file) as f:
            self.num_waveforms = len(f["signals"])

        # Eagerly load this device's share of the training waveforms.
        (
            self.train_waveforms,
            self.train_parameters,
        ) = self.get_train_waveforms()

    @property
    def num_val_waveforms(self) -> int:
        """Total number of validation waveforms across all devices"""
        return int(self.val_frac * self.num_waveforms)

    @property
    def val_waveforms_per_device(self) -> int:
        """Number of validation waveforms per device"""
        world_size, _ = self.get_world_size_and_rank()
        return self.num_val_waveforms // world_size

    @property
    def num_train_waveforms(self) -> int:
        """Total number of training waveforms"""
        return self.num_waveforms - self.num_val_waveforms

    def load_signals(self, start, stop):
        """
        Load signals and parameters of specified indices from the dataset

        Args:
            start, stop:
                Slice bounds into the HDF5 datasets. May be negative
                (counting from the end of the dataset) or None.

        Returns:
            Tuple of (signals tensor, dict of parameter name -> tensor)
        """
        with h5py.File(self.waveform_file) as f:
            signals = torch.Tensor(f["signals"][start:stop])
            parameters = {}
            for parameter in self.inference_params:
                parameters[parameter] = torch.Tensor(f[parameter][start:stop])
        return signals, parameters

    def get_slice_bounds(self, total, world_size, rank) -> tuple[int, int]:
        """
        Determine waveform indices to load for this device
        given our rank and world size
        """
        # Ceil-divide so every waveform is covered even when `total` does
        # not divide evenly; the last rank may receive a short slice.
        per_dev = x_per_y(abs(total), world_size)
        start = rank * per_dev
        stop = (rank + 1) * per_dev
        return start, stop

    def get_train_waveforms(self):
        """
        Returns train waveforms for this device
        """
        world_size, rank = self.get_world_size_and_rank()
        start, stop = self.get_slice_bounds(
            self.num_train_waveforms, world_size, rank
        )
        return self.load_signals(start, stop)

    def get_val_waveforms(self):
        """
        Returns validation waveforms for this device
        """
        world_size, rank = self.get_world_size_and_rank()
        start, stop = self.get_slice_bounds(
            self.num_val_waveforms, world_size, rank
        )
        # Validation waveforms live at the *end* of the file, so map the
        # forward slice [start, stop) onto the tail: [-stop, -start).
        # `-start or None` turns rank 0's start == 0 into None so its
        # slice extends all the way to the end of the dataset.
        # Fix: the previous ordering (`-start, -stop or None`) produced
        # e.g. [0:-per_dev] for rank 0, i.e. the *training* portion of
        # the file rather than the validation tail.
        start, stop = -stop, -start or None
        return self.load_signals(start, stop)

    def get_test_waveforms(self, f, world_size, rank):
        """
        Load test waveforms

        Not yet implemented; returns None.
        """
        return

    def slice_waveforms(self, waveforms: torch.Tensor):
        """
        Slice waveforms to the desired length;
        **NOTE** it is assumed here that waveforms are centered;
        """
        # Symmetric window about the center; for odd `waveform_length`
        # this keeps 2 * (waveform_length // 2) samples.
        center = waveforms.shape[-1] // 2
        half = self.waveform_length // 2
        start, stop = center - half, center + half
        return waveforms[:, start:stop]

    def sample(self, X):
        """
        Sample method for generating training waveforms

        Args:
            X:
                Batch tensor; only its leading (batch) dimension is used
                to decide how many waveforms to draw.

        Returns:
            Tuple of (polarizations dict, parameters dict)
        """
        N = X.shape[0]
        # Draw N distinct waveforms (sampling without replacement).
        idx = torch.randperm(len(self.train_waveforms))[:N]
        waveforms = self.train_waveforms[idx]
        parameters = {}
        for k, v in self.train_parameters.items():
            parameters[k] = v[idx]
        # NOTE(review): this unpacks along dim 0 of the indexed batch,
        # which assumes the two polarizations are the leading dimension
        # at this point — confirm against the on-disk "signals" layout.
        cross, plus = waveforms
        polarizations = {"cross": cross, "plus": plus}
        return polarizations, parameters
|
ML4GWREPO_NAMEamplfiPATH_START.@amplfi_extracted@amplfi-main@amplfi@train@data@waveforms@loader.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/langchain/tests/integration_tests/evaluation/__init__.py",
"type": "Python"
}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@langchain@tests@integration_tests@evaluation@__init__.py@.PATH_END.py
|
|
{
"filename": "test_kmeans.py",
"repo_name": "rmjarvis/TreeCorr",
"repo_path": "TreeCorr_extracted/TreeCorr-main/tests/test_kmeans.py",
"type": "Python"
}
|
# Copyright (c) 2003-2024 by Mike Jarvis
#
# TreeCorr is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
import numpy as np
import os
import time
import coord
import treecorr
from test_helper import get_from_wiki, assert_raises, timer
@timer
def test_dessv():
    """Run kmeans on the DES SV footprint (needs fitsio and the wiki data file),
    checking centers, inertia, and patch sizes for both algorithms."""
    try:
        import fitsio
    except ImportError:
        print('Skip test_dessv, since fitsio not installed')
        return

    rng = np.random.default_rng(123)
    #treecorr.set_omp_threads(1);
    get_from_wiki('des_sv.fits')
    file_name = os.path.join('data','des_sv.fits')
    cat = treecorr.Catalog(file_name, ra_col='ra', dec_col='dec', ra_units='deg', dec_units='deg')

    # Use an odd number to make sure we force some of the shuffle bits in InitializeCenters
    # to happen.
    npatch = 43
    field = cat.getNField(max_top=5)
    t0 = time.time()
    patches, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    print('patches = ',np.unique(patches))
    assert len(patches) == cat.ntot
    assert min(patches) == 0
    assert max(patches) == npatch-1

    # Check the returned center to a direct calculation.
    xyz = np.array([cat.x, cat.y, cat.z]).T
    direct_cen = np.array([xyz[patches==i].mean(axis=0) for i in range(npatch)])
    direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
    np.testing.assert_allclose(cen, direct_cen, atol=1.e-3)

    # KMeans minimizes the total inertia.
    # Check this value and the rms size, which should also be quite small.
    inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
    sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
    sizes *= 180. / np.pi * 60.  # convert to arcmin
    counts = np.array([np.sum(patches==i) for i in range(npatch)])
    print('With standard algorithm:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    print('mean size = ',np.mean(sizes))
    print('rms size = ',np.std(sizes))
    assert np.sum(inertia) < 200.  # This is specific to this particular field and npatch.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.2 * np.mean(inertia)  # rms is usually < 0.2 * mean
    print(np.std(sizes)/np.mean(sizes))
    assert np.std(sizes) < 0.1 * np.mean(sizes)  # sizes have even less spread usually.

    # Should all have similar number of points.  Nothing is required here though.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Check the alternate algorithm.  rms inertia should be lower.
    t0 = time.time()
    patches, cen = field.run_kmeans(npatch, alt=True, rng=rng)
    t1 = time.time()
    assert len(patches) == cat.ntot
    assert min(patches) == 0
    assert max(patches) == npatch-1
    inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
    sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
    sizes *= 180. / np.pi * 60.  # convert to arcmin
    counts = np.array([np.sum(patches==i) for i in range(npatch)])
    print('With alternate algorithm:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    print('mean size = ',np.mean(sizes))
    print('rms size = ',np.std(sizes))
    assert np.sum(inertia) < 200.  # Total shouldn't increase much. (And often decreases.)
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.07 * np.mean(inertia)  # rms should be even smaller here.
    print(np.std(sizes)/np.mean(sizes))
    assert np.std(sizes) < 0.06 * np.mean(sizes)  # This isn't usually much smaller.

    # This doesn't keep the counts as equal as the standard algorithm.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Finally, use a field with lots of top level cells to check the other branch in
    # InitializeCenters.
    field = cat.getNField(min_top=10)
    t0 = time.time()
    patches, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    assert len(patches) == cat.ntot
    assert min(patches) == 0
    assert max(patches) == npatch-1
    inertia = np.array([np.sum((xyz[patches==i] - cen[i])**2) for i in range(npatch)])
    sizes = np.array([np.mean((xyz[patches==i] - cen[i])**2) for i in range(npatch)])**0.5
    sizes *= 180. / np.pi * 60.  # convert to arcmin
    counts = np.array([np.sum(patches==i) for i in range(npatch)])
    # This doesn't give as good an initialization, so these are a bit worse usually.
    print('With min_top=10:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    print('mean size = ',np.mean(sizes))
    print('rms size = ',np.std(sizes))
    assert np.sum(inertia) < 210.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.26 * np.mean(inertia)
    print(np.std(sizes)/np.mean(sizes))
    assert np.std(sizes) < 0.08 * np.mean(sizes)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
@timer
def test_radec():
    """Run kmeans on a random spherical (ra, dec) catalog with weights."""
    # Very similar to the above, but with a random set of points, so it will run even
    # if the user doesn't have fitsio installed.
    # In addition, we add weights to make sure that works.
    ngal = 100000
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) ) + 100  # Put everything at large y, so smallish angle on sky
    z = rng.normal(0,s, (ngal,) )
    w = rng.random(ngal)
    ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
    print('minra = ',np.min(ra) * coord.radians / coord.degrees)
    print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
    print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
    print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w)
    npatch = 111
    field = cat.getNField()
    t0 = time.time()
    rng = np.random.default_rng(8675309)
    p, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    print('patches = ',np.unique(p))
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1

    # Check the returned center to a direct calculation.
    xyz = np.array([cat.x, cat.y, cat.z]).T
    direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
    direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
    np.testing.assert_allclose(cen, direct_cen, atol=3.e-3)

    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With standard algorithm:')
    print('time = ',t1-t0)
    print('inertia = ',inertia)
    print('counts = ',counts)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 200.  # This is specific to this particular field and npatch.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.21 * np.mean(inertia)

    # With weights, these aren't actually all that similar.  The range is more than a
    # factor of 10.  I think because it varies whether high weight points happen to be near the
    # edges or middles of patches, so the total weight varies when you target having the
    # inertias be relatively similar.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Check the alternate algorithm.  rms inertia should be lower.
    t0 = time.time()
    p, cen = field.run_kmeans(npatch, alt=True, rng=rng)
    t1 = time.time()
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With alternate algorithm:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 200.  # Total shouldn't increase much. (And often decreases.)
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.09 * np.mean(inertia)  # rms should be even smaller here.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Finally, use a field with lots of top level cells to check the other branch in
    # InitializeCenters.
    field = cat.getNField(min_top=10)
    t0 = time.time()
    p, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    # This doesn't give as good an initialization, so these are a bit worse usually.
    print('With min_top=10:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 210.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.22 * np.mean(inertia)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
@timer
def test_3d():
    """Run kmeans on a weighted 3-d (x, y, z) catalog, and check that the
    equivalent (ra, dec, r) catalog behaves the same way."""
    # Like the above, but using x,y,z positions.
    ngal = 100000
    s = 1.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    z = rng.normal(0,s, (ngal,) )
    w = rng.random(ngal) + 1
    cat = treecorr.Catalog(x=x, y=y, z=z, w=w)
    npatch = 111
    field = cat.getNField()
    t0 = time.time()
    rng = np.random.default_rng(8675309)
    p, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    print('patches = ',np.unique(p))
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    xyz = np.array([x, y, z]).T
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With standard algorithm:')
    print('time = ',t1-t0)
    print('inertia = ',inertia)
    print('counts = ',counts)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 33000.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.13 * np.mean(inertia)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Should be the same thing with ra, dec, r
    ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
    r = (x**2 + y**2 + z**2)**0.5
    cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', r=r, w=w)
    field = cat2.getNField()
    t0 = time.time()
    p2, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    inertia = np.array([np.sum(w[p2==i][:,None] * (xyz[p2==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p2==i]) for i in range(npatch)])
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 33000.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.12 * np.mean(inertia)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Check the alternate algorithm.  rms inertia should be lower.
    t0 = time.time()
    p, cen = field.run_kmeans(npatch, alt=True, rng=rng)
    t1 = time.time()
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With alternate algorithm:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 33000.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.11 * np.mean(inertia)  # rms should be even smaller here.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Finally, use a field with lots of top level cells to check the other branch in
    # InitializeCenters.
    field = cat.getNField(min_top=10)
    t0 = time.time()
    p, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    # This doesn't give as good an initialization, so these are a bit worse usually.
    print('With min_top=10:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 33000.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.12 * np.mean(inertia)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
@timer
def test_2d():
    """Run kmeans on a weighted 2-d (x, y) catalog, using GField and KField
    to verify it works with field types other than NField."""
    # Like the above, but using x,y positions.
    # An additional check here is that this works with other fields besides NField, even though
    # in practice NField will almost always be the kind of Field used.
    ngal = 100000
    s = 1.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    w = rng.random(ngal) + 1
    g1 = rng.normal(0,s, (ngal,) )
    g2 = rng.normal(0,s, (ngal,) )
    k = rng.normal(0,s, (ngal,) )
    cat = treecorr.Catalog(x=x, y=y, w=w, g1=g1, g2=g2, k=k)
    npatch = 111
    field = cat.getGField()
    t0 = time.time()
    rng = np.random.default_rng(8675309)
    p, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    print('patches = ',np.unique(p))
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    xy = np.array([x, y]).T
    inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With standard algorithm:')
    print('time = ',t1-t0)
    print('inertia = ',inertia)
    print('counts = ',counts)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 5300.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.2 * np.mean(inertia)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Check the alternate algorithm.  rms inertia should be lower.
    t0 = time.time()
    p, cen = field.run_kmeans(npatch, alt=True, rng=rng)
    t1 = time.time()
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With alternate algorithm:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 5300.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.1 * np.mean(inertia)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))

    # Finally, use a field with lots of top level cells to check the other branch in
    # InitializeCenters.
    field = cat.getKField(min_top=10)
    t0 = time.time()
    p, cen = field.run_kmeans(npatch, rng=rng)
    t1 = time.time()
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xy[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    # This doesn't give as good an initialization, so these are a bit worse usually.
    print('With min_top=10:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 5300.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.20 * np.mean(inertia)
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
@timer
def test_init_random():
    """Test the init=random option in 3d, 2d, and spherical geometries,
    including error handling for invalid npatch values."""
    # Test the init=random option
    ngal = 100000
    s = 1.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    z = rng.normal(0,s, (ngal,) )
    cat = treecorr.Catalog(x=x, y=y, z=z)
    xyz = np.array([x, y, z]).T

    # Skip the refine_centers step.
    print('3d with init=random')
    npatch = 10
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'random')
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    # Use higher max_iter, since random isn't a great initialization.
    rng = np.random.default_rng(8675309)
    p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Use a field with lots of top level cells
    print('3d with init=random, min_top=10')
    field = cat.getNField(min_top=10)
    cen1 = field.kmeans_initialize_centers(npatch, 'random')
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Repeat in 2d
    print('2d with init=random')
    cat = treecorr.Catalog(x=x, y=y)
    xy = np.array([x, y]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'random')
    assert cen1.shape == (npatch, 2)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Repeat in spherical
    print('spher with init=random')
    ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
    xyz = np.array([cat.x, cat.y, cat.z]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'random')
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))

    # Now run the normal way
    p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Check that rng can also be a RandomState
    rng = np.random.RandomState(12345)
    p2, cen2 = field.run_kmeans(npatch, init='random', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)

    # Invalid init strings and out-of-range npatch values should raise.
    with assert_raises(ValueError):
        field.run_kmeans(npatch, init='invalid')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch, init='invalid')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal*2, init='random')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal+1, init='random')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=0, init='random')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=-100, init='random')

    # Should be valid to give npatch = 1, although not particularly useful.
    cen_1 = field.kmeans_initialize_centers(npatch=1, init='random')
    p_1 = field.kmeans_assign_patches(cen_1)
    np.testing.assert_equal(p_1, np.zeros(ngal))

    # If same number of patches as galaxies, each galaxy gets a patch.
    # (This is stupid of course, but check that it doesn't fail.)
    # Do this with fewer points though, since it's not particularly fast with N=10^5.
    n = 100
    cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
    field = cat.getNField()
    cen_n = field.kmeans_initialize_centers(npatch=n, init='random')
    p_n = field.kmeans_assign_patches(cen_n)
    np.testing.assert_equal(sorted(p_n), list(range(n)))
@timer
def test_init_kmpp():
    # Test the init='kmeans++' option for initializing the kmeans centers.
    ngal = 100000
    s = 1.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) )
    z = rng.normal(0,s, (ngal,) )
    cat = treecorr.Catalog(x=x, y=y, z=z)
    xyz = np.array([x, y, z]).T
    # Skip the refine_centers step.
    print('3d with init=kmeans++')
    npatch = 10
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))
    # Now run the normal way
    # Use higher max_iter so the refinement has ample room to converge.
    rng = np.random.default_rng(8675309)
    p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)
    # Use a field with lots of top level cells
    print('3d with init=kmeans++, min_top=10')
    field = cat.getNField(min_top=10)
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))
    # Now run the normal way
    p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)
    # Repeat in 2d
    print('2d with init=kmeans++')
    cat = treecorr.Catalog(x=x, y=y)
    xy = np.array([x, y]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    assert cen1.shape == (npatch, 2)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xy[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))
    # Now run the normal way
    p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xy[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)
    # Repeat in spherical
    print('spher with init=kmeans++')
    ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
    xyz = np.array([cat.x, cat.y, cat.z]).T
    field = cat.getNField()
    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
    assert cen1.shape == (npatch, 3)
    p1 = field.kmeans_assign_patches(cen1)
    print('patches = ',np.unique(p1))
    assert len(p1) == cat.ntot
    assert min(p1) == 0
    assert max(p1) == npatch-1
    inertia1 = np.array([np.sum((xyz[p1==i] - cen1[i])**2) for i in range(npatch)])
    counts1 = np.array([np.sum(p1==i) for i in range(npatch)])
    print('counts = ',counts1)
    print('rms counts = ',np.std(counts1))
    print('total inertia = ',np.sum(inertia1))
    # Now run the normal way
    p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)
    # Check that rng can also be a RandomState
    rng = np.random.RandomState(12345)
    p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)
    # Also via GField and KField
    rng = np.random.RandomState(12345)
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', rng=rng,
                           g1=np.zeros_like(ra), g2=np.zeros_like(ra), k=np.zeros_like(ra))
    field = cat.getGField()
    p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)
    field = cat.getKField()
    p2, cen2 = field.run_kmeans(npatch, init='kmeans++', max_iter=1000, rng=rng)
    inertia2 = np.array([np.sum((xyz[p2==i] - cen2[i])**2) for i in range(npatch)])
    counts2 = np.array([np.sum(p2==i) for i in range(npatch)])
    print('rms counts => ',np.std(counts2))
    print('total inertia => ',np.sum(inertia2))
    assert np.sum(inertia2) < np.sum(inertia1)
    # Invalid npatch values should raise.
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal*2, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=ngal+1, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=0, init='kmeans++')
    with assert_raises(ValueError):
        field.kmeans_initialize_centers(npatch=-100, init='kmeans++')
    # Should be valid to give npatch = 1, although not particularly useful.
    cen_1 = field.kmeans_initialize_centers(npatch=1, init='kmeans++')
    p_1 = field.kmeans_assign_patches(cen_1)
    np.testing.assert_equal(p_1, np.zeros(ngal))
    # If same number of patches as galaxies, each galaxy gets a patch.
    # (This is stupid of course, but check that it doesn't fail.)
    # Do this with fewer points though, since it's not particularly fast with N=10^5.
    n = 100
    cat = treecorr.Catalog(ra=ra[:n], dec=dec[:n], ra_units='rad', dec_units='rad')
    field = cat.getNField()
    cen_n = field.kmeans_initialize_centers(npatch=n, init='kmeans++')
    p_n = field.kmeans_assign_patches(cen_n)
    np.testing.assert_equal(sorted(p_n), list(range(n)))
@timer
def test_zero_weight():
    """Check that objects with w=0 are still assigned to patches.

    Based on test_ra_dec, but where many galaxies have w=0.
    There used to be a bug where w=0 objects were not assigned to any patch.
    """
    ngal = 10000
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) ) + 100  # Put everything at large y, so smallish angle on sky
    z = rng.normal(0,s, (ngal,) )
    w = np.zeros(ngal)
    # Bug fix: use the seeded rng rather than the global np.random state, so the
    # selection of the 10% nonzero-weight objects is reproducible run to run.
    w[rng.choice(range(ngal), ngal//10, replace=False)] = 1.0
    ra, dec = coord.CelestialCoord.xyz_to_radec(x,y,z)
    print('minra = ',np.min(ra) * coord.radians / coord.degrees)
    print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
    print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
    print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
    # keep_zero_weight=True so the w=0 objects remain in the catalog at all.
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
                           keep_zero_weight=True)
    treecorr.set_omp_threads(1)
    npatch = 16
    field = cat.getNField()
    t0 = time.time()
    p, c = field.run_kmeans(npatch)
    t1 = time.time()
    print('patches = ',np.unique(p), t1-t0)
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    # Every patch should contain both weighted and zero-weight objects.
    print('w>0 patches = ',np.unique(p[w>0]))
    print('w==0 patches = ',np.unique(p[w==0]))
    assert set(p[w>0]) == set(p[w==0])
@timer
def test_catalog_sphere():
    # This follows the same path as test_radec, but using the Catalog API to run kmeans.
    ngal = 100000
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) ) + 100  # Put everything at large y, so smallish angle on sky
    z = rng.normal(0,s, (ngal,) )
    w = rng.random(ngal)
    ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
    print('minra = ',np.min(ra) * coord.radians / coord.degrees)
    print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
    print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
    print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
    npatch = 111
    rng = np.random.default_rng(8675309)
    cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w, npatch=npatch,
                           rng=rng)
    t0 = time.time()
    p = cat.patch
    cen = cat.patch_centers
    t1 = time.time()
    print('patches = ',np.unique(p))
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    # Check the returned center to a direct calculation.
    xyz = np.array([cat.x, cat.y, cat.z]).T
    direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
    direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
    np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With standard algorithm:')
    print('time = ',t1-t0)
    print('inertia = ',inertia)
    print('counts = ',counts)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 200.  # This is specific to this particular field and npatch.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.19 * np.mean(inertia)  # rms is usually small compared to the mean
    # With weights, these aren't actually all that similar. The range is more than a
    # factor of 10. I think because it varies whether high weight points happen to be near the
    # edges or middles of patches, so the total weight varies when you target having the
    # inertias be relatively similar.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
    # Check the alternate algorithm. rms inertia should be lower.
    cat2 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
                            npatch=npatch, kmeans_alt=True, rng=rng)
    t0 = time.time()
    p = cat2.patch
    cen = cat2.patch_centers
    t1 = time.time()
    assert len(p) == cat2.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With alternate algorithm:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 200.  # Total shouldn't increase much. (And often decreases.)
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.10 * np.mean(inertia)  # rms should be even smaller here.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
    # Check using patch_centers from (ra,dec) -> (ra,dec,r)
    cat3 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
                            patch_centers=cat2.patch_centers)
    np.testing.assert_array_equal(cat2.patch, cat3.patch)
    np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)
@timer
def test_catalog_3d():
    # With ra, dec, r, the Catalog API should only do patches using RA, Dec.
    ngal = 100000
    s = 10.
    rng = np.random.RandomState(8675309)
    x = rng.normal(0,s, (ngal,) )
    y = rng.normal(0,s, (ngal,) ) + 100  # Put everything at large y, so smallish angle on sky
    z = rng.normal(0,s, (ngal,) )
    w = rng.random(ngal)
    ra, dec, r = coord.CelestialCoord.xyz_to_radec(x,y,z, return_r=True)
    print('minra = ',np.min(ra) * coord.radians / coord.degrees)
    print('maxra = ',np.max(ra) * coord.radians / coord.degrees)
    print('mindec = ',np.min(dec) * coord.radians / coord.degrees)
    print('maxdec = ',np.max(dec) * coord.radians / coord.degrees)
    npatch = 111
    rng = np.random.default_rng(8675309)
    cat = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
                           npatch=npatch, rng=rng)
    t0 = time.time()
    p = cat.patch
    cen = cat.patch_centers
    t1 = time.time()
    print('patches = ',np.unique(p))
    assert len(p) == cat.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    # Check the returned center to a direct calculation.
    # Divide out r so the comparison is done on the unit sphere.
    xyz = np.array([cat.x/cat.r, cat.y/cat.r, cat.z/cat.r]).T
    print('cen = ',cen)
    print('xyz = ',xyz)
    direct_cen = np.array([np.average(xyz[p==i], axis=0, weights=w[p==i]) for i in range(npatch)])
    direct_cen /= np.sqrt(np.sum(direct_cen**2,axis=1)[:,np.newaxis])
    np.testing.assert_allclose(cen, direct_cen, atol=2.e-3)
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With standard algorithm:')
    print('time = ',t1-t0)
    print('inertia = ',inertia)
    print('counts = ',counts)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 200.  # This is specific to this particular field and npatch.
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.19 * np.mean(inertia)  # rms is usually smaller than the mean
    # With weights, these aren't actually all that similar. The range is more than a
    # factor of 10. I think because it varies whether high weight points happen to be near the
    # edges or middles of patches, so the total weight varies when you target having the
    # inertias be relatively similar.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
    # Check the alternate algorithm. rms inertia should be lower.
    cat2 = treecorr.Catalog(ra=ra, dec=dec, r=r, ra_units='rad', dec_units='rad', w=w,
                            npatch=npatch, kmeans_alt=True, rng=rng)
    t0 = time.time()
    p = cat2.patch
    cen = cat2.patch_centers
    t1 = time.time()
    assert len(p) == cat2.ntot
    assert min(p) == 0
    assert max(p) == npatch-1
    inertia = np.array([np.sum(w[p==i][:,None] * (xyz[p==i] - cen[i])**2) for i in range(npatch)])
    counts = np.array([np.sum(w[p==i]) for i in range(npatch)])
    print('With alternate algorithm:')
    print('time = ',t1-t0)
    print('total inertia = ',np.sum(inertia))
    print('mean inertia = ',np.mean(inertia))
    print('rms inertia = ',np.std(inertia))
    assert np.sum(inertia) < 200.  # Total shouldn't increase much. (And often decreases.)
    print(np.std(inertia)/np.mean(inertia))
    assert np.std(inertia) < 0.10 * np.mean(inertia)  # rms should be even smaller here.
    print('mean counts = ',np.mean(counts))
    print('min counts = ',np.min(counts))
    print('max counts = ',np.max(counts))
    # Check using patch_centers from (ra,dec,r) -> (ra,dec)
    cat3 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', w=w,
                            patch_centers=cat2.patch_centers)
    np.testing.assert_array_equal(cat2.patch, cat3.patch)
    np.testing.assert_array_equal(cat2.patch_centers, cat3.patch_centers)
if __name__ == '__main__':
    # Run the full kmeans test suite in sequence when executed as a script.
    test_dessv()
    test_radec()
    test_3d()
    test_2d()
    test_init_random()
    test_init_kmpp()
    test_zero_weight()
    test_catalog_sphere()
    test_catalog_3d()
|
rmjarvisREPO_NAMETreeCorrPATH_START.@TreeCorr_extracted@TreeCorr-main@tests@test_kmeans.py@.PATH_END.py
|
{
"filename": "flat_sky_to_healpix.py",
"repo_name": "threeML/hawc_hal",
"repo_path": "hawc_hal_extracted/hawc_hal-master/hawc_hal/healpix_handling/flat_sky_to_healpix.py",
"type": "Python"
}
|
from builtins import object
import healpy as hp
import numpy as np
import six
from scipy.ndimage import map_coordinates
from astropy.coordinates import Galactic, ICRS
from astropy import units as u
from astropy.coordinates import UnitSphericalRepresentation
from astropy.wcs.utils import wcs_to_celestial_frame
from ..special_values import UNSEEN
from ..interpolation import FastBilinearInterpolation
# Map from interpolation-order names to the integer order values used by
# scipy.ndimage.map_coordinates.
ORDER = {}
ORDER['nearest-neighbor'] = 0
ORDER['bilinear'] = 1
ORDER['biquadratic'] = 2
ORDER['bicubic'] = 3
# Supported celestial coordinate systems, keyed by the (lowercased) user string.
COORDSYS = {
    'g': Galactic(),
    'c': ICRS(),
    'icrs': ICRS(),
}
def _parse_coord_system(system):
    """Return the astropy frame instance for a coordinate-system name.

    The lookup is case-insensitive; unknown names raise ValueError.
    """
    key = system.lower()
    try:
        frame = COORDSYS[key]
    except KeyError:  # pragma: no cover
        raise ValueError("Coordinate system %s is not known" % system)
    return frame
def _convert_world_coordinates(lon_in, lat_in, wcs_in, wcs_out):
    """Transform world coordinates from one celestial frame to another.

    ``wcs_in`` is a (frame, lon_unit, lat_unit) tuple describing the input
    coordinates; ``wcs_out`` is a WCS whose celestial frame and cunit values
    define the output. Returns (lon_out, lat_out) as plain values in the
    output units.
    """
    in_frame, in_lon_unit, in_lat_unit = wcs_in

    celestial_wcs = wcs_out.celestial
    out_frame = wcs_to_celestial_frame(celestial_wcs)
    out_lon_unit = u.Unit(celestial_wcs.wcs.cunit[0])
    out_lat_unit = u.Unit(celestial_wcs.wcs.cunit[1])

    rep = UnitSphericalRepresentation(lon_in * in_lon_unit,
                                      lat_in * in_lat_unit)
    transformed = in_frame.realize_frame(rep).transform_to(out_frame)
    sph = transformed.represent_as('unitspherical')

    lon_out = sph.lon.to(out_lon_unit).value
    lat_out = sph.lat.to(out_lat_unit).value
    return lon_out, lat_out
class FlatSkyToHealpixTransform(object):
    """
    A class to perform transformation from a flat sky projection to Healpix optimized to be used for the same
    transformation over and over again.
    The constructor will pre-compute all needed quantities for the transformation, and the __call__ method just applies
    the transformation. This avoids to re-compute the same quantities over and over again.
    """
    def __init__(self, wcs_in, coord_system_out, nside, pixels_id, input_shape, order='bilinear', nested=False):
        # Look up lon, lat of pixels in output system and convert colatitude theta
        # and longitude phi to longitude and latitude.
        theta, phi = hp.pix2ang(nside, pixels_id, nested)
        lon_out = np.degrees(phi)
        lat_out = 90. - np.degrees(theta)
        # Convert between celestial coordinates
        coord_system_out = _parse_coord_system(coord_system_out)
        with np.errstate(invalid='ignore'):
            lon_in, lat_in = _convert_world_coordinates(lon_out, lat_out, (coord_system_out, u.deg, u.deg), wcs_in)
        # Look up pixels in input system
        # NOTE(review): wcs_world2pix returns (x, y); here the first output is
        # bound to `yinds`, so self._coords ends up as [x-like, y-like] —
        # confirm this matches the (row, col) ordering that
        # FastBilinearInterpolation expects.
        yinds, xinds = wcs_in.wcs_world2pix(lon_in, lat_in, 0)
        self._coords = [xinds, yinds]
        # Interpolate
        if isinstance(order, six.string_types):
            order = ORDER[order]
        self._order = order  # only used by the commented-out map_coordinates path below
        self._interpolator = FastBilinearInterpolation(input_shape, self._coords)
    def __call__(self, data, fill_value=UNSEEN):
        # healpix_data = map_coordinates(data, self._coords,
        #                                order=self._order,
        #                                mode='constant', cval=fill_value)
        # fill_value is not used by the fast interpolator; the parameter is
        # kept for API compatibility with the map_coordinates version above.
        healpix_data = self._interpolator(data)
        return healpix_data
|
threeMLREPO_NAMEhawc_halPATH_START.@hawc_hal_extracted@hawc_hal-master@hawc_hal@healpix_handling@flat_sky_to_healpix.py@.PATH_END.py
|
{
"filename": "test_regressioncorrector.py",
"repo_name": "lightkurve/lightkurve",
"repo_path": "lightkurve_extracted/lightkurve-main/tests/correctors/test_regressioncorrector.py",
"type": "Python"
}
|
"""Unit tests for the `RegressionCorrector` class."""
import warnings
import numpy as np
from numpy.testing import assert_almost_equal
import pandas as pd
import pytest
from lightkurve import LightCurve, LightkurveWarning
from lightkurve.correctors import RegressionCorrector, DesignMatrix
def test_regressioncorrector_priors():
    """Verify that priors on design-matrix coefficients steer the fit.

    The design matrix has columns a=[1, 1] and b=[1, 2], and the light curve
    has flux=[5, 10], so the unconstrained best fit is coefficients [0, 5]
    (0*a + 5*b == flux). Tight priors should be able to override that
    solution; wide priors should not.
    """
    lc_without_err = LightCurve(flux=[5, 10])
    lc_with_err = LightCurve(flux=[5, 10], flux_err=[1, 1])
    dense_dm = DesignMatrix(pd.DataFrame({"a": [1, 1], "b": [1, 2]}))
    for dm in [dense_dm, dense_dm.to_sparse()]:
        for lc in [lc_without_err, lc_with_err]:
            rc = RegressionCorrector(lc)
            # Without a prior, the exact solution is recovered.
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])
            # A tight prior centered on the exact solution keeps it.
            dm.prior_mu = [0, 5]
            dm.prior_sigma = [1e-6, 1e-6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])
            # A tight prior centered elsewhere drags the fit there.
            dm.prior_mu = [99, 99]
            dm.prior_sigma = [1e-6, 1e-6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [99, 99])
            # A wide prior leaves the exact solution unchanged.
            dm.prior_mu = [9, 9]
            dm.prior_sigma = [1e6, 1e6]
            rc.correct(dm)
            assert_almost_equal(rc.coefficients, [0, 5])
def test_sinusoid_noise():
    """Check that a known sinusoidal trend can be regressed away."""
    npts = 100
    time = np.linspace(1, 100, npts)
    true_flux = np.ones(npts)
    noise = np.sin(time / 5)
    # The underlying signal is flat: flux=1 everywhere.
    true_lc = LightCurve(time=time, flux=true_flux, flux_err=0.1 * np.ones(npts))
    # The observed light curve has the sinusoid added on top.
    noisy_lc = LightCurve(time=time, flux=true_flux + noise, flux_err=true_lc.flux_err)
    design_matrix = DesignMatrix(
        {"noise": noise, "offset": np.ones(len(time))}, name="noise_model"
    )
    for dm in [design_matrix, design_matrix.to_sparse()]:
        # The corrector should recover the flat light curve ...
        rc = RegressionCorrector(noisy_lc)
        corrected_lc = rc.correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)
        # ... and produce its diagnostic plot without error.
        rc.diagnose()
        # Setting (wide) priors should not change the answer.
        dm.prior_mu = [0.1, 0.1]
        dm.prior_sigma = [1e6, 1e6]
        corrected_lc = RegressionCorrector(noisy_lc).correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)
        # It should also work when `flux_err` is not provided.
        noisy_lc = LightCurve(time=time, flux=true_flux + noise)
        corrected_lc = RegressionCorrector(noisy_lc).correct(dm)
        assert_almost_equal(corrected_lc.normalize().flux, true_lc.flux)
def test_nan_input():
    """NaN flux or flux_err must be rejected, except an all-NaN flux_err."""
    with warnings.catch_warnings():
        # Instantiating light curves with NaNs emits a LightkurveWarning;
        # silence it for the duration of this block.
        warnings.simplefilter("ignore", LightkurveWarning)
        bad_lcs = [
            LightCurve(flux=[5, 10], flux_err=[np.nan, 1]),
            LightCurve(flux=[np.nan, 10], flux_err=[1, 1]),
        ]
        # Each of these should make RegressionCorrector raise a ValueError.
        for lc in bad_lcs:
            with pytest.raises(ValueError):
                RegressionCorrector(lc)
    # An entirely-NaN flux_err is tolerated, because it is common for
    # uncertainties to be missing altogether.
    lc = LightCurve(flux=[5, 10], flux_err=[np.nan, np.nan])
    RegressionCorrector(lc)
def test_zero_fluxerr():
    """Regression test for #668.

    Flux uncertainties smaller than or equal to zero (`lc.flux_err <= 0`)
    would trigger an invalid or non-finite matrix; `RegressionCorrector`
    is expected to detect this and raise a graceful `ValueError`.
    """
    for bad_errs in ([1, 0], [1, -10]):
        lc = LightCurve(flux=[5, 10], flux_err=bad_errs)
        with pytest.raises(ValueError):
            RegressionCorrector(lc)
|
lightkurveREPO_NAMElightkurvePATH_START.@lightkurve_extracted@lightkurve-main@tests@correctors@test_regressioncorrector.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "JLBLine/CHIPS_wrappers",
"repo_path": "CHIPS_wrappers_extracted/CHIPS_wrappers-main/chips_wrappers/run_chips/__init__.py",
"type": "Python"
}
|
JLBLineREPO_NAMECHIPS_wrappersPATH_START.@CHIPS_wrappers_extracted@CHIPS_wrappers-main@chips_wrappers@run_chips@__init__.py@.PATH_END.py
|
|
{
"filename": "ionization_cube.py",
"repo_name": "yt-project/yt",
"repo_path": "yt_extracted/yt-main/doc/source/analyzing/ionization_cube.py",
"type": "Python"
}
|
import time
import h5py
import numpy as np
import yt
from yt.utilities.parallel_tools.parallel_analysis_interface import communication_system
@yt.derived_field(
    name="IonizedHydrogen", units="", display_name=r"\frac{\rho_{HII}}{\rho_H}"
)
def IonizedHydrogen(field, data):
    """Ionized-hydrogen fraction: rho_HII / (rho_HI + rho_HII)."""
    hii = data["gas", "HII_Density"]
    hi = data["gas", "HI_Density"]
    return hii / (hi + hii)
# Record, for every cell, the highest redshift at which it was seen ionized.
ts = yt.DatasetSeries("SED800/DD*/*.index", parallel=8)
ionized_z = np.zeros(ts[0].domain_dimensions, dtype="float32")
t1 = time.time()
for ds in ts.piter():
    z = ds.current_redshift
    for g in yt.parallel_objects(ds.index.grids, njobs=16):
        i1, j1, k1 = g.get_global_startindex()  # Index into our domain
        i2, j2, k2 = g.get_global_startindex() + g.ActiveDimensions
        # Look for the newly ionized gas
        newly_ion = (g["IonizedHydrogen"] > 0.999) & (
            ionized_z[i1:i2, j1:j2, k1:k2] < z
        )
        ionized_z[i1:i2, j1:j2, k1:k2][newly_ion] = z
        g.clear_data()
    print(f"Iteration completed {time.time() - t1:0.3e}")
# Combine the per-rank results slab by slab with an elementwise max.
comm = communication_system.communicators[-1]
for i in range(ionized_z.shape[0]):
    ionized_z[i, :, :] = comm.mpi_allreduce(ionized_z[i, :, :], op="max")
    # NOTE(review): the message says "minimum z" but the printed value is
    # .max() — confirm which of the two is intended.
    print("Slab % 3i has minimum z of %0.3e" % (i, ionized_z[i, :, :].max()))
t2 = time.time()
print(f"Completed. {t2 - t1:0.3e}")
if comm.rank == 0:
    # NOTE(review): the HDF5 file is never explicitly closed; consider a
    # `with h5py.File(...)` block.
    f = h5py.File("IonizationCube.h5", mode="w")
    f.create_dataset("/z", data=ionized_z)
|
yt-projectREPO_NAMEytPATH_START.@yt_extracted@yt-main@doc@source@analyzing@ionization_cube.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "simonsobs/socs",
"repo_path": "socs_extracted/socs-main/socs/agents/hwp_encoder/__init__.py",
"type": "Python"
}
|
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@socs@agents@hwp_encoder@__init__.py@.PATH_END.py
|
|
{
"filename": "primitives_gmos_ifu.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/gmos/primitives_gmos_ifu.py",
"type": "Python"
}
|
#
# primtives_gmos_ifu.py
# ------------------------------------------------------------------------------
from .primitives_gmos_spect import GMOSSpect
from .primitives_gmos_nodandshuffle import GMOSNodAndShuffle
from . import parameters_gmos_ifu
from recipe_system.utils.decorators import parameter_override, capture_provenance
# ------------------------------------------------------------------------------
@parameter_override
@capture_provenance
class GMOSIFU(GMOSSpect, GMOSNodAndShuffle):
    """
    This is the class containing all of the preprocessing primitives
    for the GMOSIFU level of the type hierarchy tree. It inherits all
    the primitives from the level above
    """
    tagset = {"GEMINI", "GMOS", "SPECT", "IFU"}
    def _initialize(self, adinputs, **kwargs):
        # Register the GMOS IFU parameter definitions on top of the parent setup.
        super()._initialize(adinputs, **kwargs)
        self._param_update(parameters_gmos_ifu)
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@gmos@primitives_gmos_ifu.py@.PATH_END.py
|
{
"filename": "mond.py",
"repo_name": "mianbreton/pysco",
"repo_path": "pysco_extracted/pysco-master/pysco/mond.py",
"type": "Python"
}
|
"""
Implementation of QUMOND interpolating functions (Famaey & McGaugh, 2021)
This module implements the inner gradient of the QUMOND interpolating function
with the simple, n-family, beta-family, gamma-family, and delta-family
parameterizations.
"""
import numpy as np
import numpy.typing as npt
from numba import njit, prange
import math
@njit(["f4(f4)"], fastmath=True, cache=True)
def nu_simple(y: np.float32) -> np.float32:
    """
    QUMOND interpolating function with the simple parameterization.

    nu(y) = 1/2 + sqrt(1/4 + 1/y)

    Parameters
    ----------
    y : np.float32
        Argument

    Returns
    -------
    np.float32
        Nu function

    Examples
    --------
    >>> from pysco.mond import nu_simple
    >>> nu_simple(1)
    """
    one = np.float32(1)
    return np.float32(0.5) + math.sqrt(np.float32(0.25) + one / y)
# Currently, the option [fastmath = True] generates the following message:
# LLVM ERROR: Symbol not found: __powisf2
# Indicating a bug in the LLVM compiler. Hope this get fixed in the future.
@njit(["f4(f4, i4)"], fastmath=False, cache=True)
def nu_n(y: np.float32, n: int) -> np.float32:
    """
    QUMOND n-family interpolating function.

    nu(y) = (1/2 + sqrt(1/4 + y**(-n)))**(1/n)

    Parameters
    ----------
    y : np.float32
        Argument
    n : int
        Exponent of the n-family parameterization

    Returns
    -------
    np.float32
        Nu function

    Examples
    --------
    >>> from pysco.mond import nu_n
    >>> nu_n(1, 1)
    """
    inner = np.float32(0.5) + math.sqrt(np.float32(0.25) + y ** np.int32(-n))
    return inner ** np.float32(1.0 / n)
@njit(["f4(f4, f4)"], fastmath=True, cache=True)
def nu_beta(y: np.float32, beta: np.float32) -> np.float32:
    """
    QUMOND beta-family interpolating function.

    Parameters
    ----------
    y : np.float32
        Argument
    beta : np.float32
        Parameter of the beta-family parameterization

    Returns
    -------
    np.float32
        Nu function

    Examples
    --------
    >>> from pysco.mond import nu_beta
    >>> nu_beta(1, 1)
    """
    minus_half = np.float32(-0.5)
    one = np.float32(1)
    exp_minus_y = math.exp(-y)
    nu = beta * exp_minus_y
    one_minus_expmy = one - exp_minus_y
    # Guard: (1 - e^-y)**(-1/2) is only real/finite for a positive base.
    if one_minus_expmy > 0:
        nu += one_minus_expmy**minus_half
    return nu
@njit(["f4(f4, f4)"], fastmath=True, cache=True)
def nu_gamma(y: np.float32, gamma: np.float32) -> np.float32:
    """
    QUMOND gamma-family interpolating function.

    Parameters
    ----------
    y : np.float32
        Argument
    gamma : np.float32
        Parameter of the gamma-family parameterization

    Returns
    -------
    np.float32
        Nu function

    Examples
    --------
    >>> from pysco.mond import nu_gamma
    >>> nu_gamma(1, 1)
    """
    one = np.float32(1)
    inv_gamma = np.float32(gamma ** (-1))
    expo = math.exp(-(y ** np.float32(0.5 * gamma)))
    return (one - expo) ** np.float32(-1.0 / gamma) + (one - inv_gamma) * expo
@njit(["f4(f4, f4)"], fastmath=True, cache=True)
def nu_delta(y: np.float32, delta: np.float32) -> np.float32:
    """
    QUMOND delta-family interpolating function.

    Parameters
    ----------
    y : np.float32
        Argument
    delta : np.float32
        Parameter of the delta-family parameterization

    Returns
    -------
    np.float32
        Nu function

    Examples
    --------
    >>> from pysco.mond import nu_delta
    >>> nu_delta(1, 1)
    """
    one = np.float32(1)
    half_delta = np.float32(0.5 * delta)
    minus_inv_delta = np.float32(-1.0 / delta)
    return (one - math.exp(-(y**half_delta))) ** (minus_inv_delta)
@njit(
    ["void(f4[:,:,::1], f4[:,:,::1], f4)"],
    fastmath=True,
    cache=True,
    parallel=True,
)
def rhs_simple(
    potential: npt.NDArray[np.float32], out: npt.NDArray[np.float32], g0: np.float32
) -> None:
    """
    This function implements the right-hand side of QUMOND Poisson equation using interpolating function
    with the simple parameterization.

    Parameters
    ----------
    potential : npt.NDArray[np.float32]
        Newtonian Potential field [N, N, N]
    out : npt.NDArray[np.float32]
        Output array [N, N, N]
    g0 : np.float32
        Acceleration constant

    Examples
    --------
    >>> from pysco.mond import rhs_simple
    >>> phi = np.random.rand(32, 32, 32).astype(np.float32)
    >>> out = np.empty_like(phi)
    >>> rhs_simple(phi, out, 0.5)
    """
    inv_g0 = np.float32(1.0 / g0)
    ncells_1d = len(potential)
    # invh = 1/h and inv4h = 1/(4h) with grid spacing h = 1/N
    # (box length assumed to be 1 — consistent with invh = N).
    invh = np.float32(ncells_1d)
    inv4h = np.float32(0.25 * ncells_1d)
    # Loops run over [-1, N-2] so that i-1 and i+1 stay in [-2, N-1];
    # numpy's negative indexing then wraps, giving periodic boundaries.
    for i in prange(-1, ncells_1d - 1):
        im1 = i - 1
        ip1 = i + 1
        for j in prange(-1, ncells_1d - 1):
            jm1 = j - 1
            jp1 = j + 1
            for k in prange(-1, ncells_1d - 1):
                km1 = k - 1
                kp1 = k + 1
                potential_000 = potential[i, j, k]
                # Point A at -h/2, Point B at +h/2 (same convention as Lüghausen et al. 2014)
                # Ax: gradient of phi at the A face in x; transverse components
                # are averaged over the two planes adjacent to the face.
                f_Ax_x = invh * (potential_000 - potential[im1, j, k])
                f_Ax_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[im1, jp1, k]
                    - potential[im1, jm1, k]
                )
                f_Ax_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[im1, j, kp1]
                    - potential[im1, j, km1]
                )
                f_Ax = math.sqrt(f_Ax_x**2 + f_Ax_y**2 + f_Ax_z**2)
                # Bx
                f_Bx_x = invh * (-potential_000 + potential[ip1, j, k])
                f_Bx_y = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[ip1, jm1, k]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bx_z = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[ip1, j, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_Bx = math.sqrt(f_Bx_x**2 + f_Bx_y**2 + f_Bx_z**2)
                # Ay
                f_Ay_y = invh * (potential_000 - potential[i, jm1, k])
                f_Ay_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, jm1, k]
                    - potential[im1, jm1, k]
                )
                f_Ay_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[i, jm1, kp1]
                    - potential[i, jm1, km1]
                )
                f_Ay = math.sqrt(f_Ay_x**2 + f_Ay_y**2 + f_Ay_z**2)
                # By
                f_By_y = invh * (-potential_000 + potential[i, jp1, k])
                f_By_x = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[im1, jp1, k]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_By_z = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jp1, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_By = math.sqrt(f_By_x**2 + f_By_y**2 + f_By_z**2)
                # Az
                f_Az_z = invh * (potential_000 - potential[i, j, km1])
                f_Az_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, j, km1]
                    - potential[im1, j, km1]
                )
                f_Az_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[i, jp1, km1]
                    - potential[i, jm1, km1]
                )
                f_Az = math.sqrt(f_Az_x**2 + f_Az_y**2 + f_Az_z**2)
                # Bz
                f_Bz_z = invh * (-potential_000 + potential[i, j, kp1])
                f_Bz_x = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[im1, j, kp1]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_Bz_y = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jm1, kp1]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bz = math.sqrt(f_Bz_x**2 + f_Bz_y**2 + f_Bz_z**2)
                # Evaluate nu at each face from the local |grad phi| / g0.
                nu_Ax = nu_simple(f_Ax * inv_g0)
                nu_Ay = nu_simple(f_Ay * inv_g0)
                nu_Az = nu_simple(f_Az * inv_g0)
                nu_Bx = nu_simple(f_Bx * inv_g0)
                nu_By = nu_simple(f_By * inv_g0)
                nu_Bz = nu_simple(f_Bz * inv_g0)
                # Divergence of nu * grad(phi) via face differences.
                out[i, j, k] = invh * (
                    nu_Bx * f_Bx_x
                    - nu_Ax * f_Ax_x
                    + nu_By * f_By_y
                    - nu_Ay * f_Ay_y
                    + nu_Bz * f_Bz_z
                    - nu_Az * f_Az_z
                )
@njit(
    ["void(f4[:,:,::1], f4[:,:,::1], f4, i4)"], fastmath=True, cache=True, parallel=True
)
def rhs_n(
    potential: npt.NDArray[np.float32],
    out: npt.NDArray[np.float32],
    g0: np.float32,
    n: int,
) -> None:
    """
    This function implements the right-hand side of QUMOND Poisson equation using n-family interpolating function
    Parameters
    ----------
    potential : npt.NDArray[np.float32]
        Newtonian Potential field [N, N, N]
    out : npt.NDArray[np.float32]
        Output array [N, N, N]
    g0 : np.float32
        Acceleration constant
    n : int
        Exponent of the n-family parameterization
    Notes
    -----
    Writes div[ nu(|grad phi| / g0) grad phi ] into ``out``, evaluating the
    gradient at the six cell faces (points A/B at -h/2 and +h/2 along each
    axis). Assumes a periodic unit box: the grid spacing is h = 1/N and the
    loops start at -1 so negative indices wrap around the domain.
    Examples
    --------
    >>> from pysco.mond import rhs_n
    >>> phi = np.random.rand(32, 32, 32).astype(np.float32)
    >>> out = np.empty_like(phi)
    >>> rhs_n(phi, out, 0.5, 1)
    """
    inv_g0 = np.float32(1.0 / g0)
    ncells_1d = len(potential)
    # Unit box: h = 1/ncells_1d, hence 1/h = ncells_1d and 1/(4h) = 0.25*ncells_1d
    invh = np.float32(ncells_1d)
    inv4h = np.float32(0.25 * ncells_1d)
    # Only the outermost prange is parallelized by numba; starting at -1
    # exploits negative-index wrap-around to get periodic boundaries for free.
    for i in prange(-1, ncells_1d - 1):
        im1 = i - 1
        ip1 = i + 1
        for j in prange(-1, ncells_1d - 1):
            jm1 = j - 1
            jp1 = j + 1
            for k in prange(-1, ncells_1d - 1):
                km1 = k - 1
                kp1 = k + 1
                potential_000 = potential[i, j, k]
                # Point A at -h/2, Point B at +h/2 (same convention as Lüghausen et al. 2014)
                # Normal component of each face gradient is a two-point difference;
                # tangential components are averages of centred differences.
                # Ax
                f_Ax_x = invh * (potential_000 - potential[im1, j, k])
                f_Ax_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[im1, jp1, k]
                    - potential[im1, jm1, k]
                )
                f_Ax_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[im1, j, kp1]
                    - potential[im1, j, km1]
                )
                f_Ax = math.sqrt(f_Ax_x**2 + f_Ax_y**2 + f_Ax_z**2)
                # Bx
                f_Bx_x = invh * (-potential_000 + potential[ip1, j, k])
                f_Bx_y = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[ip1, jm1, k]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bx_z = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[ip1, j, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_Bx = math.sqrt(f_Bx_x**2 + f_Bx_y**2 + f_Bx_z**2)
                # Ay
                f_Ay_y = invh * (potential_000 - potential[i, jm1, k])
                f_Ay_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, jm1, k]
                    - potential[im1, jm1, k]
                )
                f_Ay_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[i, jm1, kp1]
                    - potential[i, jm1, km1]
                )
                f_Ay = math.sqrt(f_Ay_x**2 + f_Ay_y**2 + f_Ay_z**2)
                # By
                f_By_y = invh * (-potential_000 + potential[i, jp1, k])
                f_By_x = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[im1, jp1, k]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_By_z = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jp1, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_By = math.sqrt(f_By_x**2 + f_By_y**2 + f_By_z**2)
                # Az
                f_Az_z = invh * (potential_000 - potential[i, j, km1])
                f_Az_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, j, km1]
                    - potential[im1, j, km1]
                )
                f_Az_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[i, jp1, km1]
                    - potential[i, jm1, km1]
                )
                f_Az = math.sqrt(f_Az_x**2 + f_Az_y**2 + f_Az_z**2)
                # Bz
                f_Bz_z = invh * (-potential_000 + potential[i, j, kp1])
                f_Bz_x = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[im1, j, kp1]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_Bz_y = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jm1, kp1]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bz = math.sqrt(f_Bz_x**2 + f_Bz_y**2 + f_Bz_z**2)
                # Evaluate the MOND interpolating function at each face
                nu_Ax = nu_n(f_Ax * inv_g0, n)
                nu_Ay = nu_n(f_Ay * inv_g0, n)
                nu_Az = nu_n(f_Az * inv_g0, n)
                nu_Bx = nu_n(f_Bx * inv_g0, n)
                nu_By = nu_n(f_By * inv_g0, n)
                nu_Bz = nu_n(f_Bz * inv_g0, n)
                # Divergence of the nu-weighted face gradients
                out[i, j, k] = invh * (
                    nu_Bx * f_Bx_x
                    - nu_Ax * f_Ax_x
                    + nu_By * f_By_y
                    - nu_Ay * f_Ay_y
                    + nu_Bz * f_Bz_z
                    - nu_Az * f_Az_z
                )
@njit(
    ["void(f4[:,:,::1], f4[:,:,::1], f4, f4)"], fastmath=True, cache=True, parallel=True
)
def rhs_beta(
    potential: npt.NDArray[np.float32],
    out: npt.NDArray[np.float32],
    g0: np.float32,
    beta: np.float32,
) -> None:
    """
    This function implements the right-hand side of QUMOND Poisson equation using beta-family interpolating function
    Parameters
    ----------
    potential : npt.NDArray[np.float32]
        Newtonian Potential field [N, N, N]
    out : npt.NDArray[np.float32]
        Output array [N, N, N]
    g0 : np.float32
        Acceleration constant
    beta : np.float32
        Parameter of the beta-family parameterization
    Notes
    -----
    Writes div[ nu(|grad phi| / g0) grad phi ] into ``out``, evaluating the
    gradient at the six cell faces (points A/B at -h/2 and +h/2 along each
    axis). Assumes a periodic unit box: the grid spacing is h = 1/N and the
    loops start at -1 so negative indices wrap around the domain.
    Examples
    --------
    >>> from pysco.mond import rhs_beta
    >>> phi = np.random.rand(32, 32, 32).astype(np.float32)
    >>> out = np.empty_like(phi)
    >>> rhs_beta(phi, out, 0.5, 1)
    """
    inv_g0 = np.float32(1.0 / g0)
    ncells_1d = len(potential)
    # Unit box: h = 1/ncells_1d, hence 1/h = ncells_1d and 1/(4h) = 0.25*ncells_1d
    invh = np.float32(ncells_1d)
    inv4h = np.float32(0.25 * ncells_1d)
    # Only the outermost prange is parallelized by numba; starting at -1
    # exploits negative-index wrap-around to get periodic boundaries for free.
    for i in prange(-1, ncells_1d - 1):
        im1 = i - 1
        ip1 = i + 1
        for j in prange(-1, ncells_1d - 1):
            jm1 = j - 1
            jp1 = j + 1
            for k in prange(-1, ncells_1d - 1):
                km1 = k - 1
                kp1 = k + 1
                potential_000 = potential[i, j, k]
                # Point A at -h/2, Point B at +h/2 (same convention as Lüghausen et al. 2014)
                # Normal component of each face gradient is a two-point difference;
                # tangential components are averages of centred differences.
                # Ax
                f_Ax_x = invh * (potential_000 - potential[im1, j, k])
                f_Ax_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[im1, jp1, k]
                    - potential[im1, jm1, k]
                )
                f_Ax_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[im1, j, kp1]
                    - potential[im1, j, km1]
                )
                f_Ax = math.sqrt(f_Ax_x**2 + f_Ax_y**2 + f_Ax_z**2)
                # Bx
                f_Bx_x = invh * (-potential_000 + potential[ip1, j, k])
                f_Bx_y = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[ip1, jm1, k]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bx_z = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[ip1, j, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_Bx = math.sqrt(f_Bx_x**2 + f_Bx_y**2 + f_Bx_z**2)
                # Ay
                f_Ay_y = invh * (potential_000 - potential[i, jm1, k])
                f_Ay_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, jm1, k]
                    - potential[im1, jm1, k]
                )
                f_Ay_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[i, jm1, kp1]
                    - potential[i, jm1, km1]
                )
                f_Ay = math.sqrt(f_Ay_x**2 + f_Ay_y**2 + f_Ay_z**2)
                # By
                f_By_y = invh * (-potential_000 + potential[i, jp1, k])
                f_By_x = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[im1, jp1, k]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_By_z = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jp1, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_By = math.sqrt(f_By_x**2 + f_By_y**2 + f_By_z**2)
                # Az
                f_Az_z = invh * (potential_000 - potential[i, j, km1])
                f_Az_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, j, km1]
                    - potential[im1, j, km1]
                )
                f_Az_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[i, jp1, km1]
                    - potential[i, jm1, km1]
                )
                f_Az = math.sqrt(f_Az_x**2 + f_Az_y**2 + f_Az_z**2)
                # Bz
                f_Bz_z = invh * (-potential_000 + potential[i, j, kp1])
                f_Bz_x = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[im1, j, kp1]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_Bz_y = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jm1, kp1]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bz = math.sqrt(f_Bz_x**2 + f_Bz_y**2 + f_Bz_z**2)
                # Evaluate the MOND interpolating function at each face
                nu_Ax = nu_beta(f_Ax * inv_g0, beta)
                nu_Ay = nu_beta(f_Ay * inv_g0, beta)
                nu_Az = nu_beta(f_Az * inv_g0, beta)
                nu_Bx = nu_beta(f_Bx * inv_g0, beta)
                nu_By = nu_beta(f_By * inv_g0, beta)
                nu_Bz = nu_beta(f_Bz * inv_g0, beta)
                # Divergence of the nu-weighted face gradients
                out[i, j, k] = invh * (
                    nu_Bx * f_Bx_x
                    - nu_Ax * f_Ax_x
                    + nu_By * f_By_y
                    - nu_Ay * f_Ay_y
                    + nu_Bz * f_Bz_z
                    - nu_Az * f_Az_z
                )
@njit(
    ["void(f4[:,:,::1], f4[:,:,::1], f4, f4)"], fastmath=True, cache=True, parallel=True
)
def rhs_gamma(
    potential: npt.NDArray[np.float32],
    out: npt.NDArray[np.float32],
    g0: np.float32,
    gamma: np.float32,
) -> None:
    """
    This function implements the right-hand side of QUMOND Poisson equation using gamma-family interpolating function
    Parameters
    ----------
    potential : npt.NDArray[np.float32]
        Newtonian Potential field [N, N, N]
    out : npt.NDArray[np.float32]
        Output array [N, N, N]
    g0 : np.float32
        Acceleration constant
    gamma : np.float32
        Parameter of the gamma-family parameterization
    Notes
    -----
    Writes div[ nu(|grad phi| / g0) grad phi ] into ``out``, evaluating the
    gradient at the six cell faces (points A/B at -h/2 and +h/2 along each
    axis). Assumes a periodic unit box: the grid spacing is h = 1/N and the
    loops start at -1 so negative indices wrap around the domain.
    Examples
    --------
    >>> from pysco.mond import rhs_gamma
    >>> phi = np.random.rand(32, 32, 32).astype(np.float32)
    >>> out = np.empty_like(phi)
    >>> rhs_gamma(phi, out, 0.5, 1)
    """
    inv_g0 = np.float32(1.0 / g0)
    ncells_1d = len(potential)
    # Unit box: h = 1/ncells_1d, hence 1/h = ncells_1d and 1/(4h) = 0.25*ncells_1d
    invh = np.float32(ncells_1d)
    inv4h = np.float32(0.25 * ncells_1d)
    # Only the outermost prange is parallelized by numba; starting at -1
    # exploits negative-index wrap-around to get periodic boundaries for free.
    for i in prange(-1, ncells_1d - 1):
        im1 = i - 1
        ip1 = i + 1
        for j in prange(-1, ncells_1d - 1):
            jm1 = j - 1
            jp1 = j + 1
            for k in prange(-1, ncells_1d - 1):
                km1 = k - 1
                kp1 = k + 1
                potential_000 = potential[i, j, k]
                # Point A at -h/2, Point B at +h/2 (same convention as Lüghausen et al. 2014)
                # Normal component of each face gradient is a two-point difference;
                # tangential components are averages of centred differences.
                # Ax
                f_Ax_x = invh * (potential_000 - potential[im1, j, k])
                f_Ax_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[im1, jp1, k]
                    - potential[im1, jm1, k]
                )
                f_Ax_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[im1, j, kp1]
                    - potential[im1, j, km1]
                )
                f_Ax = math.sqrt(f_Ax_x**2 + f_Ax_y**2 + f_Ax_z**2)
                # Bx
                f_Bx_x = invh * (-potential_000 + potential[ip1, j, k])
                f_Bx_y = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[ip1, jm1, k]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bx_z = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[ip1, j, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_Bx = math.sqrt(f_Bx_x**2 + f_Bx_y**2 + f_Bx_z**2)
                # Ay
                f_Ay_y = invh * (potential_000 - potential[i, jm1, k])
                f_Ay_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, jm1, k]
                    - potential[im1, jm1, k]
                )
                f_Ay_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[i, jm1, kp1]
                    - potential[i, jm1, km1]
                )
                f_Ay = math.sqrt(f_Ay_x**2 + f_Ay_y**2 + f_Ay_z**2)
                # By
                f_By_y = invh * (-potential_000 + potential[i, jp1, k])
                f_By_x = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[im1, jp1, k]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_By_z = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jp1, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_By = math.sqrt(f_By_x**2 + f_By_y**2 + f_By_z**2)
                # Az
                f_Az_z = invh * (potential_000 - potential[i, j, km1])
                f_Az_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, j, km1]
                    - potential[im1, j, km1]
                )
                f_Az_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[i, jp1, km1]
                    - potential[i, jm1, km1]
                )
                f_Az = math.sqrt(f_Az_x**2 + f_Az_y**2 + f_Az_z**2)
                # Bz
                f_Bz_z = invh * (-potential_000 + potential[i, j, kp1])
                f_Bz_x = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[im1, j, kp1]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_Bz_y = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jm1, kp1]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bz = math.sqrt(f_Bz_x**2 + f_Bz_y**2 + f_Bz_z**2)
                # Evaluate the MOND interpolating function at each face
                nu_Ax = nu_gamma(f_Ax * inv_g0, gamma)
                nu_Ay = nu_gamma(f_Ay * inv_g0, gamma)
                nu_Az = nu_gamma(f_Az * inv_g0, gamma)
                nu_Bx = nu_gamma(f_Bx * inv_g0, gamma)
                nu_By = nu_gamma(f_By * inv_g0, gamma)
                nu_Bz = nu_gamma(f_Bz * inv_g0, gamma)
                # Divergence of the nu-weighted face gradients
                out[i, j, k] = invh * (
                    nu_Bx * f_Bx_x
                    - nu_Ax * f_Ax_x
                    + nu_By * f_By_y
                    - nu_Ay * f_Ay_y
                    + nu_Bz * f_Bz_z
                    - nu_Az * f_Az_z
                )
@njit(
    ["void(f4[:,:,::1], f4[:,:,::1], f4, f4)"], fastmath=True, cache=True, parallel=True
)
def rhs_delta(
    potential: npt.NDArray[np.float32],
    out: npt.NDArray[np.float32],
    g0: np.float32,
    delta: np.float32,
) -> None:
    """
    This function implements the right-hand side of QUMOND Poisson equation using delta-family interpolating function
    Parameters
    ----------
    potential : npt.NDArray[np.float32]
        Newtonian Potential field [N, N, N]
    out : npt.NDArray[np.float32]
        Output array [N, N, N]
    g0 : np.float32
        Acceleration constant
    delta : np.float32
        Parameter of the delta-family parameterization
    Notes
    -----
    Writes div[ nu(|grad phi| / g0) grad phi ] into ``out``, evaluating the
    gradient at the six cell faces (points A/B at -h/2 and +h/2 along each
    axis). Assumes a periodic unit box: the grid spacing is h = 1/N and the
    loops start at -1 so negative indices wrap around the domain.
    Examples
    --------
    >>> from pysco.mond import rhs_delta
    >>> phi = np.random.rand(32, 32, 32).astype(np.float32)
    >>> out = np.empty_like(phi)
    >>> rhs_delta(phi, out, 0.5, 1)
    """
    inv_g0 = np.float32(1.0 / g0)
    ncells_1d = len(potential)
    # Unit box: h = 1/ncells_1d, hence 1/h = ncells_1d and 1/(4h) = 0.25*ncells_1d
    invh = np.float32(ncells_1d)
    inv4h = np.float32(0.25 * ncells_1d)
    # Only the outermost prange is parallelized by numba; starting at -1
    # exploits negative-index wrap-around to get periodic boundaries for free.
    for i in prange(-1, ncells_1d - 1):
        im1 = i - 1
        ip1 = i + 1
        for j in prange(-1, ncells_1d - 1):
            jm1 = j - 1
            jp1 = j + 1
            for k in prange(-1, ncells_1d - 1):
                km1 = k - 1
                kp1 = k + 1
                potential_000 = potential[i, j, k]
                # Point A at -h/2, Point B at +h/2 (same convention as Lüghausen et al. 2014)
                # Normal component of each face gradient is a two-point difference;
                # tangential components are averages of centred differences.
                # Ax
                f_Ax_x = invh * (potential_000 - potential[im1, j, k])
                f_Ax_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[im1, jp1, k]
                    - potential[im1, jm1, k]
                )
                f_Ax_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[im1, j, kp1]
                    - potential[im1, j, km1]
                )
                f_Ax = math.sqrt(f_Ax_x**2 + f_Ax_y**2 + f_Ax_z**2)
                # Bx
                f_Bx_x = invh * (-potential_000 + potential[ip1, j, k])
                f_Bx_y = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[ip1, jm1, k]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bx_z = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[ip1, j, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_Bx = math.sqrt(f_Bx_x**2 + f_Bx_y**2 + f_Bx_z**2)
                # Ay
                f_Ay_y = invh * (potential_000 - potential[i, jm1, k])
                f_Ay_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, jm1, k]
                    - potential[im1, jm1, k]
                )
                f_Ay_z = inv4h * (
                    potential[i, j, kp1]
                    - potential[i, j, km1]
                    + potential[i, jm1, kp1]
                    - potential[i, jm1, km1]
                )
                f_Ay = math.sqrt(f_Ay_x**2 + f_Ay_y**2 + f_Ay_z**2)
                # By
                f_By_y = invh * (-potential_000 + potential[i, jp1, k])
                f_By_x = inv4h * (
                    potential[ip1, jp1, k]
                    - potential[im1, jp1, k]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_By_z = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jp1, km1]
                    + potential[i, j, kp1]
                    - potential[i, j, km1]
                )
                f_By = math.sqrt(f_By_x**2 + f_By_y**2 + f_By_z**2)
                # Az
                f_Az_z = invh * (potential_000 - potential[i, j, km1])
                f_Az_x = inv4h * (
                    potential[ip1, j, k]
                    - potential[im1, j, k]
                    + potential[ip1, j, km1]
                    - potential[im1, j, km1]
                )
                f_Az_y = inv4h * (
                    potential[i, jp1, k]
                    - potential[i, jm1, k]
                    + potential[i, jp1, km1]
                    - potential[i, jm1, km1]
                )
                f_Az = math.sqrt(f_Az_x**2 + f_Az_y**2 + f_Az_z**2)
                # Bz
                f_Bz_z = invh * (-potential_000 + potential[i, j, kp1])
                f_Bz_x = inv4h * (
                    potential[ip1, j, kp1]
                    - potential[im1, j, kp1]
                    + potential[ip1, j, k]
                    - potential[im1, j, k]
                )
                f_Bz_y = inv4h * (
                    potential[i, jp1, kp1]
                    - potential[i, jm1, kp1]
                    + potential[i, jp1, k]
                    - potential[i, jm1, k]
                )
                f_Bz = math.sqrt(f_Bz_x**2 + f_Bz_y**2 + f_Bz_z**2)
                # Evaluate the MOND interpolating function at each face
                nu_Ax = nu_delta(f_Ax * inv_g0, delta)
                nu_Ay = nu_delta(f_Ay * inv_g0, delta)
                nu_Az = nu_delta(f_Az * inv_g0, delta)
                nu_Bx = nu_delta(f_Bx * inv_g0, delta)
                nu_By = nu_delta(f_By * inv_g0, delta)
                nu_Bz = nu_delta(f_Bz * inv_g0, delta)
                # Divergence of the nu-weighted face gradients
                out[i, j, k] = invh * (
                    nu_Bx * f_Bx_x
                    - nu_Ax * f_Ax_x
                    + nu_By * f_By_y
                    - nu_Ay * f_Ay_y
                    + nu_Bz * f_Bz_z
                    - nu_Az * f_Az_z
                )
|
mianbretonREPO_NAMEpyscoPATH_START.@pysco_extracted@pysco-master@pysco@mond.py@.PATH_END.py
|
{
"filename": "slurm_client.py",
"repo_name": "icrar/daliuge",
"repo_path": "daliuge_extracted/daliuge-master/daliuge-engine/dlg/deploy/slurm_client.py",
"type": "Python"
}
|
#
# ICRAR - International Centre for Radio Astronomy Research
# (c) UWA - The University of Western Australia, 2016
# Copyright by UWA (in the framework of the ICRAR)
# All rights reserved
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307 USA
#
"""
Contains a slurm client which generates slurm scripts from daliuge graphs.
"""
import datetime
import sys
import os
import subprocess
import shutil
import tempfile
from dlg import remote
from dlg.runtime import __git_version__ as git_commit
from dlg.deploy.configs import ConfigFactory, init_tpl
from dlg.deploy.configs import DEFAULT_MON_PORT, DEFAULT_MON_HOST
from dlg.deploy.deployment_utils import find_numislands, label_job_dur
from paramiko.ssh_exception import SSHException
class SlurmClient:
    """
    Generates (and optionally submits) a SLURM batch script that deploys a
    daliuge graph on a given facility.

    parameters we can control:
    1. user group / account name (Required)
    2. whether to submit a graph, and if so provide graph path
    3. # of nodes (of Drop Managers)
    4. how long to run
    5. whether to produce offline graph vis
    6. whether to attach proxy for remote monitoring, and if so provide
        DLG_MON_HOST
        DLG_MON_PORT
    7. Root directory of the Log files (Required)
    """

    def __init__(
        self,
        dlg_root=None,
        log_root=None,
        host=None,
        acc=None,
        physical_graph_template_file=None,  # filename of physical graph template
        logical_graph=None,
        job_dur=30,
        num_nodes=None,
        run_proxy=False,
        mon_host=DEFAULT_MON_HOST,
        mon_port=DEFAULT_MON_PORT,
        logv=1,
        facility=None,
        zerorun=False,
        max_threads=0,
        sleepncopy=False,
        num_islands=None,
        all_nics=False,
        check_with_session=False,
        submit=False,
        remote=True,  # NOTE: shadows the dlg.remote module inside __init__ only
        pip_name=None,
        username=None,
    ):
        # Facility config supplies the defaults; explicit arguments win.
        self._config = ConfigFactory.create_config(facility=facility, user=username)
        self.host = self._config.getpar("host") if host is None else host
        self._acc = self._config.getpar("account") if (acc is None) else acc
        self._user = self._config.getpar("user") if (username is None) else username
        self.dlg_root = self._config.getpar("dlg_root") if not dlg_root else dlg_root
        self._log_root = (
            self._config.getpar("log_root") if (log_root is None) else log_root
        )
        self.modules = self._config.getpar("modules")
        self.venv = self._config.getpar("venv")
        self.exec_prefix = self._config.getpar("exec_prefix")
        self._num_nodes = 1 if num_nodes is None else num_nodes
        self._job_dur = job_dur
        self._logical_graph = logical_graph
        self._physical_graph_template_file = physical_graph_template_file
        self._visualise_graph = False
        self._run_proxy = run_proxy
        self._mon_host = mon_host
        self._mon_port = mon_port
        self._pip_name = pip_name
        self._logv = logv
        self._zerorun = zerorun
        self._max_threads = max_threads
        self._sleepncopy = sleepncopy
        self._num_islands = 1 if num_islands is None else num_islands
        self._all_nics = all_nics
        self._check_with_session = check_with_session
        self._submit = submit
        self._remote = remote
        # Fixed at construction time so every component (session dir, job
        # script, log dir) uses the same timestamp.
        self._dtstr = datetime.datetime.now().strftime("%Y-%m-%dT%H-%M-%S")  # .%f
        # The PGT file may require more islands/nodes than requested.
        ni, nn, self._pip_name = find_numislands(self._physical_graph_template_file)
        if isinstance(ni, int) and ni >= self._num_islands:
            self._num_islands = ni
        if nn and nn >= self._num_nodes:
            self._num_nodes = nn
        self.username = username

    def get_session_dirname(self):
        """
        (pipeline name_)[Nnum_of_daliuge_nodes]_[time_stamp]
        """
        # _dtstr was fixed in __init__ so repeated calls (from create_job_desc
        # and mk_session_dir) always produce the same session name.
        # Fall back to a generic name if no PGT was provided, since
        # find_numislands may leave _pip_name as None.
        pip_name = self._pip_name if self._pip_name else "dlg"
        graph_name = pip_name.split("_")[0]  # use only the part of the graph name
        graph_name = graph_name.rsplit(".pgt.graph")[0]
        return "{0}_{1}".format(graph_name, self._dtstr)

    def create_job_desc(self, physical_graph_file):
        """
        Creates the slurm script from a physical graph.

        Parameters
        ----------
        physical_graph_file : str
            Path of the physical graph file referenced by the script.

        Returns
        -------
        str
            The rendered SLURM submission script.
        """
        session_dir = "{0}/workspace/{1}".format(
            self.dlg_root, self.get_session_dirname()
        )
        pardict = dict()
        pardict["VENV"] = self.venv
        pardict["NUM_NODES"] = str(self._num_nodes)
        pardict["PIP_NAME"] = self._pip_name
        pardict["SESSION_ID"] = os.path.split(session_dir)[-1]
        pardict["JOB_DURATION"] = label_job_dur(self._job_dur)
        pardict["ACCOUNT"] = self._acc
        # Inside a venv the generic interpreter is used; otherwise the
        # interpreter running this client.
        pardict["PY_BIN"] = "python3" if pardict["VENV"] else sys.executable
        pardict["LOG_DIR"] = session_dir
        # A logical graph takes precedence over a physical graph.
        pardict["GRAPH_PAR"] = (
            '--logical-graph "{0}"'.format(self._logical_graph)
            if self._logical_graph
            else (
                '--physical-graph "{0}"'.format(physical_graph_file)
                if physical_graph_file
                else ""
            )
        )
        pardict["PROXY_PAR"] = (
            "--monitor_host %s --monitor_port %d" % (self._mon_host, self._mon_port)
            if self._run_proxy
            else ""
        )
        pardict["GRAPH_VIS_PAR"] = "--dump" if self._visualise_graph else ""
        pardict["LOGV_PAR"] = "--verbose-level %d" % self._logv
        pardict["ZERORUN_PAR"] = "--zerorun" if self._zerorun else ""
        pardict["MAXTHREADS_PAR"] = "--max-threads %d" % self._max_threads
        pardict["SNC_PAR"] = "--app 1" if self._sleepncopy else "--app 0"
        pardict["NUM_ISLANDS_PAR"] = "--num_islands %d" % self._num_islands
        pardict["ALL_NICS"] = "--all_nics" if self._all_nics else ""
        pardict["CHECK_WITH_SESSION"] = (
            "--check_with_session" if self._check_with_session else ""
        )
        pardict["MODULES"] = self.modules
        pardict["DLG_ROOT"] = self.dlg_root
        pardict["EXEC_PREFIX"] = self.exec_prefix
        job_desc = init_tpl.safe_substitute(pardict)
        return job_desc

    def mk_session_dir(self, dlg_root: str = ""):
        """
        Create the session directory. If dlg_root is provided it is used,
        else env var DLG_ROOT is used.

        Returns
        -------
        str
            The session directory path, or "" when it cannot be determined.
        """
        if dlg_root:  # has always preference
            self.dlg_root = dlg_root
        if self._remote and not self.dlg_root:
            print("Deploying on a remote cluster requires specifying DLG_ROOT!")
            print("Unable to create session directory!")
            return ""
        elif not self._remote and not self.dlg_root:
            # Locally fall back to the DLG_ROOT env var, then to ~/.dlg.
            # (The original read os.environ["DLG_ROOT"] directly, which raises
            # KeyError when unset, built "$HOME.dlg" without a separator, and
            # never assigned the fallback to self.dlg_root.)
            self.dlg_root = os.environ.get("DLG_ROOT") or os.path.join(
                os.environ["HOME"], ".dlg"
            )
        session_dir = "{0}/workspace/{1}".format(
            self.dlg_root, self.get_session_dirname()
        )
        if not self._remote and not os.path.exists(session_dir):
            os.makedirs(session_dir)
        if self._remote:
            command = f"mkdir -p {session_dir}"
            print(
                f"Creating remote session directory on {self.username}@{self.host}: {command}"
            )
            try:
                remote.execRemote(self.host, command, username=self.username)
            except (TypeError, SSHException):
                print(
                    f"ERROR: Unable to create {session_dir} on {self.username}@{self.host}"
                )
                # Exit with a non-zero status: this is an error condition.
                sys.exit(1)
        return session_dir

    def submit_job(self):
        """
        Submits the slurm script to the requested facility.

        Returns
        -------
        str or None
            The SLURM job ID when a remote submission succeeded, else None.
        """
        jobId = None
        session_dir = self.mk_session_dir()
        physical_graph_file_name = "{0}/{1}".format(session_dir, self._pip_name)
        if self._physical_graph_template_file:
            if self._remote:
                print(f"Copying PGT to: {physical_graph_file_name}")
                remote.copyTo(
                    self.host,
                    self._physical_graph_template_file,
                    physical_graph_file_name,
                    username=self.username,
                )
            else:
                shutil.copyfile(
                    self._physical_graph_template_file, physical_graph_file_name
                )
        job_file_name = "{0}/jobsub.sh".format(session_dir)
        job_desc = self.create_job_desc(physical_graph_file_name)
        if self._remote:
            print(f"Creating SLURM script remotely: {job_file_name}")
            # mkstemp replaces the insecure and deprecated tempfile.mktemp;
            # the temporary file is removed even if the copy fails.
            fd, tjob = tempfile.mkstemp(text=True)
            try:
                with os.fdopen(fd, "w") as t:
                    t.write(job_desc)
                remote.copyTo(self.host, tjob, job_file_name, username=self.username)
            finally:
                os.remove(tjob)
        else:
            with open(job_file_name, "w") as job_file:
                job_file.write(job_desc)
            with open(os.path.join(session_dir, "git_commit.txt"), "w") as git_file:
                git_file.write(git_commit)
        if self._submit:
            if not self._remote:
                os.chdir(session_dir)  # so that slurm logs will be dumped here
                print(subprocess.check_output(["sbatch", job_file_name]))
            else:
                command = f"cd {session_dir} && sbatch --parsable {job_file_name}"
                print(f"Submitting sbatch job: {command}")
                stdout, stderr, exitStatus = remote.execRemote(
                    self.host, command, username=self.username
                )
                if exitStatus != 0:
                    # exitStatus is the numeric exit code; only the
                    # output streams need decoding.
                    print(
                        f"Job submission unsuccessful: {exitStatus}, {stderr.decode()}"
                    )
                else:
                    jobId = stdout.decode()
                    print(f"Job with ID {jobId} submitted successfully.")
        else:
            print(f"Created job submission script {job_file_name}")
        return jobId
|
icrarREPO_NAMEdaliugePATH_START.@daliuge_extracted@daliuge-master@daliuge-engine@dlg@deploy@slurm_client.py@.PATH_END.py
|
{
"filename": "sed_plotting.py",
"repo_name": "temuller/hostphot",
"repo_path": "hostphot_extracted/hostphot-main/src/hostphot/sed_plotting.py",
"type": "Python"
}
|
import os
from pathlib import Path
import numpy as np
import pandas as pd
from matplotlib import ticker
import matplotlib.pyplot as plt
import hostphot
from hostphot._constants import workdir, font_family
from hostphot.utils import get_survey_filters
# Locate the filter-configuration table shipped inside the hostphot package.
path = Path(hostphot.__path__[0])
config_file = path.joinpath('filters', 'config.txt')
# Whitespace-separated table with one row per survey (mag-system offsets, etc.).
config_df = pd.read_csv(config_file, sep='\\s+')
# Fixed plotting colour per survey so SEDs look consistent across objects.
colours = {'GALEX':'purple', 'PS1':'green', 'SDSS':'blue', 'DES':'lightblue',
           'SkyMapper':'slateblue', 'SPLUS':'lime', 'LegacySurvey':'gold',
           '2MASS':'red', 'unWISE':'brown', 'WISE':'black',
           'VISTA':'coral', 'UKIDSS':'darkgoldenrod'}
def get_eff_wave(filt, survey):
    """Obtains the effective wavelength of a filter.

    Parameters
    ----------
    filt: str
        Filter name.
    survey: str
        Survey name.

    Returns
    -------
    eff_wave: float
        Effective wavelength in angstroms.
    """
    package_path = Path(hostphot.__path__[0])
    # unWISE photometry uses the WISE transmission curves
    lookup_survey = 'WISE' if survey == 'unWISE' else survey
    candidates = [file for file in package_path.joinpath('filters', lookup_survey).glob('*')
                  if str(file).endswith(f'_{filt}.dat')]
    wave, trans = np.loadtxt(candidates[0]).T
    # transmission-weighted mean wavelength
    return np.sum(wave * trans) / np.sum(trans)
def plot_sed(name, phot_type='global', z=None, radius=None, include=None, exclude=None, save_plot=True, outfile=None):
    """Plots the SED of an object.

    The SED will depend on the available photometry.

    Parameters
    ----------
    name : str
        Name of the object.
    phot_type : str, optional
        Type of photometry: ``global`` or ``local``. By default 'global'.
    z : float, optional
        Redshift of the object, by default ``None``. If given, wavelengths
        are shifted to the rest frame and magnitudes corrected for
        time dilation.
    radius : int, float or str, optional
        Radius for the local photometry, by default ``None``.
    include: list, default ``None``
        List of surveys to include in the plot. Cannot be given together
        with ``exclude``.
    exclude: list, default ``None``
        List of surveys to exclude from the plot. Cannot be given together
        with ``include``.
    save_plot: bool, default ``True``
        Whether to save the SED plot.
    outfile: str, default ``None``
        If given, the plot is saved with this name instead of the default ones.

    Raises
    ------
    ValueError
        The photometry type should be either ``global`` or ``local``.
    """
    if phot_type == 'local':
        assert radius is not None, "radius must be given with local photometry"

    obj_path = Path(workdir, name)
    phot_files = [file for file in obj_path.glob('*')
                  if str(file).endswith(f'_{phot_type}.csv')]

    if include is not None and exclude is not None:
        # fixed typo: was "'inlcude'"
        raise ValueError("'include' cannot be given together with 'exclude'!")
    # include or exclude some surveys; patterns are matched against the path
    # string because the entries are pathlib.Path objects, which do not
    # support the `in` substring operator (the original raised TypeError here)
    if include is not None:
        phot_files = [file for file in phot_files
                      if any(pattern in str(file) for pattern in include)]
    if exclude is not None:
        phot_files = [file for file in phot_files
                      if not any(pattern in str(file) for pattern in exclude)]

    if len(phot_files) == 0:
        print(f'There is no photometry for {name}!')
        return None

    # start plotting
    fig, ax = plt.subplots(figsize=(12, 8))
    ax.invert_yaxis()  # for magnitude plot
    for file in phot_files:
        survey = os.path.basename(file).split('_')[0]
        filters = get_survey_filters(survey)

        # Vega-to-AB conversion offsets, one per filter
        survey_df = config_df[config_df.survey == survey]
        mag_sys_conv = survey_df.mag_sys_conv.values[0]
        # NOTE(review): assumes mag_sys_conv is a string — verify the config
        # table never stores NaN here.
        if ',' in mag_sys_conv:
            mag_sys_conv = {filt: float(conv) for filt, conv
                            in zip(filters, mag_sys_conv.split(','))}
        else:
            mag_sys_conv = {filt: 0.0 for filt in filters}

        # adapt title and column suffix for local photometry
        if phot_type == 'local':
            title = f'{name} - {phot_type} SED (r $= {radius}$ kpc)'
            ext = f'_{radius}'
        elif phot_type == 'global':
            title = f'{name} - {phot_type} SED'
            ext = ''
        else:
            raise ValueError(f'Invalid photometry type: {phot_type}')

        phot_df = pd.read_csv(file)
        waves, phot, phot_err = [], [], []
        valid_filters = []
        for filt in filters:
            filt_str = filt + ext
            filt_err_str = filt + ext + '_err'
            if filt_str not in phot_df.columns:
                continue
            # get photometry
            wave = get_eff_wave(filt, survey)
            mag = phot_df[filt_str].values[0] + mag_sys_conv[filt]
            mag_err = phot_df[filt_err_str].values[0]
            waves.append(wave)
            phot.append(mag)
            phot_err.append(mag_err)
            valid_filters.append(filt)

        if z is not None:
            # shift wavelengths to the rest frame and correct magnitudes
            # for time dilation
            waves = np.array(waves) / (1 + z)
            phot = np.array(phot) - 2.5 * np.log10((1 + z))
            xlabel = r'Rest Wavelength ($\AA$)'
            if ')' in title:
                title = title.replace(')', f' @ $z={z}$)')
            else:
                title = title + f' ($z={z}$)'
        else:
            waves = np.array(waves)
            phot = np.array(phot)
            xlabel = r'Observed Wavelength ($\AA$)'
        phot_err = np.array(phot_err)

        # keep finite magnitudes with positive uncertainties (explicit
        # parentheses: the original relied on `*` binding tighter than `>`)
        mask = (~np.isnan(phot)) & (phot_err > 0)
        waves = waves[mask]
        phot = phot[mask]
        phot_err = phot_err[mask]
        valid_filters = np.array(valid_filters)[mask]

        lims = phot / phot_err < 3  # upper limits; inverted for magnitude plots
        phot_err[lims] = 1  # for visualization
        label = f'{survey} ({",".join(filt for filt in valid_filters)})'
        ax.errorbar(waves, phot, yerr=phot_err, marker='o', ms=10, ls='dashed', lw=3,
                    c=colours[survey], mec='k', capsize=6, label=label, lolims=lims)

    ncol = 2 if len(phot_files) > 4 else 1

    ax.set_xlabel(xlabel, fontsize=24, font=font_family)
    ax.set_ylabel('Magnitude (AB)', fontsize=24, font=font_family)
    ax.set_title(title, fontsize=28, font=font_family)
    for label in ax.get_xticklabels():
        label.set_fontproperties(font_family)
    for label in ax.get_yticklabels():
        label.set_fontproperties(font_family)
    ax.tick_params(labelsize=20)
    ax.legend(ncol=ncol, fancybox=True, framealpha=1, prop={"size": 16, "family": font_family})
    ax.set_xscale('log')

    # show only the round-number wavelength ticks inside the data range
    ticks = np.array([2e3, 4e3, 9e3, 2e4, 4e4, 9e4, 2e5, 4e5, 9e5])
    start, end = ax.get_xlim()
    mask = (ticks >= start) & (ticks <= end)
    ax.set_xticks(ticks[mask])
    formatter = ticker.ScalarFormatter(useMathText=True)
    ax.get_xaxis().set_major_formatter(formatter)

    if save_plot is True:
        if outfile is None:
            obj_dir = Path(workdir, name)
            basename = f'sed_{phot_type}.jpg'
            outfile = obj_dir / basename
        plt.savefig(outfile)
    plt.show()
|
temullerREPO_NAMEhostphotPATH_START.@hostphot_extracted@hostphot-main@src@hostphot@sed_plotting.py@.PATH_END.py
|
{
"filename": "plot.py",
"repo_name": "exoclime/HELIOS-K",
"repo_path": "HELIOS-K_extracted/HELIOS-K-master/plots/Plinth/plot.py",
"type": "Python"
}
|
import os
import matplotlib
#matplotlib.use('PS')
matplotlib.use('agg')
import pylab as pl
import numpy as np
from matplotlib.patches import Rectangle
# Two-panel comparison of the full Voigt opacity against the plinth-subtracted
# one, on linear (left) and logarithmic (right) scales.
pl.rc('font', size=12)
params = {'legend.fontsize': 12}
pl.rcParams.update(params)

pl.figure(figsize=(8, 6))
pl.subplots_adjust(left=0.17, bottom=None, right=None, top=None, wspace=None, hspace=None)

# Data files live next to this script.
dirname = os.path.abspath(os.path.dirname(__file__))
print(dirname)
filename1 = os.path.join(dirname, 'Out_i.dat')
filename2 = os.path.join(dirname, 'Out_j.dat')
filename3 = os.path.join(dirname, 'plinth.dat')

# Load each file once and reuse the arrays in both panels
# (the original reloaded the identical files for the second subplot).
nu1, k1 = np.loadtxt(filename1, unpack=True)
nu2, k2 = np.loadtxt(filename2, unpack=True)
il, nu3, k3 = np.loadtxt(filename3, unpack=True)

# Left panel: linear y-scale
ax1 = pl.subplot(121)
pl.plot(nu1, k1, lw=1.5, label=r'1: full Voigt')
pl.plot(nu2, k2, lw=1.0, label=r'2: Voigt - plinth')
# Shaded rectangle marks the plinth contribution under the line centre.
ax1.add_patch(Rectangle((20318.99, 0), 1.0, 2.51241e-6, alpha=0.3, color='g'))
ax1.text(20319.4, 1.0e-6, 'Plinth', color='g')
pl.xlabel(r'$\nu$ [cm$^{-1}$]')
pl.ylabel(r'$\kappa$ [cm$^2$ / g]')
pl.xlim([20318.8, 20320.2])
pl.ylim([0.0, 0.00002])
pl.legend(loc='upper right')

# Right panel: logarithmic y-scale (distinct axis, no longer shadows ax1)
ax2 = pl.subplot(122)
pl.plot(nu1, k1, lw=1.5, label=r'1: full Voigt')
pl.plot(nu2, k2, lw=1.0, label=r'2: Voigt - plinth')
# Rectangle starts at the lower y-limit so it renders on the log axis.
ax2.add_patch(Rectangle((20318.99, 3e-8), 1.0, 2.51241e-6 - 3e-8, alpha=0.3, color='g'))
ax2.text(20319.4, 3.0e-7, 'Plinth', color='g')
pl.xlabel(r'$\nu$ [cm$^{-1}$]')
ax2.set_yscale('log')
pl.xlim([20318.8, 20320.2])
pl.ylim([3E-8, 5E-5])
pl.legend(loc='upper right')

name = 'plot001.png'
pl.savefig(name, dpi=300)
pl.clf()
|
exoclimeREPO_NAMEHELIOS-KPATH_START.@HELIOS-K_extracted@HELIOS-K-master@plots@Plinth@plot.py@.PATH_END.py
|
{
"filename": "centroidRandomWalk.py",
"repo_name": "lsst-ts/ts_wep",
"repo_path": "ts_wep_extracted/ts_wep-main/python/lsst/ts/wep/centroid/centroidRandomWalk.py",
"type": "Python"
}
|
# This file is part of ts_wep.
#
# Developed for the LSST Telescope and Site Systems.
# This product includes software developed by the LSST Project
# (https://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__all__ = ["CentroidRandomWalk"]
import numpy as np
from lsst.ts.wep.centroid.centroidDefault import CentroidDefault
class CentroidRandomWalk(CentroidDefault):
    """CentroidDefault child class to get the centroid of donut by the
    random walk model."""

    def __init__(self):
        # Minimum effective signal (absolute floor for binarization)
        self.minEffSignal = 1e-8
        # Number of bins in the histogram
        self.numOfBins = 256
        # Random seed (re-seeded before each walk for determinism)
        self.seed = 1000

    def getImgBinary(self, imgDonut):
        """Get the binary image.

        Parameters
        ----------
        imgDonut : numpy.ndarray
            Donut image to do the analysis.

        Returns
        -------
        numpy.ndarray [int]
            Binary image of donut.
        """
        imgBinary = np.zeros(imgDonut.shape, dtype=int)
        threshold = self._calcThreshold(imgDonut)
        # A pixel counts as signal only if it exceeds both the absolute
        # floor (minEffSignal) and the histogram-derived threshold.
        imgBinary[imgDonut > max(self.minEffSignal, threshold)] = 1
        return imgBinary

    def _calcThreshold(self, imgDonut):
        """Calculate the threshold to decide the effective signal.

        A random walk over the intensity histogram is started near the
        middle bin and drifts (with a growing "thermal" tolerance) toward
        lower counts, looking for the valley that separates the
        background peak from the donut signal.

        Parameters
        ----------
        imgDonut : numpy.ndarray
            Donut image to do the analysis.

        Returns
        -------
        float
            Threshold.
        """
        # Parameters to decide the signal of donut
        slide = int(0.1 * self.numOfBins)
        stepsize = int(0.06 * self.numOfBins)
        nwalk = int(1.56 * self.numOfBins)
        # Reshape the image to 1D array
        array1d = imgDonut.flatten()
        # Generate the histogram of intensity
        hist, binEdges = np.histogram(array1d, bins=self.numOfBins)
        # Parameters for random walk search: candidate start bins scanned
        # from the middle of the histogram downward, 15 bins at a time.
        start = int(self.numOfBins / 2.1)
        end = slide + 25  # Go back
        startidx = range(start, end, -15)
        foundvalley = False
        for istartPoint in range(len(startidx)):
            minind = startidx[istartPoint]
            # Check the condition of start index; skip start points with
            # no remaining counts to walk over.
            if (minind <= 0) or (max(hist[minind - 1 :]) == 0):
                continue
            minval = hist[minind - 1]
            # Do the random walk search (re-seed so the walk is repeatable)
            np.random.seed(seed=self.seed)
            for ii in range(nwalk + 1):
                if minind >= slide:
                    # Find the index of bin that the count is not zero
                    while minval == 0:
                        minind = minind - 1
                        minval = hist[int(minind - 1)]
                    # Generate the thermal fluctuation based on the random
                    # table to give a random walk/ step with a random thermal
                    # fluctuation.
                    ind = np.round(stepsize * (2 * np.random.rand() - 1)).astype(int)
                    thermal = 1 + 0.5 * np.random.rand() * np.exp(
                        1.0 * ii / (nwalk * 0.3)
                    )
                    # Check the index of bin is within the range of histogram
                    if (minind + ind < 1) or (minind + ind > (self.numOfBins)):
                        continue
                    # Look for the minimum point
                    if hist[int(minind + ind - 1)] < (minval * thermal):
                        # Add a penalty when stepping toward the
                        # high-intensity side so the walk drifts downhill.
                        if ind > 0:
                            ind = int(ind / 3)
                        else:
                            ind = int(ind / 2)
                        # Update the value of minind
                        minval = hist[int(minind + ind - 1)]
                        minind = minind + ind
                else:
                    break
            # Find the signal of donut in histogram
            if minind >= slide:
                foundvalley = True
                break
        # Try to close the second peak: step left while the local slope is
        # small compared to the tail (noise) level of the histogram.
        while (minind >= slide) and (foundvalley is True):
            if np.abs(hist[int(minind - 5)] - hist[int(minind)]) < 4 * np.median(
                hist[len(hist) - 20 :]
            ):
                minind = minind - 1
            else:
                break
        # If no valley (signal) is found for the noise, use the value at start
        # index of histogram to be the threshold.
        if not foundvalley:
            minind = start
        # Get the threshold value of donut
        threshold = binEdges[int(minind)]
        return threshold
|
lsst-tsREPO_NAMEts_wepPATH_START.@ts_wep_extracted@ts_wep-main@python@lsst@ts@wep@centroid@centroidRandomWalk.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/carpet/baxis/title/_font.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Auto-generated graph object wrapping the ``carpet.baxis.title.font``
    node of the plotly figure schema. Each property below delegates
    storage and validation to the ``_BaseTraceHierarchyType`` machinery
    via ``self[...]`` item access."""

    # class properties
    # --------------------
    _parent_path_str = "carpet.baxis.title"
    _path_str = "carpet.baxis.title.font"
    _valid_props = {
        "color",
        "family",
        "lineposition",
        "shadow",
        "size",
        "style",
        "textcase",
        "variant",
        "weight",
    }

    # color
    # -----
    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # family
    # ------
    @property
    def family(self):
        """
        HTML font family - the typeface that will be applied by the web
        browser. The web browser will only be able to apply a font if
        it is available on the system which it operates. Provide
        multiple font families, separated by commas, to indicate the
        preference in which to apply fonts if they aren't available on
        the system. The Chart Studio Cloud (at https://chart-
        studio.plotly.com or on-premise) generates images on a server,
        where only a select number of fonts are installed and
        supported. These include "Arial", "Balto", "Courier New",
        "Droid Sans", "Droid Serif", "Droid Sans Mono", "Gravitas One",
        "Old Standard TT", "Open Sans", "Overpass", "PT Sans Narrow",
        "Raleway", "Times New Roman".

        The 'family' property is a string and must be specified as:
          - A non-empty string

        Returns
        -------
        str
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    # lineposition
    # ------------
    @property
    def lineposition(self):
        """
        Sets the kind of decoration line(s) with text, such as an
        "under", "over" or "through" as well as combinations e.g.
        "under+over", etc.

        The 'lineposition' property is a flaglist and may be specified
        as a string containing:
          - Any combination of ['under', 'over', 'through'] joined with '+' characters
            (e.g. 'under+over')
            OR exactly one of ['none'] (e.g. 'none')

        Returns
        -------
        Any
        """
        return self["lineposition"]

    @lineposition.setter
    def lineposition(self, val):
        self["lineposition"] = val

    # shadow
    # ------
    @property
    def shadow(self):
        """
        Sets the shape and color of the shadow behind text. "auto"
        places minimal shadow and applies contrast text font color. See
        https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
        for additional options.

        The 'shadow' property is a string and must be specified as:
          - A string
          - A number that will be converted to a string

        Returns
        -------
        str
        """
        return self["shadow"]

    @shadow.setter
    def shadow(self, val):
        self["shadow"] = val

    # size
    # ----
    @property
    def size(self):
        """
        The 'size' property is a number and may be specified as:
          - An int or float in the interval [1, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # style
    # -----
    @property
    def style(self):
        """
        Sets whether a font should be styled with a normal or italic
        face from its family.

        The 'style' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'italic']

        Returns
        -------
        Any
        """
        return self["style"]

    @style.setter
    def style(self, val):
        self["style"] = val

    # textcase
    # --------
    @property
    def textcase(self):
        """
        Sets capitalization of text. It can be used to make text appear
        in all-uppercase or all-lowercase, or with each word
        capitalized.

        The 'textcase' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'word caps', 'upper', 'lower']

        Returns
        -------
        Any
        """
        return self["textcase"]

    @textcase.setter
    def textcase(self, val):
        self["textcase"] = val

    # variant
    # -------
    @property
    def variant(self):
        """
        Sets the variant of the font.

        The 'variant' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['normal', 'small-caps', 'all-small-caps',
                'all-petite-caps', 'petite-caps', 'unicase']

        Returns
        -------
        Any
        """
        return self["variant"]

    @variant.setter
    def variant(self, val):
        self["variant"] = val

    # weight
    # ------
    @property
    def weight(self):
        """
        Sets the weight (or boldness) of the font.

        The 'weight' property is a integer and may be specified as:
          - An int (or float that will be cast to an int)
            in the interval [1, 1000]
            OR exactly one of ['normal', 'bold'] (e.g. 'bold')

        Returns
        -------
        int
        """
        return self["weight"]

    @weight.setter
    def weight(self, val):
        self["weight"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.
        """

    def __init__(
        self,
        arg=None,
        color=None,
        family=None,
        lineposition=None,
        shadow=None,
        size=None,
        style=None,
        textcase=None,
        variant=None,
        weight=None,
        **kwargs,
    ):
        """
        Construct a new Font object

        Sets this axis' title font.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.carpet.baxis.title.Font`
        color
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans", "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        lineposition
            Sets the kind of decoration line(s) with text, such as
            an "under", "over" or "through" as well as combinations
            e.g. "under+over", etc.
        shadow
            Sets the shape and color of the shadow behind text.
            "auto" places minimal shadow and applies contrast text
            font color. See https://developer.mozilla.org/en-
            US/docs/Web/CSS/text-shadow for additional options.
        size
        style
            Sets whether a font should be styled with a normal or
            italic face from its family.
        textcase
            Sets capitalization of text. It can be used to make
            text appear in all-uppercase or all-lowercase, or with
            each word capitalized.
        variant
            Sets the variant of the font.
        weight
            Sets the weight (or boldness) of the font.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")
        # Internal fast path: when constructed by the figure machinery,
        # adopt the supplied parent reference and skip all validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.carpet.baxis.title.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.carpet.baxis.title.Font`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # For each property: an explicit keyword argument takes precedence
        # over the value found in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("family", None)
        _v = family if family is not None else _v
        if _v is not None:
            self["family"] = _v
        _v = arg.pop("lineposition", None)
        _v = lineposition if lineposition is not None else _v
        if _v is not None:
            self["lineposition"] = _v
        _v = arg.pop("shadow", None)
        _v = shadow if shadow is not None else _v
        if _v is not None:
            self["shadow"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v
        _v = arg.pop("style", None)
        _v = style if style is not None else _v
        if _v is not None:
            self["style"] = _v
        _v = arg.pop("textcase", None)
        _v = textcase if textcase is not None else _v
        if _v is not None:
            self["textcase"] = _v
        _v = arg.pop("variant", None)
        _v = variant if variant is not None else _v
        if _v is not None:
            self["variant"] = _v
        _v = arg.pop("weight", None)
        _v = weight if weight is not None else _v
        if _v is not None:
            self["weight"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@carpet@baxis@title@_font.py@.PATH_END.py
|
{
"filename": "meta.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/chat_models/meta.py",
"type": "Python"
}
|
from typing import List
from langchain_core.messages import (
AIMessage,
BaseMessage,
ChatMessage,
HumanMessage,
SystemMessage,
)
def _convert_one_message_to_text_llama(message: BaseMessage) -> str:
    """Render a single message in the Llama-2 chat prompt format.

    Raises
    ------
    ValueError
        If the message is not one of the recognized message classes.
    """
    # Guard-clause dispatch; ChatMessage is checked first so role-tagged
    # messages are not swallowed by the more generic branches.
    if isinstance(message, ChatMessage):
        return f"\n\n{message.role.capitalize()}: {message.content}"
    if isinstance(message, HumanMessage):
        return f"[INST] {message.content} [/INST]"
    if isinstance(message, AIMessage):
        return f"{message.content}"
    if isinstance(message, SystemMessage):
        return f"<<SYS>> {message.content} <</SYS>>"
    raise ValueError(f"Got unknown type {message}")
def convert_messages_to_prompt_llama(messages: List[BaseMessage]) -> str:
    """Convert a list of messages to a prompt for llama."""
    rendered = (_convert_one_message_to_text_llama(msg) for msg in messages)
    return "\n".join(rendered)
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@chat_models@meta.py@.PATH_END.py
|
{
"filename": "wlm.md",
"repo_name": "pjcigan/multicolorfits",
"repo_path": "multicolorfits_extracted/multicolorfits-master/examples/wlm.md",
"type": "Markdown"
}
|
# WLM
------- WLM, one of my favorite weird little low-metallicity galaxies -------
You can download the fits files from [the LITTLE THINGS NRAO data page for WLM](https://science.nrao.edu/science/surveys/littlethings/data/wlm.html)
Here, I use the HI, V-band, and near-UV images.
This script creates a combined image that approximately resembles the nice thumbnail on NRAO data page (which was made in Photoshop, specifically to improve on the slightly 'off' appearance of the simplistic RGB image).
```python
import numpy as np
import astropy.io.fits as pyfits
import multicolorfits as mcf
### Load the data
wlm_hidat,wlm_hihdr=pyfits.getdata('./WLM_NA_X0_P_R.FITS',header=True)
#wlm_halphadat,wlm_halphahdr=pyfits.getdata('./wlmhmrms.fits',header=True)
wlm_vdat,wlm_vhdr=pyfits.getdata('./wlmv.fits',header=True)
wlm_nuvdat,wlm_nuvhdr=pyfits.getdata('./wlmncut.fit',header=True)
### The original HI Moment-0 map still retains phantom Stokes and velocity axes. Squeeze down to two axes.
wlm_hidat=np.squeeze(wlm_hidat)
```
- Update the header to 2D as well. Sometimes downloaded headers have a wealth of information about their reduction process, etc. But that is usually far more than is required for a simple plotting routine, and sometimes the extra header cards will interfere with the plotting (header says there are 3 axes when plotter expects only 2, etc.). multicolorfits has a convenience function for forcing a header to 2 dimensions:
```python
wlm_hihdr=mcf.force_hdr_to_2D(wlm_hihdr)
#This particular header lost its RADESYS card somewhere along the way.
# Add it back in manually to enable correct coordinate calculations.
wlm_hihdr['RADESYS']='FK5' #Add the missing RA/DEC system to enable coord conversion
```
- Another option is to simply make a simple 'classic' header from scratch (copying the basic cards needed for WCS). There is an option to specify the RA/DEC system in case it's needed
```python
wlm_hihdr_simple=mcf.makesimpleheader(wlm_hihdr,radesys='FK5')
```
- Let's select the HI image as the 'master' header, which the other images will be reprojected to, and crop it down slightly to the desired size for the final image. multicolorfits has convenience functions for this, and they require the reference pixel coordinates (in decimal, though sex2dec can convert from sexagesimal) and the width in arcseconds.
```python
#cropcenter_coords=mcf.sex2dec('0:01:57.796','-15:28:17.127')
##or, specify directly in decimal:
#cropcenter_coords=(0.4908176671,-15.47142427)
## Could also just get the coords from a pixel location -- here let's take the center pixel:
cropcenter_coords=mcf.convpix2sky(wlm_hihdr_simple,512,512) #[0.4908176822975695, -15.464166666280857]
cropwidth_asec=600. #width to extend in either direction from the reference coords (i.e., half-width)
```
- Now crop the image using the convenience function cropfits2D(), and specify a save path so that we can save a copy to use in the GUI if we want
```python
wlm_hicropdat,wlm_hicrophdr=mcf.cropfits2D_coords(wlm_hidat, wlm_hihdr_simple,
cropcenter_coords, cropwidth_asec, savenew='./wlm_hicrop.fits', overwrite=True)
```
- Reproject the other two images to the same (new/cropped) header, and save copies for use in the GUI.
Reprojection can be done using the kapteyn package reproject2D(...,option='kapteyn') or with the reproject package --
- option='interp' for reproject_interp [default] or
- option='spi' for reproject_exact
See the kapteyn and reproject package documentation for more info
```python
wlm_vcropdat=mcf.reproject2D(wlm_vdat,mcf.makesimpleheader(wlm_vhdr),wlm_hicrophdr);
pyfits.writeto('./wlm_vcrop.fits',wlm_vcropdat,wlm_hicrophdr,overwrite=True)
wlm_nuvcropdat=mcf.reproject2D(wlm_nuvdat,mcf.makesimpleheader(wlm_nuvhdr),wlm_hicrophdr);
pyfits.writeto('./wlm_nuvcrop.fits',wlm_nuvcropdat,wlm_hicrophdr,overwrite=True)
```
- Now, you could use the GUI to interactively select levels & colors. Or, can do it in a script...
# Creating the multi-color RGB image
Load in the cropped images here if you don't want to repeat the above process again...
```python
"""
wlm_hicropdat,wlm_hicrophdr=pyfits.getdata('./wlm_hicrop.fits',header=True)
wlm_vcropdat,wlm_vcrophdr=pyfits.getdata('./wlm_vcrop.fits',header=True)
wlm_nuvcropdat,wlm_nuvcrophdr=pyfits.getdata('./wlm_nuvcrop.fits',header=True)
"""
```
- Convert the single images (greyscale) to RGB format (still greyscale, but now with R,G,B channels). Like in the other example, setting checkscale=True here will bring up a plot window to check your scaling (useful if you're not using the GUI to do this interactively). Just set to False (default) to skip the popup.
```python
hi_greyRGB=mcf.greyRGBize_image(wlm_hicropdat, rescalefn='asinh', scaletype='perc',
min_max=[1.,99.9], gamma=2.2, checkscale=True)
v_greyRGB=mcf.greyRGBize_image(wlm_vcropdat, rescalefn='asinh', scaletype='abs',
min_max=[3650.,4800.], gamma=2.2, checkscale=True)
nuv_greyRGB=mcf.greyRGBize_image(wlm_nuvcropdat, rescalefn='asinh', scaletype='perc',
min_max=[20,99.9], gamma=2.2, checkscale=True)
```
- Now colorize the greyscale RGB images using colorize_image(image,color)
The color can be either HTML/HEX, RGB tuples, or HSV tuples (default). (specify colorintype='hex', 'rgb', 'hsv')
--> This will take some seconds for very large files.
```python
hi_red=mcf.colorize_image(hi_greyRGB, '#994242', colorintype='hex', gammacorr_color=2.2)
v_yellow=mcf.colorize_image(v_greyRGB, '#FFF9DB', colorintype='hex', gammacorr_color=2.2)
nuv_blue=mcf.colorize_image(nuv_greyRGB, '#1773E9', colorintype='hex', gammacorr_color=2.2)
```
- Combine the separate colored images into one master RGB image
```python
wlm_RYB=mcf.combine_multicolor([hi_red,v_yellow,nuv_blue],gamma=2.2)
```
- Example plot
```python
mcf.plotsinglemulticolorRGB(wlm_RYB, wlm_hicrophdr, 'WLM -- HI (red), V (yellow), NUV (blue)',
'./WLM_testplot.jpg', tickcolor='0.6', labelcolor='k', facecolor='w', minorticks=True, dpi=150)
```
Here is the result:

- Compare the custom multicolor RGB to the pure (R,G,B) frame image.
```python
# --> Rather than re-scale each original image, just take one of the greyRGB frames from each
wlm_pureRGB=np.dstack([hi_greyRGB[:,:,0],v_greyRGB[:,:,0],nuv_greyRGB[:,:,0]])
mcf.comparemulticolorRGB_pureRGB(wlm_pureRGB, wlm_RYB,wlm_hicrophdr,
'Custom Multicolor: RYB',"WLM", './wlm_compare.jpg', tickcolor='0.6', supy=.75)
```

As with the other example, the simple RGB case is good for emphasizing differences in the features. Of course, this could be achieved in multicolorfits as well with different choices for the image colors. This is merely intended to be an example of how to produce something that looks nice.
Save out the RGB fits for later use (can also load in DS9)
```python
mcf.saveRGBfits('./wlm_RYB.fits', wlm_RYB, wlm_hicrophdr)
```
|
pjciganREPO_NAMEmulticolorfitsPATH_START.@multicolorfits_extracted@multicolorfits-master@examples@wlm.md@.PATH_END.py
|
{
"filename": "_marker.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/scattercarpet/selected/_marker.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Marker(_BaseTraceHierarchyType):
    """Auto-generated graph object for the marker style applied to
    *selected* points of a ``scattercarpet`` trace. Property access
    delegates to the ``_BaseTraceHierarchyType`` machinery via
    ``self[...]`` item access."""

    # class properties
    # --------------------
    _parent_path_str = "scattercarpet.selected"
    _path_str = "scattercarpet.selected.marker"
    _valid_props = {"color", "opacity", "size"}

    # color
    # -----
    @property
    def color(self):
        """
        Sets the marker color of selected points.

        The 'color' property is a color and may be specified as:
          - A hex string (e.g. '#ff0000')
          - An rgb/rgba string (e.g. 'rgb(255,0,0)')
          - An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
          - An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
          - A named CSS color:
                aliceblue, antiquewhite, aqua, aquamarine, azure,
                beige, bisque, black, blanchedalmond, blue,
                blueviolet, brown, burlywood, cadetblue,
                chartreuse, chocolate, coral, cornflowerblue,
                cornsilk, crimson, cyan, darkblue, darkcyan,
                darkgoldenrod, darkgray, darkgrey, darkgreen,
                darkkhaki, darkmagenta, darkolivegreen, darkorange,
                darkorchid, darkred, darksalmon, darkseagreen,
                darkslateblue, darkslategray, darkslategrey,
                darkturquoise, darkviolet, deeppink, deepskyblue,
                dimgray, dimgrey, dodgerblue, firebrick,
                floralwhite, forestgreen, fuchsia, gainsboro,
                ghostwhite, gold, goldenrod, gray, grey, green,
                greenyellow, honeydew, hotpink, indianred, indigo,
                ivory, khaki, lavender, lavenderblush, lawngreen,
                lemonchiffon, lightblue, lightcoral, lightcyan,
                lightgoldenrodyellow, lightgray, lightgrey,
                lightgreen, lightpink, lightsalmon, lightseagreen,
                lightskyblue, lightslategray, lightslategrey,
                lightsteelblue, lightyellow, lime, limegreen,
                linen, magenta, maroon, mediumaquamarine,
                mediumblue, mediumorchid, mediumpurple,
                mediumseagreen, mediumslateblue, mediumspringgreen,
                mediumturquoise, mediumvioletred, midnightblue,
                mintcream, mistyrose, moccasin, navajowhite, navy,
                oldlace, olive, olivedrab, orange, orangered,
                orchid, palegoldenrod, palegreen, paleturquoise,
                palevioletred, papayawhip, peachpuff, peru, pink,
                plum, powderblue, purple, red, rosybrown,
                royalblue, rebeccapurple, saddlebrown, salmon,
                sandybrown, seagreen, seashell, sienna, silver,
                skyblue, slateblue, slategray, slategrey, snow,
                springgreen, steelblue, tan, teal, thistle, tomato,
                turquoise, violet, wheat, white, whitesmoke,
                yellow, yellowgreen

        Returns
        -------
        str
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    # opacity
    # -------
    @property
    def opacity(self):
        """
        Sets the marker opacity of selected points.

        The 'opacity' property is a number and may be specified as:
          - An int or float in the interval [0, 1]

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    # size
    # ----
    @property
    def size(self):
        """
        Sets the marker size of selected points.

        The 'size' property is a number and may be specified as:
          - An int or float in the interval [0, inf]

        Returns
        -------
        int|float
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color
            Sets the marker color of selected points.
        opacity
            Sets the marker opacity of selected points.
        size
            Sets the marker size of selected points.
        """

    def __init__(self, arg=None, color=None, opacity=None, size=None, **kwargs):
        """
        Construct a new Marker object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of :class:`plotly.graph_objs.scattercarpet.
            selected.Marker`
        color
            Sets the marker color of selected points.
        opacity
            Sets the marker opacity of selected points.
        size
            Sets the marker size of selected points.

        Returns
        -------
        Marker
        """
        super(Marker, self).__init__("marker")
        # Internal fast path: when constructed by the figure machinery,
        # adopt the supplied parent reference and skip all validation.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.scattercarpet.selected.Marker
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scattercarpet.selected.Marker`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties
        # ----------------------------------
        # For each property: an explicit keyword argument takes precedence
        # over the value found in `arg`.
        _v = arg.pop("color", None)
        _v = color if color is not None else _v
        if _v is not None:
            self["color"] = _v
        _v = arg.pop("opacity", None)
        _v = opacity if opacity is not None else _v
        if _v is not None:
            self["opacity"] = _v
        _v = arg.pop("size", None)
        _v = size if size is not None else _v
        if _v is not None:
            self["size"] = _v

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@scattercarpet@selected@_marker.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermapbox/cluster/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``color`` property of ``scattermapbox.cluster``."""

    def __init__(
        self, plotly_name="color", parent_name="scattermapbox.cluster", **kwargs
    ):
        # Schema defaults; any caller-supplied values take precedence.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "calc")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermapbox@cluster@_color.py@.PATH_END.py
|
{
"filename": "_borderwidth.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/sunburst/marker/colorbar/_borderwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``sunburst.marker.colorbar.borderwidth``."""

    def __init__(
        self,
        plotly_name="borderwidth",
        parent_name="sunburst.marker.colorbar",
        **kwargs
    ):
        # Schema defaults; explicit caller overrides win over these.
        edit_type = kwargs.pop("edit_type", "colorbars")
        minimum = kwargs.pop("min", 0)
        role = kwargs.pop("role", "style")
        super(BorderwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@sunburst@marker@colorbar@_borderwidth.py@.PATH_END.py
|
{
"filename": "_xsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/box/_xsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``xsrc`` property of ``box`` traces."""

    def __init__(self, plotly_name="xsrc", parent_name="box", **kwargs):
        # Apply the schema default unless the caller supplied one.
        kwargs.setdefault("edit_type", "none")
        super(XsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@box@_xsrc.py@.PATH_END.py
|
{
"filename": "_ticklabelposition.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/choroplethmapbox/colorbar/_ticklabelposition.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklabelpositionValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``choroplethmapbox.colorbar.ticklabelposition``."""

    # Allowed placements of colorbar tick labels (schema default).
    _DEFAULT_VALUES = [
        "outside",
        "inside",
        "outside top",
        "inside top",
        "outside left",
        "inside left",
        "outside right",
        "inside right",
        "outside bottom",
        "inside bottom",
    ]

    def __init__(
        self,
        plotly_name="ticklabelposition",
        parent_name="choroplethmapbox.colorbar",
        **kwargs,
    ):
        # Apply schema defaults unless the caller supplied overrides.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("values", self._DEFAULT_VALUES)
        super(TicklabelpositionValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@choroplethmapbox@colorbar@_ticklabelposition.py@.PATH_END.py
|
{
"filename": "test_sky_coord_velocities.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/coordinates/tests/test_sky_coord_velocities.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Tests for putting velocity differentials into SkyCoord objects.
Note: the skyoffset velocity tests are in a different file, in
test_skyoffset_transformations.py
"""
import numpy as np
import pytest
from astropy import units as u
from astropy.coordinates import (
ICRS,
CartesianDifferential,
CartesianRepresentation,
Galactic,
PrecessedGeocentric,
RadialDifferential,
SkyCoord,
SphericalCosLatDifferential,
SphericalDifferential,
SphericalRepresentation,
UnitSphericalCosLatDifferential,
UnitSphericalDifferential,
UnitSphericalRepresentation,
)
from astropy.tests.helper import assert_quantity_allclose
from astropy.utils.compat.optional_deps import HAS_SCIPY
def test_creation_frameobjs():
    """SkyCoord built from an ICRS frame keeps positions and proper motions."""
    frame = ICRS(
        1 * u.deg, 2 * u.deg, pm_ra_cosdec=0.2 * u.mas / u.yr, pm_dec=0.1 * u.mas / u.yr
    )
    coord = SkyCoord(frame)
    for name in ("ra", "dec", "pm_ra_cosdec", "pm_dec"):
        assert_quantity_allclose(getattr(frame, name), getattr(coord, name))

    # A differential-free frame should still agree on the positional attributes.
    coord_nodiff = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))
    for name in ("ra", "dec"):
        assert_quantity_allclose(getattr(coord, name), getattr(coord_nodiff, name))
def test_creation_attrs():
    """Create SkyCoords directly from scalar attributes with proper motions.

    Checks three creation paths: cos(dec)-type proper motion with an explicit
    frame, plain-longitude proper motion via ``differential_type``, and
    sexagesimal string input.  Each time the stored values are compared after
    a unit conversion, which also exercises the unit machinery.
    """
    # cos(dec)-type proper motion with an explicit FK5 frame.
    sc1 = SkyCoord(
        1 * u.deg,
        2 * u.deg,
        pm_ra_cosdec=0.2 * u.mas / u.yr,
        pm_dec=0.1 * u.mas / u.yr,
        frame="fk5",
    )
    assert_quantity_allclose(sc1.ra, 1 * u.deg)
    assert_quantity_allclose(sc1.dec, 2 * u.deg)
    # 0.2 mas/yr == 0.2 arcsec/kyr, so these double as unit-conversion checks.
    assert_quantity_allclose(sc1.pm_ra_cosdec, 0.2 * u.arcsec / u.kyr)
    assert_quantity_allclose(sc1.pm_dec, 0.1 * u.arcsec / u.kyr)
    # Plain d(ra)/dt proper motion, requested via differential_type.
    sc2 = SkyCoord(
        1 * u.deg,
        2 * u.deg,
        pm_ra=0.2 * u.mas / u.yr,
        pm_dec=0.1 * u.mas / u.yr,
        differential_type=SphericalDifferential,
    )
    assert_quantity_allclose(sc2.ra, 1 * u.deg)
    assert_quantity_allclose(sc2.dec, 2 * u.deg)
    assert_quantity_allclose(sc2.pm_ra, 0.2 * u.arcsec / u.kyr)
    assert_quantity_allclose(sc2.pm_dec, 0.1 * u.arcsec / u.kyr)
    # Sexagesimal string input: "h:m:s d:m:s" with per-component units.
    sc3 = SkyCoord(
        "1:2:3 4:5:6",
        pm_ra_cosdec=0.2 * u.mas / u.yr,
        pm_dec=0.1 * u.mas / u.yr,
        unit=(u.hour, u.deg),
    )
    # 1h 2m 3s of hour angle; the factor 15 converts time-minutes/seconds
    # of RA to arc units.
    assert_quantity_allclose(
        sc3.ra, 1 * u.hourangle + 2 * u.arcmin * 15 + 3 * u.arcsec * 15
    )
    assert_quantity_allclose(sc3.dec, 4 * u.deg + 5 * u.arcmin + 6 * u.arcsec)
    # might as well check with sillier units?
    assert_quantity_allclose(
        sc3.pm_ra_cosdec, 1.2776637006616473e-07 * u.arcmin / u.fortnight
    )
    assert_quantity_allclose(sc3.pm_dec, 6.388318503308237e-08 * u.arcmin / u.fortnight)
def test_creation_copy_basic():
    """Copying a SkyCoord into a new SkyCoord preserves the velocities."""
    original = SkyCoord(
        ICRS(
            1 * u.deg,
            2 * u.deg,
            pm_ra_cosdec=0.2 * u.mas / u.yr,
            pm_dec=0.1 * u.mas / u.yr,
        )
    )
    duplicate = SkyCoord(original)
    for name in ("ra", "dec", "pm_ra_cosdec", "pm_dec"):
        assert_quantity_allclose(getattr(original, name), getattr(duplicate, name))
def test_creation_copy_rediff():
    """Copying preserves attributes; re-typing the differential converts them."""
    original = SkyCoord(
        1 * u.deg,
        2 * u.deg,
        pm_ra=0.2 * u.mas / u.yr,
        pm_dec=0.1 * u.mas / u.yr,
        differential_type=SphericalDifferential,
    )

    duplicate = SkyCoord(original)
    for name in ("ra", "dec", "pm_ra", "pm_dec"):
        assert_quantity_allclose(getattr(original, name), getattr(duplicate, name))

    # Re-typing to cos(lat)-style differentials must agree with an explicit
    # representation conversion.
    retyped = SkyCoord(original, differential_type=SphericalCosLatDifferential)
    expected = original.represent_as(
        SphericalRepresentation, SphericalCosLatDifferential
    )
    assert_quantity_allclose(
        retyped.pm_ra_cosdec, expected.differentials["s"].d_lon_coslat
    )
def test_creation_cartesian():
    """Proper motions derived from a Cartesian rep + differential are correct."""
    position = CartesianRepresentation([10, 0.0, 0.0] * u.pc)
    velocity = CartesianDifferential([0, 100, 0.0] * u.pc / u.Myr)
    position = position.with_differentials(velocity)

    coord = SkyCoord(position)
    expected = velocity.represent_as(SphericalCosLatDifferential, position)
    assert_quantity_allclose(coord.pm_ra_cosdec, expected.d_lon_coslat)
def test_useful_error_missing():
    """Missing-attribute errors should carry informative messages.

    ``l`` does not exist on an ICRS-frame SkyCoord, so a normal
    AttributeError is expected; ``pm_dec`` exists as a concept but the
    coordinate has no differentials attached, so the error should say so.
    """
    sc_nod = SkyCoord(ICRS(1 * u.deg, 2 * u.deg))

    # Initialize the captured messages to None so that an unexpectedly
    # *successful* attribute access produces a clear assertion failure
    # below, instead of the NameError the original unbound locals caused.
    msg_l = None
    try:
        sc_nod.l
    except AttributeError as e:
        # this is double-checking the *normal* behavior
        msg_l = e.args[0]

    msg_pm_dec = None
    try:
        sc_nod.pm_dec
    except Exception as e:
        msg_pm_dec = e.args[0]

    assert msg_l is not None and "has no attribute" in msg_l
    assert msg_pm_dec is not None and "has no associated differentials" in msg_pm_dec
# ----------------------Operations on SkyCoords w/ velocities-------------------

# define some fixtures to get baseline coordinates to try operations with
@pytest.fixture(
    scope="module", params=[(False, False), (True, False), (False, True), (True, True)]
)
def sc(request):
    """SkyCoord with proper motions; optionally with distance and/or RV."""
    with_distance, with_rv = request.param
    kwargs = {"pm_dec": 1 * u.mas / u.yr, "pm_ra_cosdec": 2 * u.mas / u.yr}
    if with_distance:
        kwargs["distance"] = 213.4 * u.pc
    if with_rv:
        kwargs["radial_velocity"] = 61 * u.km / u.s
    return SkyCoord(1 * u.deg, 2 * u.deg, **kwargs)
@pytest.fixture(scope="module")
def scmany():
    """100 coordinates at one sky position with random proper motions."""
    n = 100
    frame = ICRS(
        ra=[1] * n * u.deg,
        dec=[2] * n * u.deg,
        pm_ra_cosdec=np.random.randn(n) * u.mas / u.yr,
        pm_dec=np.random.randn(n) * u.mas / u.yr,
    )
    return SkyCoord(frame)
def test_accessors(sc, scmany):
    """Smoke-test attribute access on velocity-carrying SkyCoords."""
    # The differential must be attached and the derived representations
    # reachable without error.
    sc.data.differentials["s"]
    sph = sc.spherical
    gal = sc.galactic
    # Unit-spherical data carrying a non-unit-spherical differential (an RV
    # but no distance) cannot yet yield a full 3D velocity vector.
    if sc.data.name.startswith("unit") and not sc.data.differentials[
        "s"
    ].name.startswith("unit"):
        # this xfail can be eliminated when issue #7028 is resolved
        pytest.xfail(".velocity fails if there is an RV but not distance")
    sc.velocity
    assert isinstance(sph, SphericalRepresentation)
    assert gal.data.differentials is not None
    # Same checks on the array-valued coordinate, including indexing.
    scmany[0]
    sph = scmany.spherical
    gal = scmany.galactic
    assert isinstance(sph, SphericalRepresentation)
    assert gal.data.differentials is not None
def test_transforms(sc):
    """A velocity-carrying SkyCoord can be transformed to Galactic."""
    galactic = sc.transform_to("galactic")
    assert isinstance(galactic.frame, Galactic)
def test_transforms_diff(sc):
    """Transform to a precessed frame; full 3D information is required."""
    # note that arguably this *should* fail for the no-distance cases: 3D
    # information is necessary to truly solve this, hence the xfail
    if not sc.distance.unit.is_equivalent(u.m):
        pytest.xfail("Should fail for no-distance cases")
    transformed = sc.transform_to(PrecessedGeocentric(equinox="B1975"))
    assert isinstance(transformed.frame, PrecessedGeocentric)
@pytest.mark.skipif(not HAS_SCIPY, reason="Requires scipy")
def test_matching(sc, scmany):
    """Catalog matching runs without error for velocity-carrying coords."""
    # just check that it works and yields something
    match_index, sep2d, sep3d = sc.match_to_catalog_sky(scmany)
def test_constellations(sc):
    """Constellation lookup works regardless of the attached velocities."""
    assert sc.get_constellation() == "Pisces"
@pytest.mark.parametrize("sph_type", ["spherical", "unitspherical"])
def test_cartesian_to_spherical(sph_type):
    """Conversion to unitspherical should work, even if we lose distance."""
    # 1 kpc along x, with the radial component (v_x) at 10 km/s and a
    # transverse v_z of 4.74 km/s, which at 1 kpc corresponds to ~1 mas/yr.
    c = SkyCoord(
        x=1 * u.kpc,
        y=0 * u.kpc,
        z=0 * u.kpc,
        v_x=10 * u.km / u.s,
        v_y=0 * u.km / u.s,
        v_z=4.74 * u.km / u.s,
        representation_type="cartesian",
    )
    # Switch representation in place and check the spherical components.
    c.representation_type = sph_type
    assert c.ra == 0
    assert c.dec == 0
    assert c.pm_ra == 0
    assert u.allclose(c.pm_dec, 1 * (u.mas / u.yr), rtol=1e-3)
    assert u.allclose(c.radial_velocity, 10 * (u.km / u.s))
    # Only the full spherical representation keeps the distance.
    if sph_type == "spherical":
        assert u.allclose(c.distance, 1 * u.kpc)
    else:
        assert not hasattr(c, "distance")
# Each parametrization supplies creation kwargs plus the differential class
# those kwargs are expected to produce.
@pytest.mark.parametrize(
    "diff_info, diff_cls",
    [
        ({"radial_velocity": [20, 30] * u.km / u.s}, RadialDifferential),
        (
            {
                "pm_ra": [2, 3] * u.mas / u.yr,
                "pm_dec": [-3, -4] * u.mas / u.yr,
                "differential_type": "unitspherical",
            },
            UnitSphericalDifferential,
        ),
        (
            {"pm_ra_cosdec": [2, 3] * u.mas / u.yr, "pm_dec": [-3, -4] * u.mas / u.yr},
            UnitSphericalCosLatDifferential,
        ),
    ],
    scope="class",
)
class TestDifferentialClassPropagation:
    """Test that going in between spherical and unit-spherical, we do not
    change differential type (since both can handle the same types).
    """

    def test_sc_unit_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
        # Without a distance the data starts out unit-spherical ...
        sc = SkyCoord(ra=[10, 20] * u.deg, dec=[-10, 10] * u.deg, **diff_info)
        assert isinstance(sc.data, UnitSphericalRepresentation)
        assert isinstance(sc.data.differentials["s"], diff_cls)
        # ... and re-representing as full spherical must keep the
        # differential class unchanged.
        sr = sc.represent_as("spherical")
        assert isinstance(sr, SphericalRepresentation)
        assert isinstance(sr.differentials["s"], diff_cls)

    def test_sc_spherical_with_pm_or_rv_only(self, diff_info, diff_cls):
        # With a distance the data is fully spherical ...
        sc = SkyCoord(
            ra=[10, 20] * u.deg,
            dec=[-10, 10] * u.deg,
            distance=1.0 * u.kpc,
            **diff_info,
        )
        assert isinstance(sc.data, SphericalRepresentation)
        assert isinstance(sc.data.differentials["s"], diff_cls)
        # ... and dropping the radial dimension must also keep the
        # differential class unchanged.
        sr = sc.represent_as("unitspherical")
        assert isinstance(sr, UnitSphericalRepresentation)
        assert isinstance(sr.differentials["s"], diff_cls)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@coordinates@tests@test_sky_coord_velocities.py@.PATH_END.py
|
{
"filename": "fit_binned_spec.py",
"repo_name": "sdss/mangadap",
"repo_path": "mangadap_extracted/mangadap-main/examples/fit_binned_spec.py",
"type": "Python"
}
|
import os
import time
from IPython import embed
import numpy
from astropy.io import fits
from matplotlib import pyplot
from mangadap.config import defaults
from mangadap.config import manga
from mangadap.datacube import MaNGADataCube
from mangadap.util.fitsutil import DAPFitsUtil
from mangadap.util.resolution import SpectralResolution
from mangadap.util.pixelmask import SpectralPixelMask
from mangadap.par.artifactdb import ArtifactDB
from mangadap.par.emissionmomentsdb import EmissionMomentsDB
from mangadap.par.emissionlinedb import EmissionLineDB
from mangadap.proc.templatelibrary import TemplateLibrary
from mangadap.proc.spectralstack import SpectralStack
from mangadap.proc.emissionlinemoments import EmissionLineMoments
from mangadap.proc.sasuke import Sasuke
from mangadap.proc.ppxffit import PPXFFit
from mangadap.proc.stellarcontinuummodel import StellarContinuumModel, StellarContinuumModelBitMask
from mangadap.proc.emissionlinemodel import EmissionLineModelBitMask
from mangadap.proc.spectralfitting import EmissionLineFit
#-----------------------------------------------------------------------------
def get_redshift(plt, ifu, drpall_file=None):
    """
    Get the redshift of a galaxy from the DRPall file.

    Args:
        plt (:obj:`int`):
            Plate number
        ifu (:obj:`int`):
            IFU identifier
        drpall_file (:obj:`str`, optional):
            DRPall file.  If None, attempts to use the default path to
            the file using environmental variables.

    Returns:
        :obj:`float`: The redshift to the galaxy observed by the
        provided PLATEIFU.

    Raises:
        FileNotFoundError: If the DRPall file does not exist.
    """
    if drpall_file is None:
        drpall_file = manga.drpall_file()
    if not drpall_file.exists():
        raise FileNotFoundError(f'Could not find DRPall file: {drpall_file}')
    # Use a context manager so the HDUList is closed even if the PLATEIFU
    # lookup fails (the original left the file handle open).
    with fits.open(str(drpall_file)) as hdu:
        indx = hdu[1].data['PLATEIFU'] == '{0}-{1}'.format(plt, ifu)
        return hdu[1].data['NSA_Z'][indx][0]
def get_spectra(plt, ifu, x, y, directory_path=None):
    """
    Extract spectra from a MaNGA observation.

    Args:
        plt (:obj:`int`):
            Plate number
        ifu (:obj:`int`):
            IFU identifier
        x (:obj:`int`, `numpy.ndarray`_):
            The spaxel coordinate along the RA axis.
        y (:obj:`int`, `numpy.ndarray`_):
            The spaxel coordinate along the DEC axis.
        directory_path (:obj:`str`, optional):
            Directory with the DRP LOGCUBE file. If None, uses the
            default directory path based on the environmental
            variables.

    Returns:
        :obj:`tuple`: Returns 4 numpy vectors: The wavelength, flux,
        flux inverse variance, and spectral resolution extracted from
        the datacube.
    """
    cube = MaNGADataCube.from_plateifu(plt, ifu, directory_path=directory_path)
    # Convert the 2D spaxel coordinates into flat (row-major) indices into
    # the spatially flattened arrays returned below.
    flat_indx = cube.spatial_shape[1]*x+y
    # This function always returns as masked array
    flux = cube.copy_to_masked_array(attr='flux', flag=cube.do_not_fit_flags())
    ivar = cube.copy_to_masked_array(attr='ivar', flag=cube.do_not_fit_flags())
    # Spectral resolution is returned unmasked.
    sres = cube.copy_to_array(attr='sres')
    return cube.wave, flux[flat_indx,:], ivar[flat_indx,:], sres[flat_indx,:]
#-----------------------------------------------------------------------------
if __name__ == '__main__':
    # End-to-end example: stack a few MaNGA spaxels, fit the stellar
    # continuum of the stacked spectra, then fit the emission lines of the
    # individual spectra using the stacked-spectrum stellar kinematics.
    t = time.perf_counter()

    # Plate-IFU to use
    plt = 7815
    ifu = 3702

    # Spaxel coordinates
    x = numpy.array([21,20,19])
    y = numpy.array([21,21,21])
    # Bin assignment of each spaxel: first two spaxels share bin 0.
    binid=numpy.array([0,0,1])

    # Number of spectra
    nspec = len(x)

    # Show the ppxf plots
    # fit_plots = True
    fit_plots = False

    # Show summary plots
    usr_plots = True #False

    # Template keywords
    sc_tpl_key = 'MILESHC'
    el_tpl_key = 'MILESHC'
    # el_tpl_key = 'BC03'

    # Emission-line database keywords
    elmom_key = 'ELBMILES'
    elfit_key = 'ELPMILES'
    # elmom_key = 'ELBTT'
    # elfit_key = 'ELPT4'

    # Template pixel scale a factor of 4 smaller than galaxy data
    velscale_ratio = 4

    # DAP source directory
    dapsrc = defaults.dap_source_dir()

    # Get the redshift
    drpver = 'v3_1_1'
    directory_path = dapsrc / 'data' / 'remote'
    drpall_file = directory_path / f'drpall-{drpver}.fits'
    # Assume all spectra have the same redshift
    z = numpy.array([get_redshift(plt, ifu, drpall_file)]*nspec)
    print('Redshift: {0}'.format(z[0]))
    # Initial velocity-dispersion guess of 100 (km/s per the fit call) for
    # every spectrum.
    dispersion = numpy.full_like(z, 100.)

    # Read the spectra
    print('reading spectra')
    wave, flux, ivar, sres = get_spectra(plt, ifu, x, y, directory_path=directory_path)
    # 1-sigma error from the inverse variance.
    ferr = numpy.ma.power(ivar, -0.5)

    # Fitting functions expect data to be in 2D arrays (for now):
    if len(flux.shape) == 1:
        # Single spectrum: no stacking needed, just promote to 2D.
        flux = flux.reshape(1,-1)
        ferr = ferr.reshape(1,-1)
        sres = sres.reshape(1,-1)
        flux_binned = flux.copy()
        ferr_binned = ferr.copy()
        sres_binned = sres.copy()
        x_binned = x.copy()
        y_binned = y.copy()
        z_binned = z.copy()
        dispersion_binned = dispersion.copy()
    else:
        # Stack the spectra
        wave_binned, flux_binned, fsdev_binned, npix_binned, ivar_binned, sres_binned, \
                covar_binned = SpectralStack().stack(wave, flux, binid=binid, ivar=ivar,
                                                     sres=sres)
        ferr_binned = numpy.ma.power(ivar_binned, -0.5)
        # Per-bin mean coordinates and redshift.
        x_binned = numpy.array([numpy.mean(x[binid == i]) for i in numpy.unique(binid)])
        y_binned = numpy.array([numpy.mean(y[binid == i]) for i in numpy.unique(binid)])
        z_binned = numpy.array([numpy.mean(z[binid == i]) for i in numpy.unique(binid)])
        # NOTE(review): this is a single element even when there is more
        # than one bin — confirm the fitters broadcast it as intended.
        dispersion_binned = numpy.array([numpy.mean(dispersion)])
        if usr_plots:
            for f in flux:
                pyplot.plot(wave, f)
            pyplot.plot(wave_binned, flux_binned[0])
            pyplot.show()

    #-------------------------------------------------------------------
    # Fit the stellar continuum

    # Mask the 5577 sky line and the emission lines
    sc_pixel_mask = SpectralPixelMask(artdb=ArtifactDB.from_key('BADSKY'),
                                      emldb=EmissionLineDB.from_key('ELPSCMSK'))

    # Construct the template library
    sc_tpl = TemplateLibrary(sc_tpl_key, match_resolution=False,
                             velscale_ratio=velscale_ratio, spectral_step=1e-4, log=True,
                             hardcopy=False)
    # Mean spectral resolution of the templates, one value per pixel.
    sc_tpl_sres = numpy.mean(sc_tpl['SPECRES'].data, axis=0).ravel()

    # Instantiate the fitting class
    ppxf = PPXFFit(StellarContinuumModelBitMask())

    # Perform the fit
    cont_wave, cont_flux, cont_mask, cont_par \
            = ppxf.fit(sc_tpl['WAVE'].data.copy(), sc_tpl['FLUX'].data.copy(), wave_binned,
                       flux_binned, ferr_binned, z_binned, dispersion_binned,
                       iteration_mode='no_global_wrej', reject_boxcar=100,
                       ensemble=False, velscale_ratio=velscale_ratio, mask=sc_pixel_mask,
                       matched_resolution=False, tpl_sres=sc_tpl_sres, obj_sres=sres_binned,
                       degree=8, moments=2, plot=fit_plots)

    # Remask the continuum fit
    sc_continuum = StellarContinuumModel.reset_continuum_mask_window(
                        numpy.ma.MaskedArray(cont_flux, mask=cont_mask>0))

    # Show the fit and residual
    if usr_plots:
        pyplot.plot(wave, flux_binned[0,:], label='Data')
        pyplot.plot(wave, sc_continuum[0,:], label='Model')
        pyplot.plot(wave, flux_binned[0,:] - sc_continuum[0,:], label='Resid')
        pyplot.legend()
        pyplot.xlabel('Wavelength')
        pyplot.ylabel('Flux')
        pyplot.show()
    #-------------------------------------------------------------------

    #-------------------------------------------------------------------
    # Get the emission-line moments using the fitted stellar continuum

    # Read the database that define the emission lines and passbands
    momdb = EmissionMomentsDB.from_key(elmom_key)

    # Measure the moments
    elmom = EmissionLineMoments.measure_moments(momdb, wave, flux_binned,
                                                continuum=sc_continuum, redshift=z_binned)
    #-------------------------------------------------------------------

    #-------------------------------------------------------------------
    # Fit the emission-line model

    # Set the emission-line continuum templates if different from those
    # used for the stellar continuum
    if sc_tpl_key == el_tpl_key:
        # If the keywords are the same, just copy over the previous
        # library ...
        el_tpl = sc_tpl
        el_tpl_sres = sc_tpl_sres
        # ... and the best fitting stellar kinematics
        stellar_kinematics = cont_par['KIN']
    else:
        # If the template sets are different, we need to match the
        # spectral resolution to the galaxy data ...
        _sres = SpectralResolution(wave, sres[0,:], log10=True)
        el_tpl = TemplateLibrary(el_tpl_key, sres=_sres, velscale_ratio=velscale_ratio,
                                 spectral_step=1e-4, log=True, hardcopy=False)
        el_tpl_sres = numpy.mean(el_tpl['SPECRES'].data, axis=0).ravel()
        # ... and use the corrected velocity dispersions.
        stellar_kinematics = cont_par['KIN']
        stellar_kinematics[:,1] = numpy.ma.sqrt(numpy.square(cont_par['KIN'][:,1]) -
                                                    numpy.square(cont_par['SIGMACORR_EMP']))

    # Mask the 5577 sky line
    el_pixel_mask = SpectralPixelMask(artdb=ArtifactDB.from_key('BADSKY'))

    # Read the emission line fitting database
    emldb = EmissionLineDB.from_key(elfit_key)

    # Instantiate the fitting class
    emlfit = Sasuke(EmissionLineModelBitMask())

    # Perform the emission-line fit on each spectrum using the stellar
    # kinematics from the stacked spectrum
    eml_wave, model_flux, eml_flux, eml_mask, eml_fit_par, eml_eml_par \
            = emlfit.fit(emldb, wave_binned, flux_binned, obj_ferr=ferr_binned,
                         obj_mask=el_pixel_mask, obj_sres=sres_binned,
                         guess_redshift=z_binned, guess_dispersion=dispersion_binned,
                         reject_boxcar=101, stpl_wave=el_tpl['WAVE'].data,
                         stpl_flux=el_tpl['FLUX'].data, stpl_sres=el_tpl_sres,
                         stellar_kinematics=stellar_kinematics, etpl_sinst_mode='offset',
                         etpl_sinst_min=10., velscale_ratio=velscale_ratio,
                         matched_resolution=False, mdegree=8, plot=fit_plots,
                         remapid=binid, remap_flux=flux, remap_ferr=ferr,
                         remap_mask=el_pixel_mask, remap_sres=sres, remap_skyx=x, remap_skyy=y,
                         obj_skyx=x_binned, obj_skyy=y_binned)

    # Line-fit metrics
    eml_eml_par = EmissionLineFit.line_metrics(emldb, wave, flux, ferr, model_flux, eml_eml_par,
                                               model_mask=eml_mask, bitmask=emlfit.bitmask)

    # Get the stellar continuum that was fit for the emission lines
    elcmask = eml_mask > 0
    # Unmask everything between the first and last fitted pixel of each
    # spectrum so only the leading/trailing unfit regions stay masked.
    edges = numpy.ma.notmasked_edges(numpy.ma.MaskedArray(model_flux, mask=elcmask), axis=1)
    for i,s,e in zip(edges[0][0],edges[0][1],edges[1][1]):
        elcmask[i,s:e+1] = False
    el_continuum = numpy.ma.MaskedArray(model_flux - eml_flux, mask=elcmask)

    # Plot the result
    if usr_plots:
        for i in range(flux.shape[0]):
            pyplot.plot(wave, flux[i,:], label='Data')
            pyplot.plot(wave, model_flux[i,:], label='Model')
            pyplot.plot(wave, el_continuum[i,:], label='EL Cont.')
            pyplot.plot(wave, sc_continuum[binid[i],:], label='SC Cont.')
            pyplot.legend()
            pyplot.xlabel('Wavelength')
            pyplot.ylabel('Flux')
            pyplot.show()

    # Remeasure the emission-line moments with the new continuum
    new_elmom = EmissionLineMoments.measure_moments(momdb, wave, flux, continuum=el_continuum,
                                                    redshift=z)

    # Compare the summed flux and Gaussian-fitted flux for all the
    # fitted lines
    if usr_plots:
        pyplot.scatter(emldb['restwave'], new_elmom['FLUX'][0,:]-eml_eml_par['FLUX'][0,:],
                       c=eml_eml_par['FLUX'][0,:], cmap='viridis', marker='.', s=60, lw=0,
                       zorder=4)
        pyplot.grid()
        pyplot.xlabel('Wavelength')
        pyplot.ylabel('Summed-Gaussian Difference')
        pyplot.show()

    # TODO: Add the spectral index calls...

    print('Elapsed time: {0} seconds'.format(time.perf_counter() - t))
|
sdssREPO_NAMEmangadapPATH_START.@mangadap_extracted@mangadap-main@examples@fit_binned_spec.py@.PATH_END.py
|
{
"filename": "faces.md",
"repo_name": "youngjookim/sdr",
"repo_path": "sdr_extracted/sdr-master/Code/packages/tapkee-master/examples/faces/faces.md",
"type": "Markdown"
}
|
In this example we embed images of faces with
the diffusion map algorithm. The dataset is
pretty small (only 50 vectors) and doesn't
have explicit latent variables beneath -
this makes it hard for the algorithm to
find an efficient 2d representation.
|
youngjookimREPO_NAMEsdrPATH_START.@sdr_extracted@sdr-master@Code@packages@tapkee-master@examples@faces@faces.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/linalg/__init__.py",
"type": "Python"
}
|
"""
====================================
Linear algebra (:mod:`scipy.linalg`)
====================================
.. currentmodule:: scipy.linalg
.. toctree::
:hidden:
linalg.blas
linalg.cython_blas
linalg.cython_lapack
linalg.interpolative
linalg.lapack
Linear algebra functions.
.. eventually, we should replace the numpy.linalg HTML link with just `numpy.linalg`
.. seealso::
`numpy.linalg <https://www.numpy.org/devdocs/reference/routines.linalg.html>`__
for more linear algebra functions. Note that
although `scipy.linalg` imports most of them, identically named
functions from `scipy.linalg` may offer more or slightly differing
functionality.
Basics
======
.. autosummary::
:toctree: generated/
inv - Find the inverse of a square matrix
solve - Solve a linear system of equations
solve_banded - Solve a banded linear system
solveh_banded - Solve a Hermitian or symmetric banded system
solve_circulant - Solve a circulant system
solve_triangular - Solve a triangular matrix
solve_toeplitz - Solve a toeplitz matrix
matmul_toeplitz - Multiply a Toeplitz matrix with an array.
det - Find the determinant of a square matrix
norm - Matrix and vector norm
lstsq - Solve a linear least-squares problem
pinv - Pseudo-inverse (Moore-Penrose) using lstsq
pinvh - Pseudo-inverse of hermitian matrix
kron - Kronecker product of two arrays
khatri_rao - Khatri-Rao product of two arrays
orthogonal_procrustes - Solve an orthogonal Procrustes problem
matrix_balance - Balance matrix entries with a similarity transformation
subspace_angles - Compute the subspace angles between two matrices
bandwidth - Return the lower and upper bandwidth of an array
issymmetric - Check if a square 2D array is symmetric
ishermitian - Check if a square 2D array is Hermitian
LinAlgError
LinAlgWarning
Eigenvalue Problems
===================
.. autosummary::
:toctree: generated/
eig - Find the eigenvalues and eigenvectors of a square matrix
eigvals - Find just the eigenvalues of a square matrix
eigh - Find the e-vals and e-vectors of a Hermitian or symmetric matrix
eigvalsh - Find just the eigenvalues of a Hermitian or symmetric matrix
eig_banded - Find the eigenvalues and eigenvectors of a banded matrix
eigvals_banded - Find just the eigenvalues of a banded matrix
eigh_tridiagonal - Find the eigenvalues and eigenvectors of a tridiagonal matrix
eigvalsh_tridiagonal - Find just the eigenvalues of a tridiagonal matrix
Decompositions
==============
.. autosummary::
:toctree: generated/
lu - LU decomposition of a matrix
lu_factor - LU decomposition returning unordered matrix and pivots
lu_solve - Solve Ax=b using back substitution with output of lu_factor
svd - Singular value decomposition of a matrix
svdvals - Singular values of a matrix
diagsvd - Construct matrix of singular values from output of svd
orth - Construct orthonormal basis for the range of A using svd
null_space - Construct orthonormal basis for the null space of A using svd
ldl - LDL.T decomposition of a Hermitian or a symmetric matrix.
cholesky - Cholesky decomposition of a matrix
cholesky_banded - Cholesky decomp. of a sym. or Hermitian banded matrix
cho_factor - Cholesky decomposition for use in solving a linear system
cho_solve - Solve previously factored linear system
cho_solve_banded - Solve previously factored banded linear system
polar - Compute the polar decomposition.
qr - QR decomposition of a matrix
qr_multiply - QR decomposition and multiplication by Q
qr_update - Rank k QR update
qr_delete - QR downdate on row or column deletion
qr_insert - QR update on row or column insertion
rq - RQ decomposition of a matrix
qz - QZ decomposition of a pair of matrices
ordqz - QZ decomposition of a pair of matrices with reordering
schur - Schur decomposition of a matrix
rsf2csf - Real to complex Schur form
hessenberg - Hessenberg form of a matrix
cdf2rdf - Complex diagonal form to real diagonal block form
cossin - Cosine sine decomposition of a unitary or orthogonal matrix
.. seealso::
`scipy.linalg.interpolative` -- Interpolative matrix decompositions
Matrix Functions
================
.. autosummary::
:toctree: generated/
expm - Matrix exponential
logm - Matrix logarithm
cosm - Matrix cosine
sinm - Matrix sine
tanm - Matrix tangent
coshm - Matrix hyperbolic cosine
sinhm - Matrix hyperbolic sine
tanhm - Matrix hyperbolic tangent
signm - Matrix sign
sqrtm - Matrix square root
funm - Evaluating an arbitrary matrix function
expm_frechet - Frechet derivative of the matrix exponential
expm_cond - Relative condition number of expm in the Frobenius norm
fractional_matrix_power - Fractional matrix power
Matrix Equation Solvers
=======================
.. autosummary::
:toctree: generated/
solve_sylvester - Solve the Sylvester matrix equation
solve_continuous_are - Solve the continuous-time algebraic Riccati equation
solve_discrete_are - Solve the discrete-time algebraic Riccati equation
solve_continuous_lyapunov - Solve the continuous-time Lyapunov equation
solve_discrete_lyapunov - Solve the discrete-time Lyapunov equation
Sketches and Random Projections
===============================
.. autosummary::
:toctree: generated/
clarkson_woodruff_transform - Applies the Clarkson Woodruff Sketch (a.k.a CountMin Sketch)
Special Matrices
================
.. autosummary::
:toctree: generated/
block_diag - Construct a block diagonal matrix from submatrices
circulant - Circulant matrix
companion - Companion matrix
convolution_matrix - Convolution matrix
dft - Discrete Fourier transform matrix
fiedler - Fiedler matrix
fiedler_companion - Fiedler companion matrix
hadamard - Hadamard matrix of order 2**n
hankel - Hankel matrix
helmert - Helmert matrix
hilbert - Hilbert matrix
invhilbert - Inverse Hilbert matrix
leslie - Leslie matrix
pascal - Pascal matrix
invpascal - Inverse Pascal matrix
toeplitz - Toeplitz matrix
Low-level routines
==================
.. autosummary::
:toctree: generated/
get_blas_funcs
get_lapack_funcs
find_best_blas_type
.. seealso::
`scipy.linalg.blas` -- Low-level BLAS functions
`scipy.linalg.lapack` -- Low-level LAPACK functions
`scipy.linalg.cython_blas` -- Low-level BLAS functions for Cython
`scipy.linalg.cython_lapack` -- Low-level LAPACK functions for Cython
""" # noqa: E501
from ._misc import *
from ._cythonized_array_utils import *
from ._basic import *
from ._decomp import *
from ._decomp_lu import *
from ._decomp_ldl import *
from ._decomp_cholesky import *
from ._decomp_qr import *
from ._decomp_qz import *
from ._decomp_svd import *
from ._decomp_schur import *
from ._decomp_polar import *
from ._matfuncs import *
from .blas import *
from .lapack import *
from ._special_matrices import *
from ._solvers import *
from ._procrustes import *
from ._decomp_update import *
from ._sketches import *
from ._decomp_cossin import *
# Deprecated namespaces, to be removed in v2.0.0
from . import (
    decomp, decomp_cholesky, decomp_lu, decomp_qr, decomp_svd, decomp_schur,
    basic, misc, special_matrices, matfuncs,
)

# Public API: every name imported above that is not underscore-private.
__all__ = [s for s in dir() if not s.startswith('_')]

# Expose ``scipy.linalg.test()`` for running the subpackage test suite.
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@linalg@__init__.py@.PATH_END.py
|
{
"filename": "UVLF_IllustrisTNG.py",
"repo_name": "NNSSA/GALLUMI_public",
"repo_path": "GALLUMI_public_extracted/GALLUMI_public-main/Scripts/UVLF_IllustrisTNG/UVLF_IllustrisTNG.py",
"type": "Python"
}
|
import numpy as np
import h5py
import galcv
# Nested lookup table populated from the HDF5 file:
# data[model][simulation][redshift][quantity] -> numpy array.
data = {}
models = ["A", "B", "C"]
simulations = ["TNG100-1", "TNG300-1", "TNG50-1", "combined"]
# Map from HDF5 snapshot number to the corresponding redshift.
redshifts = {33:2, 25:3, 21:4, 17:5, 13:6, 11:7, 8:8, 6:9, 4:10}
quantities = ["bincenters", "luminosity_function", "number_count", "lf_combined"]

## Extract data and store in dictionary "data"
with h5py.File("UVLF_TNG_MV2019.hdf5", "r") as f:
    for model in list(f.keys()):
        data[model] = {}
        for simu in f[model]:
            data[model][simu] = {}
            for redshift in f[model][simu]:
                # Translate the snapshot number into its redshift.
                redshift_translated = redshifts[float(redshift)]
                data[model][simu][redshift_translated] = {}
                for quantity in f[model][simu][redshift]:
                    # [()] reads the full dataset into memory.
                    data[model][simu][redshift_translated][quantity] = f[model][simu][redshift][quantity][()]
## Accessor into the nested data dictionary
def sim(model, simulation, redshift, quantity):
    """Return the stored array for one (model, simulation, redshift, quantity)."""
    per_simulation = data[model][simulation]
    return per_simulation[redshift][quantity]
## Total magnitude range
magnitudes = sim("A", "TNG300-1", 4, "bincenters")
bin_width = np.diff(magnitudes)[-1]

## Compute volume (Mpc^3 x mag_bin_size) of each simulation
# Recovered from count / 10**log10(LF) in the last (faintest stored) bin;
# the model/redshift choice is arbitrary since the volume is fixed per box.
vol_300 = (sim("A", "TNG300-1", 4, "number_count") / 10**sim("A", "TNG300-1", 4, "luminosity_function"))[-1]
vol_100 = (sim("A", "TNG100-1", 4, "number_count") / 10**sim("A", "TNG100-1", 4, "luminosity_function"))[-1]
vol_50 = (sim("A", "TNG50-1", 4, "number_count") / 10**sim("A", "TNG50-1", 4, "luminosity_function"))[-1]
## Extract number counts or LF of each simulation
def data_slices(model, redshift, number=True, individual=True):
    """Return per-box galaxy counts (or LFs) for TNG300/TNG100/TNG50.

    ``number=True`` gives raw counts; ``number=False`` divides by each box
    volume to give the luminosity function.  ``individual=True`` returns a
    3-tuple ordered (TNG300, TNG100, TNG50); otherwise the three arrays are
    concatenated into one.
    """
    counts = (
        sim(model, "TNG300-1", redshift, "number_count"),
        sim(model, "TNG100-1", redshift, "number_count"),
        sim(model, "TNG50-1", redshift, "number_count"),
    )
    if not number:
        counts = tuple(c / v for c, v in zip(counts, (vol_300, vol_100, vol_50)))
    if individual:
        return counts
    return np.concatenate(counts)
## Integrating tools
# Nodes and weights of a 50-point Gauss-Legendre rule on [-1, 1].
points, weights = np.polynomial.legendre.leggauss(50)
# Gaussian quadrature integrator
def integrator(f, a, b):
    """Integrate ``f`` over [a, b] with the module-level Gauss-Legendre rule."""
    half_width = (b - a) / 2.
    if half_width == 0:
        # Degenerate interval: the integral vanishes.
        return 0.
    midpoint = (b + a) / 2.
    # Map the [-1, 1] nodes onto [a, b] and take the weighted sum.
    return half_width * np.dot(f(half_width * points + midpoint), weights)
## Comoving Angular diameter distance
def D_A(z, Omega_m=0.3089, h=0.6774):
    """Comoving distance to redshift ``z`` (Mpc) for a flat LCDM cosmology."""
    hubble_distance = 299792.458 / (100. * h)  # c / H0 in Mpc
    ez_inv = lambda zp: 1 / np.sqrt(Omega_m * np.power(1 + zp, 3) + 1. - Omega_m)
    return hubble_distance * integrator(ez_inv, 0., z)
## Redshift bin width of each simulation box
def delta_z(z, Lbox, Omega_m=0.3089, h=0.6774):
    """Redshift extent of a comoving box of side ``Lbox`` Mpc at redshift ``z``."""
    # dz = Lbox * H(z) / c, with H(z) in km/s/Mpc and c in km/s.
    hubble = 100. * h * np.sqrt(Omega_m * np.power(1 + z, 3) + 1. - Omega_m)
    return Lbox * hubble / 299792.458
## Compute cosmic variance using the galcv code
def cosmic_variance(model, redshift):
    """Return the cosmic-variance error on the LF of each box (300, 100, 50)."""
    # These are the LFs from each simulation
    lf300, lf100, lf50 = data_slices(model,redshift,number=False)
    # Compute the effective areas in arcmin^2; the bracketed values are the
    # comoving box side lengths in Mpc for TNG300/TNG100/TNG50.
    areas = [(Lbox/D_A(redshift))**2 * (180*60/np.pi)**2 for Lbox in [302.627694125,110.71744907,51.6681428993]]
    # galcv can't compute the CV at z = 4, so we use the CV at z = 5 for it (which is a conservative approach)
    redshift = max(5, redshift)
    # Compute relative cosmic variance with galcv for each box geometry.
    cv_300 = np.array(galcv.getcv(mag=magnitudes, area=areas[0], z=redshift, zW=max(0.1, delta_z(redshift, 302.627694125)), appOrAbs="absolute", interpWarning=0))
    cv_100 = np.array(galcv.getcv(mag=magnitudes, area=areas[1], z=redshift, zW=max(0.1, delta_z(redshift, 110.71744907)), appOrAbs="absolute", interpWarning=0))
    cv_50 = np.array(galcv.getcv(mag=magnitudes, area=areas[2], z=redshift, zW=max(0.1, delta_z(redshift, 51.6681428993)), appOrAbs="absolute", interpWarning=0))
    # In some cases galcv gives nan, e.g., if magnitudes are too bright. Therefore we use the largest error there (which is dominated by poisson error anyway)
    cv_300[np.isnan(cv_300)] = max(cv_300[np.isfinite(cv_300)])
    cv_100[np.isnan(cv_100)] = max(cv_100[np.isfinite(cv_100)])
    cv_50[np.isnan(cv_50)] = max(cv_50[np.isfinite(cv_50)])
    # Impose a floor of 5% relative cosmic variance in every bin.
    minimal = 0.05
    err_300 = np.array(list(map(max,zip(np.repeat(minimal, len(cv_300)), cv_300))))
    err_100 = np.array(list(map(max,zip(np.repeat(minimal, len(cv_100)), cv_100))))
    err_50 = np.array(list(map(max,zip(np.repeat(minimal, len(cv_50)), cv_50))))
    # Scale the relative errors by the LFs to get absolute errors.
    return err_300 * lf300, err_100 * lf100, err_50 * lf50
## Compute Poisson error
def Poisson_error(model, redshift):
    """Shot-noise (sqrt-N) uncertainty on the LF of each simulation box."""
    n300, n100, n50 = data_slices(model, redshift)  # raw galaxy counts per box
    return np.sqrt(n300) / vol_300, np.sqrt(n100) / vol_100, np.sqrt(n50) / vol_50
## Return combined LF
def data_combined(model, redshift, original=False):
    """Combine the UVLFs of the three TNG boxes via inverse-variance weighting.

    Returns a (lf_tot, error_tot) pair of arrays over `magnitudes`; with
    original=True the pre-combined UVLF stored in the raw data is returned
    instead (as a single array).
    """
    # Return the original, combined UVLF from the raw data
    if original:
        return 10**sim(model, "combined", redshift, "lf_combined")
    # Positions of where we want to transition from one simulation to the other - pos300 or pos100 corresponds...
    # ...to the magnitude M_i where the number count of galaxies in the simulation peaks at M_i+2
    pos300 = np.argmax(sim(model, "TNG300-1", redshift, "number_count")) - 1
    pos100 = np.argmax(sim(model, "TNG100-1", redshift, "number_count")) - 1
    # These are the LFs from each simulation
    lf_300, lf_100, lf_50 = data_slices(model, redshift, number=False, individual=True)
    # set LFs equal to 0 where simulation statistics become relevant (at peak of number of galaxies)
    lf_300[pos300:] = 0.
    lf_100[pos100:] = 0.
    # These are the Poisson errors
    poisson_error_300, poisson_error_100, poisson_error_50 = Poisson_error(model, redshift)
    # These are the cosmic variances
    cv_error_300, cv_error_100, cv_error_50 = cosmic_variance(model, redshift)
    # Combine Poisson error and cosmic variance (added in quadrature)
    error_300 = np.sqrt(poisson_error_300**2 + cv_error_300**2)
    error_100 = np.sqrt(poisson_error_100**2 + cv_error_100**2)
    error_50 = np.sqrt(poisson_error_50**2 + cv_error_50**2)
    # Add a minimal error of 20% in each simulation data to account for simulation statistics
    min_error = 0.2
    error_300 = np.array(list(map(max, zip(min_error * lf_300, error_300))))
    error_100 = np.array(list(map(max, zip(min_error * lf_100, error_100))))
    error_50 = np.array(list(map(max, zip(min_error * lf_50, error_50))))
    # Zero the errors beyond the transition magnitudes so those bins get no weight below
    error_300[pos300:] = 0.
    error_100[pos100:] = 0.
    # Compute inverse errors (the weights of the inverse-variance mean)
    inv_error_300 = 1/error_300
    inv_error_100 = 1/error_100
    inv_error_50 = 1/error_50
    # Set inverse error equal to 0 where it's infinite (i.e. where the error was zeroed)
    inv_error_300[np.isinf(inv_error_300)] = 0.
    inv_error_100[np.isinf(inv_error_100)] = 0.
    inv_error_50[np.isinf(inv_error_50)] = 0.
    # Combined error of the inverse-variance weighted mean
    error_tot = 1/np.sqrt(inv_error_300**2 + inv_error_100**2 + inv_error_50**2)
    error_tot[np.isinf(error_tot)] = 0.
    # inverse variance method
    lf_tot = error_tot * np.sqrt((lf_300 * inv_error_300)**2 + (lf_100 * inv_error_100)**2 + (lf_50 * inv_error_50)**2)
    return lf_tot, error_tot
## Construct array for output
for_output = []
for z in [4, 5, 6, 7, 8, 9, 10]:
    # Faint-end magnitude cut-off: only bins brighter than (<=) this limit are
    # kept; the limit brightens with redshift.
    if z < 6.5:
        MUV_cutoff = -16.
    elif z < 8.5:
        MUV_cutoff = -16.5
    else:
        MUV_cutoff = -16.75
    lfs, errors = data_combined("A", z)
    for num, LF in enumerate(zip(lfs, errors)):
        # Keep only finite, non-zero LF bins brighter than the cut-off.
        if np.isfinite(LF[0]) and magnitudes[num] <= MUV_cutoff and LF[0] != 0.:
            # Columns: redshift, magnitude, bin width, LF value, LF error.
            for_output.append((z, magnitudes[num], bin_width, LF[0], LF[1]))
np.savetxt("UVLF_IllustrisTNG.txt", np.array(for_output))
|
NNSSAREPO_NAMEGALLUMI_publicPATH_START.@GALLUMI_public_extracted@GALLUMI_public-main@Scripts@UVLF_IllustrisTNG@UVLF_IllustrisTNG.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "ChemaPalencia/M_SMiLe",
"repo_path": "M_SMiLe_extracted/M_SMiLe-main/README.md",
"type": "Markdown"
}
|
# Magnification Statistics of Micro-Lensing (M_SMiLe)
This repository contains code that computes an approximation of the probability of magnification for a lens system consisting of microlensing by compact objects within a galaxy cluster.
The code specifically focuses on the scenario where the galaxy cluster is strongly lensing a background galaxy, and the compact objects, such as stars, are sensitive to this microlensing effect.
The microlenses responsible for this effect are stars and stellar remnants, but also exotic objects such as compact dark matter candidates (PBHs, axion mini-halos...) can contribute to this effect.
More information about this code can be obtained from the paper: [Statistics of magnification for extremely lensed high redshift stars](https://arxiv.org/abs/2307.09505)
# Input parameters
This code generates the magnification probability for a system with the desired input parameters, these are:
$z_{\rm s}$: Redshift of the source plane.
$z_{\rm d}$: Redshift of the lens plane. These two combined give, assuming a standard $\Lambda\rm{CDM}$ cosmology, the critical surface mass density, $\Sigma_{\rm crit}$, of the system.
$\mu_{\rm r}$: Radial macro-magnification of the strongly lensed images of the source.
$\mu_{\rm t}$: Tangential macro-magnification of the strongly lensed images of the source. Can be either positive or negative.
$\Sigma_{\ast}$: Surface mass density of microlenses. The product with $\left|\mu_{\rm t}\right|$ gives the effective surface mass density, $\Sigma_{\rm eff}$, that determines the model used to compute the magnification probability.
$\mu_1$: Lower limit to compute the magnification probability.
$\mu_2$: Upper limit to compute the magnification probability.
# Outputs
The magnification probability values at different magnification bins are saved to a file of the desired extension, as a two-column .txt file, as a fits table, or as an hdf5 group with two data sets.
In addition, users have the option to generate a plot of the magnification probability curves saved as a .pdf file.
# Installation
To use this code you need Python. This code has been written and tested with Python 3.9 but older versions should work.
To install and use this code, follow the steps below:
1. Starting the terminal
2. Clone the repository:
```
$ git clone https://github.com/ChemaPalencia/M_SMiLe.git
```
3. Change into the project directory:
```
$ cd M_SMiLe
```
4. Install the required dependencies. It is recommended to set up a virtual environment before installing the dependencies to avoid conflicts with other Python packages:
```
$ pip install -r requirements.txt
```
or
```
$ conda install --file requirements.txt
```
# Usage
This code can be used in two independent ways:
* Via terminal.
This code can work as a black box that takes the necessary inputs and generates different files with the desired output.
A detailed description of all the parameters and their effects can be obtained through:
```
$ python M_SMiLe.py -h
usage: M-SMiLe.py [-h] [--mu1 mu1] [--mu2 mu2] [--dir [DIR]] [--plot plot]
[--save save] [--extension extension]
mu_t mu_r sigma_star zd zs
Given a set of parameters regarding an extragalactic microlensing scheme, this
program computes the probability of magnification in a given range.
positional arguments:
mu_t Value of the tangential macro-magnification.
mu_r Value of the radial macro-magnification.
sigma_star Surface mass density of microlenses [Msun/pc2].
zd Redshift at the lens plane (cluster).
zs Redshift at the source plane.
optional arguments:
-h, --help show this help message and exit
--mu1 mu1 Minimum magnification to display the pdf.
--mu2 mu2 Maximum magnification to display the pdf.
--dir [DIR] Directory where the results will be stored.
--plot plot If "True", plot and save the pdf.
--save save If "True", save the pdf in a file.
--extension extension
If save, extension in which the data is saved (txt,
fits, h5).
Contact: palencia@ifca.unican.es / jpalenciasainz@gmail.com
```
Usage example:
```
$ python M_SMiLe.py -600 2 5 1 1.7 --dir /foo/bar/test/ --save False --mu2 1000
```
* As a python class.
Any python program can import the class microlenses from `M_SMiLe.py`.
Once we have imported the class, we can create an instance of an object and call its methods to save the data in different files, generate plots, or directly get **numpy** arrays with the values of the magnification probability.
```python
# Import the microlenses class from M_SMiLe.py
from M_SMiLe import microlenses
# Create an object of the class microlenses with the desired inputs
microlens = microlenses(mu_t=200, mu_r=4, sigma_star=12.4, zs=1.3, zd=0.7, mu1=1e-3, mu2=1e5)
# Get magnification probability per logarithmic bin
pdf, log_mu = microlens.get_pdf()
# Save data in a file (h5, txt, fits). Can choose another path.
microlens.save_data(extension='fits')
# Save a plot.
microlens.plot(save_pic=True)
```
A detailed example of some of the capabilities of the code is shown in the script ```example.py```.
# Output examples
[Neg_parity_High_sigma.pdf](https://github.com/ChemaPalencia/M_SMiLe/files/12039962/Neg_parity_High_sigma.pdf)
[Neg_parity_Low_sigma.pdf](https://github.com/ChemaPalencia/M_SMiLe/files/12039963/Neg_parity_Low_sigma.pdf)
[Pos_parity_High_sigma.pdf](https://github.com/ChemaPalencia/M_SMiLe/files/12039964/Pos_parity_High_sigma.pdf)
[Pos_parity_Low_sigma.pdf](https://github.com/ChemaPalencia/M_SMiLe/files/12039965/Pos_parity_Low_sigma.pdf)
# License
This project is licensed under the **MIT License**. Feel free to use and modify the code according to the terms specified in the license.
# Citation
If you use the M_SMiLe code, please tell us and cite its release paper [Statistics of magnification for extremely lensed high redshift stars](https://arxiv.org/abs/2307.09505) as
```
Palencia, J. M., Diego, J. M., Kavanagh, B. J., & Martinez, J. 2023, arXiv eprints, arXiv:2307.09505. https://arxiv.org/abs/2307.09505
```
The corresponding bibtex is:
```
@ARTICLE{2023arXiv230709505P,
author = {{Palencia}, J.~M. and {Diego}, J.~M. and {Kavanagh}, B.~J. and {Martinez}, J.},
title = "{Statistics of magnification for extremely lensed high redshift stars}",
journal = {arXiv e-prints},
keywords = {Astrophysics - Cosmology and Nongalactic Astrophysics, Astrophysics - Astrophysics of Galaxies, High Energy Physics - Phenomenology},
year = 2023,
month = jul,
eid = {arXiv:2307.09505},
pages = {arXiv:2307.09505},
archivePrefix = {arXiv},
eprint = {2307.09505},
primaryClass = {astro-ph.CO},
adsurl = {https://ui.adsabs.harvard.edu/abs/2023arXiv230709505P},
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
```
# Contact
If you have any questions or inquiries regarding this code or its usage, please contact palencia@ifca.unican.es or jpalenciasainz@gmail.com
We hope this code proves to be useful in your research and exploration of magnification probability of high redshift stars by galaxy clusters. Happy computing!
|
ChemaPalenciaREPO_NAMEM_SMiLePATH_START.@M_SMiLe_extracted@M_SMiLe-main@README.md@.PATH_END.py
|
{
"filename": "test_config.py",
"repo_name": "transientskp/tkp",
"repo_path": "tkp_extracted/tkp-master/tests/test_config.py",
"type": "Python"
}
|
import unittest
import os
import getpass
import datetime
import io
from configparser import ConfigParser
from tkp.testutil.data import (default_job_config, default_pipeline_config,
default_header_inject_config)
from tkp.config import (dt_w_microsecond_format, parse_to_dict,
get_database_config, initialize_pipeline_config)
# Sentinel values used by the database-config tests to verify that
# environment variables are read through unchanged.
DUMMY_VALUE = "dummy"
DUMMY_INT = "1234"


class TestParsingCode(unittest.TestCase):
    """Check that parse_to_dict converts ConfigParser values to typed Python values."""

    @classmethod
    def setUpClass(cls):
        """Create manually parsed dict to check against"""
        example_config_str = """\
[test1]
float = 0.5
; some comment
int = 1
string1 = bob
; default to string when no other parsers work
time1 = 2007-07-20T14:18:09.909001
; times are parsed if matching this format
#But must have at least a single digit after the decimal point:
time2 = 2007-07-20T14:18:09.0
[test2]
string2 = "0.235"
; force string by enclosing in double-quotes
"""
        cls.config = ConfigParser()
        cls.config.read_file(io.StringIO(example_config_str))
        # Hand-built expected result: floats/ints/ISO timestamps are coerced,
        # everything else stays a string.
        cls.parset = {}
        cls.parset['test1'] = {
            'float': 0.5,
            'int': 1,
            'string1': 'bob',
            'time1': datetime.datetime.strptime('2007-07-20T14:18:09.909001',
                                                dt_w_microsecond_format),
            'time2': datetime.datetime.strptime('2007-07-20T14:18:09.0',
                                                dt_w_microsecond_format)
        }
        cls.parset['test2'] = {'string2': '0.235'}

    def test_parser(self):
        # parse_to_dict output must match the manually constructed dict above.
        parsed = parse_to_dict(self.config)
        self.assertEqual(parsed, self.parset)
class TestConfigParsing(unittest.TestCase):
    """Ensure that the default config files get parsed as expected"""

    def test_default_job_config(self):
        # Parsing the shipped default job config must not raise.
        c = ConfigParser()
        c.read(default_job_config)
        job_config = parse_to_dict(c)

    def test_default_pipeline_config(self):
        # Same for the default pipeline config (job name 'test').
        pipe_config = initialize_pipeline_config(default_pipeline_config, 'test')

    def test_default_inject_config(self):
        # Same for the default header-injection config.
        c = ConfigParser()
        c.read(default_header_inject_config)
        inject_config = parse_to_dict(c)
class DatabaseConfigTestCase(unittest.TestCase):
    """Exercise get_database_config defaults and TKP_DB* environment overrides."""

    def setUp(self):
        # Wipe out any pre-existing environment settings
        self.old_environment = os.environ.copy()
        os.environ.pop("TKP_DBENGINE", None)
        os.environ.pop("TKP_DBNAME", None)
        os.environ.pop("TKP_DBUSER", None)
        os.environ.pop("TKP_DBPASSWORD", None)
        os.environ.pop("TKP_DBHOST", None)
        os.environ.pop("TKP_DBPORT", None)
        self.pipeline_cfg = initialize_pipeline_config(default_pipeline_config,
                                                       'test')

    def tearDown(self):
        # Restore the environment captured in setUp.
        os.environ = self.old_environment

    def test_unconfigured(self):
        # Should *not* raise.
        get_database_config()

    def test_invalid_dbengine(self):
        # Should *not* raise; database_config does not sanity check.
        os.environ["TKP_DBENGINE"] = DUMMY_VALUE
        get_database_config()

    def test_defaults_postgresql(self):
        # Demonstrate that we get the expected default values
        os.environ["TKP_DBENGINE"] = "postgresql"
        username = getpass.getuser()
        db_config = get_database_config()
        self.assertEqual(db_config['engine'], "postgresql")
        self.assertEqual(db_config['database'], username)
        self.assertEqual(db_config['user'], username)
        self.assertEqual(db_config['password'], username)
        self.assertEqual(db_config['host'], "localhost")
        self.assertEqual(db_config['port'], 5432)

    def test_env_vars(self):
        # Demonstrate that we correctly read the environment
        os.environ["TKP_DBENGINE"] = "monetdb"
        os.environ["TKP_DBNAME"] = DUMMY_VALUE
        os.environ["TKP_DBUSER"] = DUMMY_VALUE
        os.environ["TKP_DBPASSWORD"] = DUMMY_VALUE
        os.environ["TKP_DBHOST"] = DUMMY_VALUE
        os.environ["TKP_DBPORT"] = DUMMY_INT
        db_config = get_database_config(self.pipeline_cfg['database'])
        self._test_for_dummy_values(db_config)

    def test_use_username_as_default(self):
        # database name and password default to the username
        os.environ["TKP_DBUSER"] = DUMMY_VALUE
        os.environ["TKP_DBENGINE"] = "monetdb"
        os.environ["TKP_DBHOST"] = DUMMY_VALUE
        os.environ["TKP_DBPORT"] = DUMMY_INT
        db_config = get_database_config(self.pipeline_cfg['database'])
        self._test_for_dummy_values(db_config)

    def _test_for_dummy_values(self, db_config):
        # Shared assertions for the dummy environment configuration.
        self.assertEqual(db_config['engine'], "monetdb")
        self.assertEqual(db_config['database'], DUMMY_VALUE)
        self.assertEqual(db_config['user'], DUMMY_VALUE)
        self.assertEqual(db_config['password'], DUMMY_VALUE)
        self.assertEqual(db_config['host'], DUMMY_VALUE)
        self.assertEqual(db_config['port'], int(DUMMY_INT))
|
transientskpREPO_NAMEtkpPATH_START.@tkp_extracted@tkp-master@tests@test_config.py@.PATH_END.py
|
{
"filename": "proxy.py",
"repo_name": "sdss/cluplus",
"repo_path": "cluplus_extracted/cluplus-main/python/cluplus/proxy.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# @Author: Florian Briegel (briegel@mpia.de)
# @Date: 2021-08-18
# @Filename: proxy.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
from __future__ import annotations
import sys
import uuid
import os
import fnmatch
import asyncio
from os.path import basename
from socket import gethostname
from functools import partial
from itertools import chain
from typing import Callable, Optional
from collections.abc import MutableMapping
from shlex import quote
import json
from inspect import getcoroutinelocals, iscoroutine
from clu import AMQPClient, AMQPReply, BaseClient, CommandStatus
from .exceptions import ProxyPartialInvokeException, ProxyActorIsNotReachableException, ProxyUnpackKeysNotAllFoundException
class Client(AMQPClient):
    """An AMQP client whose connection settings come from the environment.

    RMQ_URL and RMQ_HOST override the built-in defaults; explicit keyword
    arguments override both.
    """

    def __init__(self, **kwargs):
        """Build a uniquely named client and delegate to AMQPClient."""
        settings = {"url": os.getenv("RMQ_URL", None),
                    "host": os.getenv("RMQ_HOST", "localhost")}
        settings.update(kwargs)
        # host_scriptname-shortuuid makes the client name unique per process.
        client_name = f"{gethostname()}_{basename(sys.argv[0])}-{uuid.uuid4().hex[:8]}"
        AMQPClient.__init__(self, name=client_name, **settings)
class Proxy():
    """A proxy client that exposes a remote actor's commands as methods.

    On :meth:`start` the actor is queried for its command list and one
    callable attribute per command is attached to the instance.  If the
    actor is unreachable, connecting is retried in a background task.
    """

    # Name of the special actor command that lists all available commands.
    __commands = "__commands"
    # Key in the reply that holds the command list.
    __commands_key = "help"
    # Attribute name of the background reconnect task (tested via hasattr/delattr).
    __pull_commands_task = "_pull_commands_task"
    # Class-wide default AMQP client shared between proxies.
    __amqpc = None
    # Delay (seconds) between background reconnect attempts.
    pull_commands_delay = 2
    # Maximum number of background reconnect attempts.
    pull_commands_attempts = 42

    def __init__(self, actor: str, amqpc: BaseClient = None, **kwargs):
        """Store the actor name and pick (or lazily create) the shared client."""
        self.actor = actor
        self.amqpc = amqpc
        if not self.amqpc:
            if Proxy.__amqpc:
                self.amqpc = Proxy.__amqpc
            else:
                # First proxy ever: create a client and make it the default.
                self.amqpc = Proxy.__amqpc = Client(**kwargs)

    async def start(self, amqpc: BaseClient = None):
        """Query and set actor commands."""
        if amqpc:
            if self.amqpc:
                await self.amqpc.stop()
            self.amqpc = amqpc
        if not self.isAmqpcConnected():
            await self.amqpc.start()
        await self._pull_commands()
        return self

    async def stop(self):
        """Stop the proxy, cancelling any pending background reconnect task."""
        await self.__delattr_pull_commands_task(cancel=True)

    @staticmethod
    def setDefaultAmqpc(amqpc):
        # Override the shared default client used by future proxies.
        Proxy.__amqpc = amqpc

    def __getattr__(self, attr):
        # order is important !  While the reconnect task is alive the command
        # list is unknown, so treat any attribute access as a remote command.
        if attr != Proxy.__pull_commands_task and hasattr(self, Proxy.__pull_commands_task):
            return partial(self.call_command, attr)
        return super(Proxy, self).__getattribute__(attr)

    async def __delattr_pull_commands_task(self, cancel=False):
        """Remove (and optionally cancel and await) the background reconnect task."""
        lock = asyncio.Lock()
        async with lock:
            if hasattr(self, Proxy.__pull_commands_task):
                if cancel:
                    self._pull_commands_task.cancel()
                    try:
                        await self._pull_commands_task
                    except asyncio.exceptions.CancelledError as ex:
                        self.amqpc.log.debug(f"error {ex}")
                delattr(self, Proxy.__pull_commands_task)

    async def _pull_commands(self, delay=0, attempts=1):
        """Fetch the actor's command list and attach one method per command.

        With the default arguments a single foreground attempt is made; on
        failure a background task retries using the class-level
        pull_commands_delay/pull_commands_attempts settings.
        """
        for c in range(attempts):
            try:
                await asyncio.sleep(delay)
                reply = await self.call_command(Proxy.__commands)
                # The reply is either a plain dict or an object with a .help attribute.
                commands = reply[Proxy.__commands_key] if isinstance(reply, dict) else reply.help
                for c in commands:
                    setattr(self, c, partial(self.call_command, c))
                    # setattr(self, f"nowait_{c}", partial(self.call_command, c, nowait=True))
                await self.__delattr_pull_commands_task()
                return
            except Exception as ex:
                if not delay:
                    # Foreground attempt failed: hand over to a background task.
                    self.amqpc.log.warning(f"actor {self.actor} currently not reachable.")
                    if not hasattr(self, Proxy.__pull_commands_task):
                        self.amqpc.log.debug(f"actor {self.actor} connect as background task.")
                        self._pull_commands_task = self.amqpc.loop.create_task(self._pull_commands(Proxy.pull_commands_delay, Proxy.pull_commands_attempts))
                    return
        # All background attempts exhausted.
        self.amqpc.log.debug(f"actor {self.actor} connect attempts stopped.")
        await self.__delattr_pull_commands_task()

    def isAmqpcConnected(self):
        """Return True if the underlying AMQP connection exists and is open."""
        if not self.amqpc.connection.connection:
            return False
        return not self.amqpc.connection.connection.is_closed

    async def _handle_command_reply(self, fu):
        """Await a command future and return its final reply as a ProxyDict.

        Re-raises the exception transported in the reply if the command failed.
        """
        reply = await fu
        if hasattr(reply, "status") and reply.status.did_fail:
            raise self._errorMapToException(reply.replies[-1].message['error'])
        msg = ProxyDict(reply.replies[-1].message)
        msg.sender = reply.actor
        return msg

    def _handle_callback(self, callback: Optional[Callable[[AMQPReply], None]], reply: AMQPReply):
        # Decode the raw AMQP reply body and forward it to the user callback.
        msg = ProxyDict(json.loads(reply.message.body))
        msg.command_status = CommandStatus.code_to_status(reply.message_code)
        msg.sender = reply.sender
        callback(msg)

    async def call_command(self,
                           command: str,
                           *args,
                           callback: Optional[Callable[[dict], None]] = None,
                           time_limit: Optional[float] = 42.0,
                           nowait: bool = False,
                           nosync: bool = False,
                           object_hook: Optional[Callable[[AMQPReply], None]] = None,
                           **kwargs):
        """Send *command* with *args*/*kwargs* to the actor and return the reply.

        nowait returns the (unawaited) reply-handling coroutine instead of the
        reply itself; nosync fires and forgets, returning None.
        """
        def encode(v):
            # Pass primitives through, shell-quote unquoted strings and
            # JSON-encode anything else for the actor's command parser.
            if isinstance(v, (int, float, bool)): return v
            elif isinstance(v, str): return v if v[0] in "'\"" and v[-1] in "'\"" else quote(v)
            return f"'{json.dumps(v)}'"

        # Positional args followed by '--key value' pairs for keyword args.
        args = [encode(v) for v in args] \
               + list(chain.from_iterable(('--' + k, encode(v))
                                          for k, v in kwargs.items()))

        fu = await self.amqpc.send_command(self.actor,
                                           command,
                                           *args,
                                           callback=partial(self._handle_callback, callback) if callback else None,
                                           time_limit=time_limit)
        if nosync: return
        if nowait: return self._handle_command_reply(fu)
        return await self._handle_command_reply(fu)

    @staticmethod
    def _errorMapToException(em):
        # Rebuild the original exception type from the transported error map.
        if isinstance(em, dict):
            return Proxy._stringToException(em['exception_message'],
                                           em['exception_type'],
                                           em['exception_module'])
        return Exception(em)

    @staticmethod
    def _exceptionToMap(ex):
        # Serialize an exception into a transportable dict (inverse of the above).
        return { "exception_module": ex.__class__.__module__, "exception_type": ex.__class__.__name__, "exception_message": str(ex) }

    @staticmethod
    def _stringToException(sval, tn='Exception', mn='builtins'):
        """Instantiate exception type *tn* from module *mn* with message *sval*."""
        try:
            module = sys.modules[mn] if mn in sys.modules \
                     else __import__(mn, fromlist=tn)
            return getattr(module, tn)(sval)
        except AttributeError:
            return Exception(f'Unknown exception type {tn}-{sval}')
        except ImportError:
            return Exception(f'Unknown module type {mn}-{tn}:{sval}')
async def invoke(*cmds, return_exceptions: bool = False):
    """invokes one or many commands in parallel

    On error it throws an exception if one of the commands fails as a dict
    with an exception and return values for every command.
    """
    def isProxy(p):
        # A coroutine counts as a proxy call if its bound `self` is a Proxy.
        return 'self' in p.cr_frame.f_locals and isinstance(p.cr_frame.f_locals['self'], Proxy)

    # Record which actor each coroutine targets ('local' for plain coroutines).
    actors = [c.cr_frame.f_locals['self'].actor if isProxy(c) else 'local' for c in cmds]
    ret = await asyncio.gather(*cmds, return_exceptions=True)

    def _format(r):
        # Normalise results: dicts -> ProxyDict, exceptions -> {'error': exc}.
        if isinstance(r, dict): return ProxyDict(r)
        elif isinstance(r, Exception): return ProxyDict({'error': r})
        else: return r

    ret = ProxyListOfDicts([_format(r) for r in ret])
    ret.actors = actors
    if not return_exceptions:
        # Fail loudly if any of the gathered commands returned an error.
        for r in ret:
            if isinstance(r, dict) and "error" in r.keys():
                raise ProxyPartialInvokeException(*ret)
    return ret
def unpack(data, *keys, as_seq: bool = False, exception_on_missing_keys: bool = False):
    """Extract parameter values from a reply dict or a list of reply dicts.

    Without *keys* every value is returned; with *keys* only values whose
    parameter name matches one of the fnmatch-style patterns are returned.
    A single resulting value is returned bare unless ``as_seq`` is set;
    Python's list unpacking (PEP 3132) can then be used on the result.

    Parameters
    ----------
    keys
        return only the parameters matching these patterns
    as_seq
        always return a list, even for a single value
    exception_on_missing_keys
        raise if one of the patterns matches nothing (list input only)
    """
    if len(data) == 0:
        return

    def _collapse(values, seq):
        # Several values (or an explicit request) pass through as a list;
        # a single value is returned bare and an empty list becomes None.
        if len(values) > 1 or seq:
            return values
        return values[0] if values else None

    if isinstance(data, list):
        if len(keys) > 0:
            if exception_on_missing_keys:
                # Collect every key present across all replies and complain
                # about patterns that match none of them.
                present = [k for reply in data for k in list(reply.keys())]
                missing = [k for k in keys if not fnmatch.filter(present, k)]
                if missing:
                    raise ProxyUnpackKeysNotAllFoundException(missing)
            picked = [reply[name]
                      for k in keys
                      for reply in data
                      for name in fnmatch.filter(reply, k)]
            return _collapse(picked, as_seq)
        return _collapse([v for reply in data for v in list(reply.values())], as_seq)

    # Single dict input.
    if len(data) == 1:
        return _collapse(list(data.values()), as_seq)
    elif len(keys) > 0:
        return _collapse([data[name] for k in keys for name in fnmatch.filter(data, k)], as_seq)
    return list(data.values())
def flatten(d: MutableMapping, parent_key: str = '', sep: str = '.'):
    """Flatten nested mappings into a single-level ProxyDict with dotted keys."""
    def _walk(mapping, prefix):
        # Yield (compound_key, value) pairs, recursing into sub-mappings.
        for key, value in mapping.items():
            compound = prefix + sep + key if prefix else key
            if isinstance(value, MutableMapping):
                yield from flatten(value, compound, sep=sep).items()
            else:
                yield compound, value
    return ProxyDict(_walk(d, parent_key))
class ProxyDict(dict):
    """Reply dict with convenience helpers for flattening and unpacking."""

    def flatten(self):
        """Return a flat, dotted-key copy of this dict."""
        return flatten(self)

    def unpack(self, *keys, as_seq: bool = False, exception_on_missing_keys: bool = False):
        """Extract (matching) values; see module-level :func:`unpack`."""
        return unpack(self, *keys,
                      as_seq=as_seq,
                      exception_on_missing_keys=exception_on_missing_keys)
class ProxyListOfDicts(list):
    """List of reply dicts with convenience helpers (one entry per actor)."""

    def flatten(self):
        """Flatten every contained dict."""
        flat_entries = [flatten(entry) for entry in self]
        return ProxyListOfDicts(flat_entries)

    def unpack(self, *keys, as_seq: bool = False, exception_on_missing_keys: bool = False):
        """Extract (matching) values across all contained dicts."""
        return unpack(self, *keys,
                      as_seq=as_seq,
                      exception_on_missing_keys=exception_on_missing_keys)

    def with_actors(self):
        """Map each reply to the actor that produced it (set by invoke())."""
        return dict(zip(self.actors, self))
|
sdssREPO_NAMEcluplusPATH_START.@cluplus_extracted@cluplus-main@python@cluplus@proxy.py@.PATH_END.py
|
{
"filename": "tests.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Jinja2/py2/jinja2/tests.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""Built-in template tests used with the ``is`` operator."""
import decimal
import operator
import re
from ._compat import abc
from ._compat import integer_types
from ._compat import string_types
from ._compat import text_type
from .runtime import Undefined
# Matches optionally signed integers and decimals, e.g. "-12" or "3.14".
number_re = re.compile(r"^-?\d+(\.\d+)?$")
# Concrete type of a compiled regex pattern (there is no public name for it).
regex_type = type(number_re)
# The builtin ``callable`` already implements the "callable" test.
test_callable = callable
def test_odd(value):
"""Return true if the variable is odd."""
return value % 2 == 1
def test_even(value):
"""Return true if the variable is even."""
return value % 2 == 0
def test_divisibleby(value, num):
"""Check if a variable is divisible by a number."""
return value % num == 0
def test_undefined(value):
    """Like :func:`defined` but the other way round."""
    return isinstance(value, Undefined)


def test_defined(value):
    """Return true if the variable is defined:

    .. sourcecode:: jinja

        {% if variable is defined %}
            value of variable: {{ variable }}
        {% else %}
            variable is not defined
        {% endif %}

    See the :func:`default` filter for a simple way to set undefined
    variables.
    """
    # Defined is simply the negation of the undefined test.
    return not test_undefined(value)


def test_none(value):
    """Return true if the variable is none."""
    return value is None
def test_boolean(value):
"""Return true if the object is a boolean value.
.. versionadded:: 2.11
"""
return value is True or value is False
def test_false(value):
"""Return true if the object is False.
.. versionadded:: 2.11
"""
return value is False
def test_true(value):
"""Return true if the object is True.
.. versionadded:: 2.11
"""
return value is True
# NOTE: The existing 'number' test matches booleans and floats
def test_integer(value):
"""Return true if the object is an integer.
.. versionadded:: 2.11
"""
return isinstance(value, integer_types) and value is not True and value is not False
# NOTE: The existing 'number' test matches booleans and integers
def test_float(value):
"""Return true if the object is a float.
.. versionadded:: 2.11
"""
return isinstance(value, float)
def test_lower(value):
    """Return true if the variable is lowercased."""
    text = text_type(value)
    return text.islower()


def test_upper(value):
    """Return true if the variable is uppercased."""
    text = text_type(value)
    return text.isupper()


def test_string(value):
    """Return true if the object is a string."""
    return isinstance(value, string_types)
def test_mapping(value):
    """Return true if the object is a mapping (dict etc.).

    .. versionadded:: 2.6
    """
    return isinstance(value, abc.Mapping)


def test_number(value):
    """Return true if the variable is a number."""
    # ints (and longs on py2), floats, complex and Decimal all count.
    numeric_types = integer_types + (float, complex, decimal.Decimal)
    return isinstance(value, numeric_types)
def test_sequence(value):
"""Return true if the variable is a sequence. Sequences are variables
that are iterable.
"""
try:
len(value)
value.__getitem__
except Exception:
return False
return True
def test_sameas(value, other):
"""Check if an object points to the same memory address than another
object:
.. sourcecode:: jinja
{% if foo.attribute is sameas false %}
the foo attribute really is the `False` singleton
{% endif %}
"""
return value is other
def test_iterable(value):
"""Check if it's possible to iterate over an object."""
try:
iter(value)
except TypeError:
return False
return True
def test_escaped(value):
"""Check if the value is escaped."""
return hasattr(value, "__html__")
def test_in(value, seq):
"""Check if value is in seq.
.. versionadded:: 2.10
"""
return value in seq
# Mapping of test names (as used with Jinja's ``is`` operator) to their
# implementing callables; the comparison tests are aliased directly to the
# corresponding ``operator`` module functions.
TESTS = {
    "odd": test_odd,
    "even": test_even,
    "divisibleby": test_divisibleby,
    "defined": test_defined,
    "undefined": test_undefined,
    "none": test_none,
    "boolean": test_boolean,
    "false": test_false,
    "true": test_true,
    "integer": test_integer,
    "float": test_float,
    "lower": test_lower,
    "upper": test_upper,
    "string": test_string,
    "mapping": test_mapping,
    "number": test_number,
    "sequence": test_sequence,
    "iterable": test_iterable,
    "callable": test_callable,
    "sameas": test_sameas,
    "escaped": test_escaped,
    "in": test_in,
    "==": operator.eq,
    "eq": operator.eq,
    "equalto": operator.eq,
    "!=": operator.ne,
    "ne": operator.ne,
    ">": operator.gt,
    "gt": operator.gt,
    "greaterthan": operator.gt,
    "ge": operator.ge,
    ">=": operator.ge,
    "<": operator.lt,
    "lt": operator.lt,
    "lessthan": operator.lt,
    "<=": operator.le,
    "le": operator.le,
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Jinja2@py2@jinja2@tests.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/test/suite/reports/__init__.py",
"type": "Python"
}
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@test@suite@reports@__init__.py@.PATH_END.py
|
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/example/fortran/pm_distUnif/setUnifRand/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import glob
import sys
linewidth = 2
fontsize = 17

# Line style per data kind (CK: complex, IK: integer, RK: real).
marker = { "CK" : "-"
         , "IK" : "."
         , "RK" : "-"
         }

# X-axis label per data kind.
xlab = { "CK" : "Uniform Random Value ( real/imaginary components )"
       , "IK" : "Uniform Random Value ( integer-valued )"
       , "RK" : "Uniform Random Value ( real-valued )"
       }

for kind in ["IK", "CK", "RK"]:
    # Expect exactly one output file per kind, e.g. "main.RK.txt".
    pattern = "*." + kind + ".txt"
    fileList = glob.glob(pattern)
    if len(fileList) == 1:
        df = pd.read_csv(fileList[0], delimiter = ",", header = None)
        fig = plt.figure(figsize = 1.25 * np.array([6.4, 4.8]), dpi = 200)
        ax = plt.subplot()
        # Overlay one histogram per column.  The original branched on
        # kind == "CK" with two byte-identical plt.hist calls; the dead
        # duplicate branch has been collapsed into a single call.
        for j in range(len(df.values[0,:])):
            plt.hist( df.values[:,j]
                    , histtype = "stepfilled"
                    , alpha = 0.5
                    , bins = 75
                    )
        plt.xticks(fontsize = fontsize - 2)
        plt.yticks(fontsize = fontsize - 2)
        ax.set_xlabel(xlab[kind], fontsize = 17)
        ax.set_ylabel("Count", fontsize = 17)
        ax.set_title("Histograms of {} Uniform random values".format(len(df.values[:, 0])), fontsize = 17)
        plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
        ax.tick_params(axis = "y", which = "minor")
        ax.tick_params(axis = "x", which = "minor")
        # Save next to the input file, swapping the extension.
        plt.savefig(fileList[0].replace(".txt",".png"))
    elif len(fileList) > 1:
        sys.exit("Ambiguous file list exists.")
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@example@fortran@pm_distUnif@setUnifRand@main.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/ternary/domain/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.InfoArrayValidator):
    """Validator for ``layout.ternary.domain.y`` — a two-element [min, max] pair."""

    def __init__(self, plotly_name="y", parent_name="layout.ternary.domain", **kwargs):
        # Each element of the domain pair is a number clamped to [0, 1];
        # two distinct dicts are passed, matching the generated schema.
        axis_item = {"editType": "plot", "max": 1, "min": 0, "valType": "number"}
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "plot"),
            items=kwargs.pop("items", [dict(axis_item), dict(axis_item)]),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@ternary@domain@_y.py@.PATH_END.py
|
{
"filename": "_xanchor.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/parcats/line/colorbar/_xanchor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XanchorValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``parcats.line.colorbar.xanchor``."""

    def __init__(
        self, plotly_name="xanchor", parent_name="parcats.line.colorbar", **kwargs
    ):
        # Allow caller overrides via kwargs, falling back to schema defaults.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["left", "center", "right"])
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@parcats@line@colorbar@_xanchor.py@.PATH_END.py
|
{
"filename": "_tickmode.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattercarpet/marker/colorbar/_tickmode.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TickmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for ``scattercarpet.marker.colorbar.tickmode``."""

    def __init__(
        self,
        plotly_name="tickmode",
        parent_name="scattercarpet.marker.colorbar",
        **kwargs,
    ):
        # Defaults mirror the plotly schema; callers may override via kwargs.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("implied_edits", {})
        kwargs.setdefault("values", ["auto", "linear", "array"])
        super(TickmodeValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattercarpet@marker@colorbar@_tickmode.py@.PATH_END.py
|
{
"filename": "_idssrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/bar/_idssrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class IdssrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for ``bar.idssrc`` (source reference for the ``ids`` array)."""

    def __init__(self, plotly_name="idssrc", parent_name="bar", **kwargs):
        # Src attributes never trigger a re-plot on their own.
        kwargs.setdefault("edit_type", "none")
        super(IdssrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@bar@_idssrc.py@.PATH_END.py
|
{
"filename": "test_whfast_testparticles.py",
"repo_name": "hannorein/REBOUND",
"repo_path": "REBOUND_extracted/REBOUND-main/rebound/tests/test_whfast_testparticles.py",
"type": "Python"
}
|
import rebound
import unittest
import rebound.data
import warnings
# WHFast coordinate systems exercised by every generated test below.
coordinatelist = ["democraticheliocentric","whds","jacobi"]
class TestIntegratorWHFastTestParticle(unittest.TestCase):
    """Empty container; test methods are attached dynamically via setattr below."""
    pass
def create_whfast_testparticle(coordinates, N, N_active):
    """Build a test: massless particles evolve identically with and without N_active."""
    def do_test(self):
        # Reference run: central mass plus N massless particles on P=1 orbits.
        base = rebound.Simulation()
        base.ri_whfast.coordinates = coordinates
        base.integrator = "whfast"
        base.dt = 1e-1
        base.add(m=1)
        for k in range(N):
            base.add(m=0, P=1, e=0.1, f=k)
        active = base.copy()
        if N_active > 0:
            active.N_active = N_active
        # Integrating for one full period should bring particles back to the start.
        active.integrate(1)
        # some combinations can't do a simple keplerian orbit exactly (sad but true)
        tol = 2e-14
        for k in range(base.N):
            p, q = base.particles[k], active.particles[k]
            self.assertLess(abs(p.x - q.x), tol)
            self.assertLess(abs(p.vx - q.vx), tol)
            self.assertLess(abs(p.y - q.y), tol)
            self.assertLess(abs(p.vy - q.vy), tol)
        # all should be able to do this exactly
        base.integrate(1)
        tol = 1e-16
        for k in range(base.N):
            p, q = base.particles[k], active.particles[k]
            self.assertLess(abs(p.x - q.x), tol)
            self.assertLess(abs(p.vx - q.vx), tol)
            self.assertLess(abs(p.y - q.y), tol)
            self.assertLess(abs(p.vy - q.vy), tol)
    return do_test
def create_whfast_testparticle_withplanet(coordinates, N, N_active):
    """Build a test: two massive planets plus massless particles, with/without N_active."""
    def do_test(self):
        tol = 1e-13
        base = rebound.Simulation()
        base.integrator = "whfast"
        base.ri_whfast.coordinates = coordinates
        base.dt = 1e-3
        base.add(m=1)
        base.add(m=1e-3, P=0.4)
        base.add(m=1e-3, P=0.7)
        for k in range(N):
            base.add(m=0, P=1, e=0.1, f=k)
        active = base.copy()
        if N_active > 0:
            active.N_active = N_active
        active.integrate(1)
        base.integrate(1)
        # Both runs must agree to within tol for every particle.
        for k in range(base.N):
            p, q = base.particles[k], active.particles[k]
            self.assertLess(abs(p.x - q.x), tol)
            self.assertLess(abs(p.vx - q.vx), tol)
            self.assertLess(abs(p.y - q.y), tol)
            self.assertLess(abs(p.vy - q.vy), tol)
    return do_test
def create_whfast_testparticletype1(coordinates, N_active):
    """Build a test: testparticle_type=1 with one planet, with/without N_active."""
    def do_test(self):
        tol = 1e-16
        base = rebound.Simulation()
        base.ri_whfast.coordinates = coordinates
        base.testparticle_type = 1
        base.integrator = "whfast"
        base.dt = 1e-3
        base.add(m=1)
        base.add(m=1e-3, P=1, e=0.1)
        active = base.copy()
        if N_active > 0:
            active.N_active = N_active
        active.integrate(1)
        base.integrate(1)
        # Both runs must agree exactly (to machine precision).
        for k in range(base.N):
            p, q = base.particles[k], active.particles[k]
            self.assertLess(abs(p.x - q.x), tol)
            self.assertLess(abs(p.vx - q.vx), tol)
            self.assertLess(abs(p.y - q.y), tol)
            self.assertLess(abs(p.vy - q.vy), tol)
    return do_test
def create_whfast_testparticletype1_withplanet(coordinates, N_active):
    """Build a test: testparticle_type=1 with three planets, with/without N_active."""
    def do_test(self):
        tol = 1e-16
        base = rebound.Simulation()
        base.integrator = "whfast"
        base.ri_whfast.coordinates = coordinates
        base.testparticle_type = 1
        base.dt = 1e-3
        base.add(m=1)
        base.add(m=1e-3, P=0.4)
        base.add(m=1e-3, P=0.7, e=0.1)
        base.add(m=1e-3, P=1.0)
        active = base.copy()
        if N_active > 0:
            active.N_active = N_active
        active.integrate(1)
        base.integrate(1)
        # Both runs must agree exactly (to machine precision).
        for k in range(base.N):
            p, q = base.particles[k], active.particles[k]
            self.assertLess(abs(p.x - q.x), tol)
            self.assertLess(abs(p.vx - q.vx), tol)
            self.assertLess(abs(p.y - q.y), tol)
            self.assertLess(abs(p.vy - q.vy), tol)
    return do_test
## Testparticles with mass currently lead to unexpected behaviour:
def create_whfast_massivetestparticle(coordinates, N):
    """Build a test: giving 'test particles' mass must emit one warning yet match."""
    def do_test(self):
        tol = 2e-13
        base = rebound.Simulation()
        base.ri_whfast.coordinates = coordinates
        base.integrator = "whfast"
        base.dt = 1e-3
        base.add(m=1)
        base.add(m=1e-3, P=0.4)
        for k in range(N):
            base.add(m=0, P=1, e=0.1, f=k)
        heavy = base.copy()
        for k in range(N):
            heavy.particles[k + 2].m = 1  # particles have zero mass for base, but finite for heavy
        heavy.N_active = 2
        base.integrate(1)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            heavy.integrate(1)
            # Exactly one warning is expected about massive test particles.
            self.assertEqual(1, len(w))
        for k in range(base.N):
            p, q = base.particles[k], heavy.particles[k]
            self.assertLess(abs(p.x - q.x), tol)
            self.assertLess(abs(p.vx - q.vx), tol)
            self.assertLess(abs(p.y - q.y), tol)
            self.assertLess(abs(p.vy - q.vy), tol)
    return do_test
def _attach(test_method, name):
    # Register a generated test on the TestCase class under the given name.
    test_method.__name__ = name
    setattr(TestIntegratorWHFastTestParticle, name, test_method)

# Attach one test method per (N, coordinates, N_active) combination.
for N in [1, 2]:
    for coordinates in coordinatelist:
        for N_active in [-1] + list(range(1, N + 2)):
            _attach(create_whfast_testparticle(coordinates, N, N_active),
                    f"test_whfast_testparticle_N{N}_N_active{N_active}_{coordinates}")
        for N_active in [-1] + list(range(3, N + 4)):
            _attach(create_whfast_testparticle_withplanet(coordinates, N, N_active),
                    f"test_whfast_testparticle_withplanet_N{N}_N_active{N_active}_{coordinates}")
        _attach(create_whfast_massivetestparticle(coordinates, N),
                f"test_whfast_massivetestparticle_N{N}_{coordinates}")
for coordinates in coordinatelist:
    for N_active in [-1, 1]:
        _attach(create_whfast_testparticletype1(coordinates, N_active),
                f"test_whfast_testparticletype1_N_active{N_active}_{coordinates}")
    for N_active in [-1, 3]:
        _attach(create_whfast_testparticletype1_withplanet(coordinates, N_active),
                f"test_whfast_testparticletype1_withplanet_N_active{N_active}_{coordinates}")
if __name__ == "__main__":
    unittest.main()
|
hannoreinREPO_NAMEREBOUNDPATH_START.@REBOUND_extracted@REBOUND-main@rebound@tests@test_whfast_testparticles.py@.PATH_END.py
|
{
"filename": "09_colormag_inclinations.py",
"repo_name": "mlipatov/paint_atmospheres",
"repo_path": "paint_atmospheres_extracted/paint_atmospheres-master/pa/usr/09_colormag_inclinations.py",
"type": "Python"
}
|
# Requires: a file listing inclinations and corresponding visual magnitudes of a star,
# the same for blue magnitudes;
# Output: a plot of visual magnitude versus color.
# Note: to get the required files run calc_star and calc_spectra for a set of inclinations, then
# run filter_spectra once for each filter to get a set of magnitudes.
from pa.lib import util as ut
from pa.lib import star as st
import numpy as np
import pickle
from matplotlib import pyplot as plt
from matplotlib import rc
iodir = '../../' # location of the input/output directory
# unpickle the filtered limb darkening information and get the filter indices
with open(iodir + 'data/limbdark_m01f.pkl', 'rb') as f:
    ld = pickle.load(f)
iV = ld.bands.index('V')
iB = ld.bands.index('B')
# star parameters
# NOTE(review): units are whatever st.Star expects (distance looks like cm) — confirm
omega, luminosity, mass, Req, distance = [0.6151, 40.346, 2.165, 2.815, 2.3694e19] # vega
n_z = 100
incs = np.arccos( np.linspace(1, 0, 50) ) # inclinations, equally spaced in cos(i)
# the observed inclination (presumably radians, same convention as incs — TODO confirm)
iobs = 0.08683
# create star
star = st.Star(omega, luminosity, mass, Req, distance, n_z, ld)
# compute its magnitudes at both filters at all inclinations
V = []
B = []
for i in incs:
    mags = star.integrate(i)
    V.append( mags[iV] )
    B.append( mags[iB] )
V = np.array(V)
B = np.array(B)
# colors
color = B - V
# compute the observed values
mags = star.integrate(iobs)
Vobs = mags[iV]
Bobs = mags[iB]
color_obs = Bobs - Vobs
# plot
rc('font',**{'family':'serif','serif':['Computer Modern'],'size': 18})
rc('text', usetex=True)
# arrow length
l = 0.01
# dictionary of arrow parameters
d = {'color':'k', 'fill':True, 'linewidth':2, 'length_includes_head':True,\
    'overhang':0.8, 'head_length':l/2, 'head_width':0.03}
max_V = np.max(V)
min_V = np.min(V)
delta_V = max_V - min_V
offset_V = delta_V * 0.1
fig = plt.figure()
ax = plt.axes()
# invert the y-axis: brighter (smaller V) at the top, as is conventional
ax.set_ylim([max_V + offset_V, min_V - offset_V])
ax.scatter(color, V, marker='o', c='b', s=15)
ax.scatter(color_obs, Vobs, marker='o', c='k', s=15)
ax.set_xlabel('B - V')
ax.set_xticks(0.01 * np.arange(7)[1:])
ax.set_ylabel('V')
# arrow pointing at the observed point from the right
ax.arrow(color_obs + 1.2*l, Vobs, -l, 0, **d)
fig.tight_layout(pad=0.1)
fig.savefig(iodir + 'vega_colormag.pdf', dpi=200)
plt.close(fig)
|
mlipatovREPO_NAMEpaint_atmospheresPATH_START.@paint_atmospheres_extracted@paint_atmospheres-master@pa@usr@09_colormag_inclinations.py@.PATH_END.py
|
{
"filename": "__main__.py",
"repo_name": "AWehrhahn/SME",
"repo_path": "SME_extracted/SME-master/src/pysme/__main__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
import argparse
from .gui import plot_plotly
from .sme import SME_Structure
from .solve import SME_Solver
from .synthesize import Synthesizer
if __name__ == "__main__":
    # Command-line interface: synthesize or fit a spectrum stored in a .sme file.
    parser = argparse.ArgumentParser(
        "PySME",
        description=(
            "Synthesizes stellar spectra and determines best fit parameters "
            "to observations. The results are stored in the same file as the input."
        ),
    )
    parser.add_argument("file", help=".sme input file")
    parser.add_argument(
        "-s",
        "--synthesize",
        action="store_true",
        help="Only synthesize the spectrum, no fitting",
    )
    parser.add_argument("-o", "--output", help="Store the output to this file instead")
    parser.add_argument(
        "-p",
        "--plot",
        nargs="?",
        const=True,
        default=False,
        help="Create a plot with the results",
    )
    args = parser.parse_args()
    print(args)
    synthesize_only = args.synthesize
    filename = args.file
    output = args.output
    plot = args.plot
    # Default to overwriting the input file in place when no -o is given.
    if output is None:
        output = filename
    sme = SME_Structure.load(filename)
    if synthesize_only:
        syn = Synthesizer()
        sme = syn.synthesize_spectrum(sme)
    else:
        solver = SME_Solver()
        sme = solver.solve(sme, sme.fitparameters)
    sme.save(output)
    # --plot with no value shows the figure interactively;
    # --plot PATH saves it to that file instead.
    if plot:
        fig = plot_plotly.FinalPlot(sme)
        if plot is True:
            fig.show()
        else:
            fig.save(filename=plot, auto_open=False)
|
AWehrhahnREPO_NAMESMEPATH_START.@SME_extracted@SME-master@src@pysme@__main__.py@.PATH_END.py
|
{
"filename": "copy_sam_libs.py",
"repo_name": "nanograv/holodeck",
"repo_path": "holodeck_extracted/holodeck-main/scripts/copy_sam_libs.py",
"type": "Python"
}
|
"""Copy holodeck SAM library files from a set of output directories into a single output folder.
For usage information, run:
python copy_sam_libs.py -h
Example:
python holodeck/scripts/copy_sam_libs.py ~/scratch/astro-strong ~/scratch/ -p "astro-strong-*"
Notes
-----
* The search for folders and library files are NOT recursive.
* The original copies are not deleted or modified.
* The copied files are automatically renamed.
* Configuration and pspace files are also copied and renamed.
"""
from pathlib import Path
import re
import shutil
import argparse
from holodeck.librarian import (
ARGS_CONFIG_FNAME, PSPACE_FILE_SUFFIX
)
# Tolerate runs lacking a config file (warn and store None);
# a missing parameter-space file is treated as a hard error.
ALLOW_MISSING_CONF = True
ALLOW_MISSING_PSPACE = False
def main():
    """Entry point: find matching SAM library runs and copy their files."""
    # Parse command-line arguments.
    args = _setup_argparse()
    # Locate library, parameter-space, and configuration files.
    if args.debug:
        print("-"*10 + " finding matching files " + "-"*10)
    lib_files, pspace_files, conf_files = find_files(args, "sam_lib.hdf5")
    # Copy everything into the single output directory.
    if args.debug:
        print("-"*10 + " copying files to new directory " + "-"*10)
    for batch in (conf_files, pspace_files, lib_files):
        copy_files(args, batch)
    return
def find_files(args, file_pattern):
    """
    Find folders within the 'start_path' that match 'folder_pattern' (not recursive), then return
    files matching 'file_pattern' within those folders (not recursive).

    Returns three parallel lists (one entry per library file found):
    library files, parameter-space files (or None), configuration files (or None).
    """
    lib_files = []
    pspace_files = []
    conf_files = []
    start_path = _to_abs_path(args.search_path)
    # Compile the regex patterns
    file_regex = re.compile(re.escape(file_pattern))
    folder_pattern = args.pattern
    # folder_regex = re.compile(re.escape("*" + folder_pattern))
    if args.debug:
        print(f"{start_path=}")
        print(f"{file_pattern=} ==> {file_regex=}")
        print(f"{folder_pattern=}")
    # NOTE: despite the '-p' help text, this is a *glob* pattern, not a regex.
    for path in start_path.glob(folder_pattern):
        if not path.is_dir():
            continue
        # for path in start_path.rglob('*')
        # if not folder_regex.match(str(path)):
        #     continue
        if args.debug:
            print(f"Found {path=} ...")
        for file in path.glob('*'):
            # Check if the file matches the file pattern
            if not file.is_file() or not file_regex.search(str(file)):
                continue
            # store library file
            lib_files.append(file)
            if args.debug:
                print(f"===> found {file=}")
            # get parameter-space save file (appended per library file to keep lists parallel)
            pspace_file = list(path.glob('*' + PSPACE_FILE_SUFFIX))
            if len(pspace_file) == 1:
                pspace_files.append(pspace_file[0])
            else:
                err = f"Could not find unambiguous parameter-space file! matches={pspace_file}"
                if ALLOW_MISSING_PSPACE:
                    print(err)
                    pspace_files.append(None)
                else:
                    raise FileNotFoundError(err)
            # get configuration file
            conf_file = path.joinpath(ARGS_CONFIG_FNAME)
            if conf_file.is_file():
                conf_files.append(conf_file)
            else:
                err = f"Could not find configuration file! '{conf_file}'"
                if ALLOW_MISSING_CONF:
                    print(err)
                    conf_files.append(None)
                else:
                    raise FileNotFoundError(err)
    if args.debug:
        print(f"Found {len(lib_files)} files.")
    return lib_files, pspace_files, conf_files
def copy_files(args, files):
    """Copy all of the given files to the output (``args.output``) directory.

    Parameters
    ----------
    args : argparse.Namespace
        Must provide ``output`` (a Path), ``rename``, ``debug``, ``overwrite``
        and ``dry_run`` attributes.
    files : list
        Paths to copy; ``None`` entries (missing optional files) are skipped.
    """
    for src in files:
        if src is None:
            continue
        src = Path(src)
        assert src.is_file()
        # Name of the run folder the file came from, used to disambiguate copies.
        folder = src.parts[-2]
        if args.rename:
            new_name = folder + "_" + src.name
            dst = args.output.joinpath(new_name)
        else:
            new_name = src.name
            dst = args.output.joinpath(folder, new_name)
        if args.debug:
            print(f"{src} ==>\n\t==> {dst}")
        if dst.exists() and not args.overwrite:
            print(f"destination already exists, skipping! '{dst}'\n")
            print("Use `--overwrite` to overwrite the file.")
            # BUG FIX: previously this fell through and copied anyway,
            # silently overwriting the existing file. Actually skip it.
            continue
        if not args.dry_run:
            shutil.copy(src, dst)
            assert dst.is_file()
        if not args.debug:
            print(dst)
    return
def _setup_argparse(*args, **kwargs):
    """Parse command-line arguments and prepare the output directory.

    Returns the populated ``argparse.Namespace``. ``output`` is converted to
    an absolute path and created if needed; ``rename`` is forced to True;
    ``--dry-run`` implies ``debug``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('output', metavar='output', type=str,
                        help='output path [created if doesnt exist].')
    parser.add_argument('search_path', type=str,
                        help="where to start the search for matching folders.")
    parser.add_argument('-p', '--pattern', action='store', type=str, default='*',
                        help="regex for folders to match (NOTE: put this in quotations!).")
    parser.add_argument('-o', '--overwrite', action='store_true', default=False,
                        help="overwrite existing files [otherwise raise error].")
    # If this is disabled, it will cause problems with config and pspace files...
    # so don't leave it as an option for now.  See hard-coded value below.
    #parser.add_argument('--rename', type=str_to_bool, nargs='?', default=True,
    #                    help='rename the sam_lib files based on their folder name.')
    parser.add_argument('--debug', action='store_true', default=False,
                        help='debug.')
    parser.add_argument('--dry-run', action='store_true', default=False, dest='dry_run',
                        help='dry-run.')
    namespace = argparse.Namespace(**kwargs)
    args = parser.parse_args(*args, namespace=namespace)
    # See note above, hard-code rename as true
    args.rename = True

    # ---- check / sanitize arguments

    if args.dry_run:
        print("`dry-run` is enabled. Settings `debug=True` automatically.")
        args.debug = True
    args.output = _to_abs_path(args.output)
    if args.debug:
        print(f"absolute path: {args.output=}")
    if args.output.is_file():
        # BUG FIX: message previously referenced the undefined name `output`,
        # which raised NameError instead of the intended RuntimeError.
        raise RuntimeError(f"The output path is already a file! {args.output}")
    args.output.mkdir(parents=True, exist_ok=True)
    return args
def str_to_bool(v):
    """Parse a human-friendly truth value into a bool.

    Booleans pass through unchanged; otherwise common yes/no spellings are
    matched case-insensitively. Anything else raises
    ``argparse.ArgumentTypeError``.
    """
    if isinstance(v, bool):
        return v
    lowered = v.lower()
    if lowered in ('yes', 'true', 't', 'y', '1'):
        return True
    if lowered in ('no', 'false', 'f', 'n', '0'):
        return False
    raise argparse.ArgumentTypeError('Boolean value expected.')
def _to_abs_path(rpath):
apath = Path(rpath).resolve()
if not apath.is_absolute:
apath = Path('.').resolve() / apath
apath = apath.resolve()
return apath
# Script entry point: run the copy workflow when executed directly.
if __name__ == "__main__":
    main()
|
nanogravREPO_NAMEholodeckPATH_START.@holodeck_extracted@holodeck-main@scripts@copy_sam_libs.py@.PATH_END.py
|
{
"filename": "_dy.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/image/_dy.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DyValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for ``image.dy`` (vertical pixel step)."""

    def __init__(self, plotly_name="dy", parent_name="image", **kwargs):
        # Changing the pixel step requires a full recalculation.
        kwargs.setdefault("edit_type", "calc")
        super(DyValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@image@_dy.py@.PATH_END.py
|
{
"filename": "template_class.py",
"repo_name": "nuc-astro/winnet",
"repo_path": "winnet_extracted/winnet-master/bin/movie_script/src_files/template_class.py",
"type": "Python"
}
|
# Author: Moritz Reichert
# Date : 26.09.2024
import numpy as np
class template(object):
    """
    Class to read a WinNet template file.

    Parses ``key = value`` lines (surrounding quotes stripped), skipping
    blank lines and ``#`` comments. Entries are parsed lazily on first
    access and can be read/written dict-style, then saved back to disk.
    """

    def __init__(self, path):
        """
        Initialize the template class.

        :param path: Path to the WinNet template (.par) file.
        """
        self.path = path

    def read_data(self):
        """
        Read the data from the template file and store it in a dictionary.
        """
        # Create an empty dictionary to store the entries
        self.__entries = {}
        # Read the data from the file
        with open(self.path, 'r') as f:
            self.data = f.readlines()
        for line in self.data:
            stripped = line.strip()
            # Skip comments and blank lines.
            if stripped.startswith('#') or stripped == "":
                continue
            # BUG FIX: split on the FIRST '=' only, so values may themselves
            # contain '=' (split("=")[1] silently truncated such values).
            key, sep, value = line.partition("=")
            if not sep:
                # Malformed line without '='; ignore instead of crashing.
                continue
            key = key.strip()
            value = value.strip().replace('"', '').replace("'", "")
            self.__entries[key] = value

    @property
    def entries(self):
        """
        Get the entries of the template file (parsed lazily on first access).
        """
        # hasattr uses the name-mangled attribute created by read_data.
        if not hasattr(self, '_template__entries'):
            self.read_data()
        return self.__entries

    def __getitem__(self, key):
        """
        Get the value of a specific key.
        """
        return self.entries[key]

    def __setitem__(self, key, value):
        """
        Set the value of a specific key.
        """
        self.entries[key] = value

    def save_template(self, path, winnet_path=None):
        """
        Save the template file.

        :param path: Destination file path.
        :param winnet_path: If given, replace the ``@WINNET@`` placeholder in
            every value with this path before writing.
        """
        with open(path, 'w') as f:
            for key, value in self.entries.items():
                if winnet_path is not None:
                    value = str(value).replace("@WINNET@", winnet_path)
                f.write(f"{key} = {value}\n")
if __name__ == '__main__':
    # Example:
    # NOTE(review): hardcoded developer-local path; only runs on the author's machine.
    path = '/home/mreichert/data/Networks/comparison_winNet/WinNet-dev/par/NSE_comp.par'
    t = template(path)
    print(t["isotopes_file"])
    t.save_template('test.par', winnet_path='../../runs/winnet')
|
nuc-astroREPO_NAMEwinnetPATH_START.@winnet_extracted@winnet-master@bin@movie_script@src_files@template_class.py@.PATH_END.py
|
{
"filename": "factor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/factor.py",
"type": "Python"
}
|
"""
pygments.lexers.factor
~~~~~~~~~~~~~~~~~~~~~~
Lexers for the Factor language.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.lexer import RegexLexer, bygroups, default, words
from pygments.token import Text, Comment, Keyword, Name, String, Number, \
Whitespace, Punctuation
__all__ = ['FactorLexer']
class FactorLexer(RegexLexer):
"""
Lexer for the Factor language.
"""
name = 'Factor'
url = 'http://factorcode.org'
aliases = ['factor']
filenames = ['*.factor']
mimetypes = ['text/x-factor']
version_added = '1.4'
builtin_kernel = words((
'-rot', '2bi', '2bi@', '2bi*', '2curry', '2dip', '2drop', '2dup', '2keep', '2nip',
'2over', '2tri', '2tri@', '2tri*', '3bi', '3curry', '3dip', '3drop', '3dup', '3keep',
'3tri', '4dip', '4drop', '4dup', '4keep', '<wrapper>', '=', '>boolean', 'clone',
'?', '?execute', '?if', 'and', 'assert', 'assert=', 'assert?', 'bi', 'bi-curry',
'bi-curry@', 'bi-curry*', 'bi@', 'bi*', 'boa', 'boolean', 'boolean?', 'both?',
'build', 'call', 'callstack', 'callstack>array', 'callstack?', 'clear', '(clone)',
'compose', 'compose?', 'curry', 'curry?', 'datastack', 'die', 'dip', 'do', 'drop',
'dup', 'dupd', 'either?', 'eq?', 'equal?', 'execute', 'hashcode', 'hashcode*',
'identity-hashcode', 'identity-tuple', 'identity-tuple?', 'if', 'if*',
'keep', 'loop', 'most', 'new', 'nip', 'not', 'null', 'object', 'or', 'over',
'pick', 'prepose', 'retainstack', 'rot', 'same?', 'swap', 'swapd', 'throw',
'tri', 'tri-curry', 'tri-curry@', 'tri-curry*', 'tri@', 'tri*', 'tuple',
'tuple?', 'unless', 'unless*', 'until', 'when', 'when*', 'while', 'with',
'wrapper', 'wrapper?', 'xor'), suffix=r'(\s+)')
builtin_assocs = words((
'2cache', '<enum>', '>alist', '?at', '?of', 'assoc', 'assoc-all?',
'assoc-any?', 'assoc-clone-like', 'assoc-combine', 'assoc-diff',
'assoc-diff!', 'assoc-differ', 'assoc-each', 'assoc-empty?',
'assoc-filter', 'assoc-filter!', 'assoc-filter-as', 'assoc-find',
'assoc-hashcode', 'assoc-intersect', 'assoc-like', 'assoc-map',
'assoc-map-as', 'assoc-partition', 'assoc-refine', 'assoc-size',
'assoc-stack', 'assoc-subset?', 'assoc-union', 'assoc-union!',
'assoc=', 'assoc>map', 'assoc?', 'at', 'at+', 'at*', 'cache', 'change-at',
'clear-assoc', 'delete-at', 'delete-at*', 'enum', 'enum?', 'extract-keys',
'inc-at', 'key?', 'keys', 'map>assoc', 'maybe-set-at', 'new-assoc', 'of',
'push-at', 'rename-at', 'set-at', 'sift-keys', 'sift-values', 'substitute',
'unzip', 'value-at', 'value-at*', 'value?', 'values', 'zip'), suffix=r'(\s+)')
builtin_combinators = words((
'2cleave', '2cleave>quot', '3cleave', '3cleave>quot', '4cleave',
'4cleave>quot', 'alist>quot', 'call-effect', 'case', 'case-find',
'case>quot', 'cleave', 'cleave>quot', 'cond', 'cond>quot', 'deep-spread>quot',
'execute-effect', 'linear-case-quot', 'no-case', 'no-case?', 'no-cond',
'no-cond?', 'recursive-hashcode', 'shallow-spread>quot', 'spread',
'to-fixed-point', 'wrong-values', 'wrong-values?'), suffix=r'(\s+)')
builtin_math = words((
'-', '/', '/f', '/i', '/mod', '2/', '2^', '<', '<=', '<fp-nan>', '>',
'>=', '>bignum', '>fixnum', '>float', '>integer', '(all-integers?)',
'(each-integer)', '(find-integer)', '*', '+', '?1+',
'abs', 'align', 'all-integers?', 'bignum', 'bignum?', 'bit?', 'bitand',
'bitnot', 'bitor', 'bits>double', 'bits>float', 'bitxor', 'complex',
'complex?', 'denominator', 'double>bits', 'each-integer', 'even?',
'find-integer', 'find-last-integer', 'fixnum', 'fixnum?', 'float',
'float>bits', 'float?', 'fp-bitwise=', 'fp-infinity?', 'fp-nan-payload',
'fp-nan?', 'fp-qnan?', 'fp-sign', 'fp-snan?', 'fp-special?',
'if-zero', 'imaginary-part', 'integer', 'integer>fixnum',
'integer>fixnum-strict', 'integer?', 'log2', 'log2-expects-positive',
'log2-expects-positive?', 'mod', 'neg', 'neg?', 'next-float',
'next-power-of-2', 'number', 'number=', 'number?', 'numerator', 'odd?',
'out-of-fixnum-range', 'out-of-fixnum-range?', 'power-of-2?',
'prev-float', 'ratio', 'ratio?', 'rational', 'rational?', 'real',
'real-part', 'real?', 'recip', 'rem', 'sgn', 'shift', 'sq', 'times',
'u<', 'u<=', 'u>', 'u>=', 'unless-zero', 'unordered?', 'when-zero',
'zero?'), suffix=r'(\s+)')
builtin_sequences = words((
'1sequence', '2all?', '2each', '2map', '2map-as', '2map-reduce', '2reduce',
'2selector', '2sequence', '3append', '3append-as', '3each', '3map', '3map-as',
'3sequence', '4sequence', '<repetition>', '<reversed>', '<slice>', '?first',
'?last', '?nth', '?second', '?set-nth', 'accumulate', 'accumulate!',
'accumulate-as', 'all?', 'any?', 'append', 'append!', 'append-as',
'assert-sequence', 'assert-sequence=', 'assert-sequence?',
'binary-reduce', 'bounds-check', 'bounds-check?', 'bounds-error',
'bounds-error?', 'but-last', 'but-last-slice', 'cartesian-each',
'cartesian-map', 'cartesian-product', 'change-nth', 'check-slice',
'check-slice-error', 'clone-like', 'collapse-slice', 'collector',
'collector-for', 'concat', 'concat-as', 'copy', 'count', 'cut', 'cut-slice',
'cut*', 'delete-all', 'delete-slice', 'drop-prefix', 'each', 'each-from',
'each-index', 'empty?', 'exchange', 'filter', 'filter!', 'filter-as', 'find',
'find-from', 'find-index', 'find-index-from', 'find-last', 'find-last-from',
'first', 'first2', 'first3', 'first4', 'flip', 'follow', 'fourth', 'glue', 'halves',
'harvest', 'head', 'head-slice', 'head-slice*', 'head*', 'head?',
'if-empty', 'immutable', 'immutable-sequence', 'immutable-sequence?',
'immutable?', 'index', 'index-from', 'indices', 'infimum', 'infimum-by',
'insert-nth', 'interleave', 'iota', 'iota-tuple', 'iota-tuple?', 'join',
'join-as', 'last', 'last-index', 'last-index-from', 'length', 'lengthen',
'like', 'longer', 'longer?', 'longest', 'map', 'map!', 'map-as', 'map-find',
'map-find-last', 'map-index', 'map-integers', 'map-reduce', 'map-sum',
'max-length', 'member-eq?', 'member?', 'midpoint@', 'min-length',
'mismatch', 'move', 'new-like', 'new-resizable', 'new-sequence',
'non-negative-integer-expected', 'non-negative-integer-expected?',
'nth', 'nths', 'pad-head', 'pad-tail', 'padding', 'partition', 'pop', 'pop*',
'prefix', 'prepend', 'prepend-as', 'produce', 'produce-as', 'product', 'push',
'push-all', 'push-either', 'push-if', 'reduce', 'reduce-index', 'remove',
'remove!', 'remove-eq', 'remove-eq!', 'remove-nth', 'remove-nth!', 'repetition',
'repetition?', 'replace-slice', 'replicate', 'replicate-as', 'rest',
'rest-slice', 'reverse', 'reverse!', 'reversed', 'reversed?', 'second',
'selector', 'selector-for', 'sequence', 'sequence-hashcode', 'sequence=',
'sequence?', 'set-first', 'set-fourth', 'set-last', 'set-length', 'set-nth',
'set-second', 'set-third', 'short', 'shorten', 'shorter', 'shorter?',
'shortest', 'sift', 'slice', 'slice-error', 'slice-error?', 'slice?',
'snip', 'snip-slice', 'start', 'start*', 'subseq', 'subseq?', 'suffix',
'suffix!', 'sum', 'sum-lengths', 'supremum', 'supremum-by', 'surround', 'tail',
'tail-slice', 'tail-slice*', 'tail*', 'tail?', 'third', 'trim',
'trim-head', 'trim-head-slice', 'trim-slice', 'trim-tail', 'trim-tail-slice',
'unclip', 'unclip-last', 'unclip-last-slice', 'unclip-slice', 'unless-empty',
'virtual-exemplar', 'virtual-sequence', 'virtual-sequence?', 'virtual@',
'when-empty'), suffix=r'(\s+)')
builtin_namespaces = words((
'+@', 'change', 'change-global', 'counter', 'dec', 'get', 'get-global',
'global', 'inc', 'init-namespaces', 'initialize', 'is-global', 'make-assoc',
'namespace', 'namestack', 'off', 'on', 'set', 'set-global', 'set-namestack',
'toggle', 'with-global', 'with-scope', 'with-variable', 'with-variables'),
suffix=r'(\s+)')
builtin_arrays = words((
'1array', '2array', '3array', '4array', '<array>', '>array', 'array',
'array?', 'pair', 'pair?', 'resize-array'), suffix=r'(\s+)')
builtin_io = words((
'(each-stream-block-slice)', '(each-stream-block)',
'(stream-contents-by-block)', '(stream-contents-by-element)',
'(stream-contents-by-length-or-block)',
'(stream-contents-by-length)', '+byte+', '+character+',
'bad-seek-type', 'bad-seek-type?', 'bl', 'contents', 'each-block',
'each-block-size', 'each-block-slice', 'each-line', 'each-morsel',
'each-stream-block', 'each-stream-block-slice', 'each-stream-line',
'error-stream', 'flush', 'input-stream', 'input-stream?',
'invalid-read-buffer', 'invalid-read-buffer?', 'lines', 'nl',
'output-stream', 'output-stream?', 'print', 'read', 'read-into',
'read-partial', 'read-partial-into', 'read-until', 'read1', 'readln',
'seek-absolute', 'seek-absolute?', 'seek-end', 'seek-end?',
'seek-input', 'seek-output', 'seek-relative', 'seek-relative?',
'stream-bl', 'stream-contents', 'stream-contents*', 'stream-copy',
'stream-copy*', 'stream-element-type', 'stream-flush',
'stream-length', 'stream-lines', 'stream-nl', 'stream-print',
'stream-read', 'stream-read-into', 'stream-read-partial',
'stream-read-partial-into', 'stream-read-partial-unsafe',
'stream-read-unsafe', 'stream-read-until', 'stream-read1',
'stream-readln', 'stream-seek', 'stream-seekable?', 'stream-tell',
'stream-write', 'stream-write1', 'tell-input', 'tell-output',
'with-error-stream', 'with-error-stream*', 'with-error>output',
'with-input-output+error-streams',
'with-input-output+error-streams*', 'with-input-stream',
'with-input-stream*', 'with-output-stream', 'with-output-stream*',
'with-output>error', 'with-output+error-stream',
'with-output+error-stream*', 'with-streams', 'with-streams*',
'write', 'write1'), suffix=r'(\s+)')
builtin_strings = words((
'1string', '<string>', '>string', 'resize-string', 'string',
'string?'), suffix=r'(\s+)')
builtin_vectors = words((
'1vector', '<vector>', '>vector', '?push', 'vector', 'vector?'),
suffix=r'(\s+)')
builtin_continuations = words((
'<condition>', '<continuation>', '<restart>', 'attempt-all',
'attempt-all-error', 'attempt-all-error?', 'callback-error-hook',
'callcc0', 'callcc1', 'cleanup', 'compute-restarts', 'condition',
'condition?', 'continuation', 'continuation?', 'continue',
'continue-restart', 'continue-with', 'current-continuation',
'error', 'error-continuation', 'error-in-thread', 'error-thread',
'ifcc', 'ignore-errors', 'in-callback?', 'original-error', 'recover',
'restart', 'restart?', 'restarts', 'rethrow', 'rethrow-restarts',
'return', 'return-continuation', 'thread-error-hook', 'throw-continue',
'throw-restarts', 'with-datastack', 'with-return'), suffix=r'(\s+)')
tokens = {
'root': [
# factor allows a file to start with a shebang
(r'#!.*$', Comment.Preproc),
default('base'),
],
'base': [
(r'\s+', Whitespace),
# defining words
(r'((?:MACRO|MEMO|TYPED)?:[:]?)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function)),
(r'(M:[:]?)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace,
Name.Function)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function, Whitespace,
Name.Class)),
(r'(GENERIC:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function)),
(r'(HOOK:|GENERIC#)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function, Whitespace,
Name.Function)),
(r'(\()(\s)', bygroups(Name.Function, Whitespace), 'stackeffect'),
(r'(;)(\s)', bygroups(Keyword, Whitespace)),
# imports and namespaces
(r'(USING:)(\s+)',
bygroups(Keyword.Namespace, Whitespace), 'vocabs'),
(r'(USE:|UNUSE:|IN:|QUALIFIED:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
(r'(QUALIFIED-WITH:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
Whitespace, Name.Namespace)),
(r'(FROM:|EXCLUDE:)(\s+)(\S+)(\s+=>\s)',
bygroups(Keyword.Namespace, Whitespace, Name.Namespace,
Whitespace), 'words'),
(r'(RENAME:)(\s+)(\S+)(\s+)(\S+)(\s+)(=>)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
Name.Namespace, Whitespace, Punctuation, Whitespace,
Name.Function)),
(r'(ALIAS:|TYPEDEF:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Function, Whitespace,
Name.Function)),
(r'(DEFER:|FORGET:|POSTPONE:)(\s+)(\S+)',
bygroups(Keyword.Namespace, Whitespace, Name.Function)),
# tuples and classes
(r'(TUPLE:|ERROR:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Punctuation,
Whitespace, Name.Class), 'slots'),
(r'(TUPLE:|ERROR:|BUILTIN:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class), 'slots'),
(r'(MIXIN:|UNION:|INTERSECTION:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class)),
(r'(PREDICATE:)(\s+)(\S+)(\s+)(<)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace,
Punctuation, Whitespace, Name.Class)),
(r'(C:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function, Whitespace, Name.Class)),
(r'(INSTANCE:)(\s+)(\S+)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Class, Whitespace, Name.Class)),
(r'(SLOT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Function)),
(r'(SINGLETON:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
(r'SINGLETONS:', Keyword, 'classes'),
# other syntax
(r'(CONSTANT:|SYMBOL:|MAIN:|HELP:)(\s+)(\S+)',
bygroups(Keyword, Whitespace, Name.Function)),
(r'(SYMBOLS:)(\s+)', bygroups(Keyword, Whitespace), 'words'),
(r'(SYNTAX:)(\s+)', bygroups(Keyword, Whitespace)),
(r'(ALIEN:)(\s+)', bygroups(Keyword, Whitespace)),
(r'(STRUCT:)(\s+)(\S+)', bygroups(Keyword, Whitespace, Name.Class)),
(r'(FUNCTION:)(\s+)'
r'(\S+)(\s+)(\S+)(\s+)'
r'(\()(\s+)([^)]+)(\))(\s)',
bygroups(Keyword.Namespace, Whitespace,
Text, Whitespace, Name.Function, Whitespace,
Punctuation, Whitespace, Text, Punctuation, Whitespace)),
(r'(FUNCTION-ALIAS:)(\s+)'
r'(\S+)(\s+)(\S+)(\s+)'
r'(\S+)(\s+)'
r'(\()(\s+)([^)]+)(\))(\s)',
bygroups(Keyword.Namespace, Whitespace,
Text, Whitespace, Name.Function, Whitespace,
Name.Function, Whitespace,
Punctuation, Whitespace, Text, Punctuation, Whitespace)),
# vocab.private
(r'(<PRIVATE|PRIVATE>)(\s)', bygroups(Keyword.Namespace, Whitespace)),
# strings
(r'"""\s(?:.|\n)*?\s"""', String),
(r'"(?:\\\\|\\"|[^"])*"', String),
(r'(\S+")(\s+)((?:\\\\|\\"|[^"])*")',
bygroups(String, Whitespace, String)),
(r'(CHAR:)(\s+)(\\[\\abfnrstv]|[^\\]\S*)(\s)',
bygroups(String.Char, Whitespace, String.Char, Whitespace)),
# comments
(r'!\s+.*$', Comment),
(r'#!\s+.*$', Comment),
(r'/\*\s+(?:.|\n)*?\s\*/', Comment),
# boolean constants
(r'[tf]\b', Name.Constant),
# symbols and literals
(r'[\\$]\s+\S+', Name.Constant),
(r'M\\\s+\S+\s+\S+', Name.Constant),
# numbers
(r'[+-]?(?:[\d,]*\d)?\.(?:\d([\d,]*\d)?)?(?:[eE][+-]?\d+)?\s', Number),
(r'[+-]?\d(?:[\d,]*\d)?(?:[eE][+-]?\d+)?\s', Number),
(r'0x[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'NAN:\s+[a-fA-F\d](?:[a-fA-F\d,]*[a-fA-F\d])?(?:p\d([\d,]*\d)?)?\s', Number),
(r'0b[01]+\s', Number.Bin),
(r'0o[0-7]+\s', Number.Oct),
(r'(?:\d([\d,]*\d)?)?\+\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
(r'(?:\-\d([\d,]*\d)?)?\-\d(?:[\d,]*\d)?/\d(?:[\d,]*\d)?\s', Number),
# keywords
(r'(?:deprecated|final|foldable|flushable|inline|recursive)\s',
Keyword),
# builtins
(builtin_kernel, bygroups(Name.Builtin, Whitespace)),
(builtin_assocs, bygroups(Name.Builtin, Whitespace)),
(builtin_combinators, bygroups(Name.Builtin, Whitespace)),
(builtin_math, bygroups(Name.Builtin, Whitespace)),
(builtin_sequences, bygroups(Name.Builtin, Whitespace)),
(builtin_namespaces, bygroups(Name.Builtin, Whitespace)),
(builtin_arrays, bygroups(Name.Builtin, Whitespace)),
(builtin_io, bygroups(Name.Builtin, Whitespace)),
(builtin_strings, bygroups(Name.Builtin, Whitespace)),
(builtin_vectors, bygroups(Name.Builtin, Whitespace)),
(builtin_continuations, bygroups(Name.Builtin, Whitespace)),
# everything else is text
(r'\S+', Text),
],
'stackeffect': [
(r'\s+', Whitespace),
(r'(\()(\s+)', bygroups(Name.Function, Whitespace), 'stackeffect'),
(r'(\))(\s+)', bygroups(Name.Function, Whitespace), '#pop'),
(r'(--)(\s+)', bygroups(Name.Function, Whitespace)),
(r'\S+', Name.Variable),
],
'slots': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'(\{)(\s+)(\S+)(\s+)([^}]+)(\s+)(\})(\s+)',
bygroups(Text, Whitespace, Name.Variable, Whitespace,
Text, Whitespace, Text, Whitespace)),
(r'\S+', Name.Variable),
],
'vocabs': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'\S+', Name.Namespace),
],
'classes': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'\S+', Name.Class),
],
'words': [
(r'\s+', Whitespace),
(r'(;)(\s+)', bygroups(Keyword, Whitespace), '#pop'),
(r'\S+', Name.Function),
],
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@lexers@factor.py@.PATH_END.py
|
{
"filename": "distribution_lib.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/distribution/distribution_lib.py",
"type": "Python"
}
|
"""Unified high-level distribution APIs across backends.
Currently only the JAX backend is supported. The TensorFlow backend
will be supported in the future (via tf.dtensor API).
"""
import collections
import contextlib
import os
import re
import warnings
import numpy as np
from keras.src.api_export import keras_export
from keras.src.backend import KerasTensor
from keras.src.backend import distribution_lib
from keras.src.backend.common import global_state
DEFAULT_BATCH_DIM_NAME = "batch"
GLOBAL_ATTRIBUTE_NAME = "distribution"
@keras_export("keras.distribution.list_devices")
def list_devices(device_type=None):
    """Return all devices available for distributed computation.

    Note: in a distributed setting, the *global* devices are returned.

    Args:
        device_type: string, one of `"cpu"`, `"gpu"` or `"tpu"`. When not
            provided, defaults to `"gpu"` or `"tpu"` if available, and
            falls back to the `"cpu"` devices otherwise.

    Returns:
        List of devices that are available for distributed computation.
    """
    # Device discovery is delegated to the active backend implementation.
    return distribution_lib.list_devices(device_type)
@keras_export("keras.distribution.initialize")
def initialize(job_addresses=None, num_processes=None, process_id=None):
    """Initialize the distribution system for a multi-host/process setting.

    Calling `initialize` prepares the backend for execution on multi-host
    GPUs or TPUs. It should be called before any computations.

    Every argument can alternatively be supplied via an environment
    variable, which is convenient when a launch script controls the
    configuration; explicitly passed arguments take precedence over the
    environment. For backends that themselves rely on environment
    variables, Keras will properly forward the settings.

    Args:
        job_addresses: string, comma separated IP addresses for all the
            jobs that form the computation cluster. For the JAX backend,
            only the address of job 0 (the coordinator) is needed. For
            certain runtimes like cloud TPU, this value can be `None` and
            the backend will derive it from the TPU environment variables.
            Env var: `KERAS_DISTRIBUTION_JOB_ADDRESSES`.
        num_processes: int, the total number of workers/processes that form
            the computation cluster. For certain runtimes like cloud TPU,
            this value can be `None` and the backend will figure it out
            from the TPU environment variables.
            Env var: `KERAS_DISTRIBUTION_NUM_PROCESSES`.
        process_id: int, the ID of the current worker/process, in the range
            `0` to `num_processes - 1`; `0` indicates the master/coordinator
            job. Env var: `KERAS_DISTRIBUTION_PROCESS_ID`.

    Example:

    Suppose there are two GPU processes: process 0 at `10.0.0.1:1234` and
    process 1 at `10.0.0.2:2345`. Each process calls

    ```python
    keras.distribute.initialize(
        job_addresses="10.0.0.1:1234,10.0.0.2:2345",
        num_processes=2,
        process_id=0)  # 1 on the second process
    ```

    or equivalently sets `KERAS_DISTRIBUTION_JOB_ADDRESSES`,
    `KERAS_DISTRIBUTION_NUM_PROCESSES` and `KERAS_DISTRIBUTION_PROCESS_ID`
    in `os.environ` and calls `keras.distribute.initialize()` with no
    arguments.

    Also note that for the JAX backend, `job_addresses` can be reduced to
    just the master/coordinator address, i.e. `10.0.0.1:1234`.
    """
    # Fall back to the environment only for arguments not given explicitly.
    if job_addresses is None:
        job_addresses = os.environ.get("KERAS_DISTRIBUTION_JOB_ADDRESSES")
    if num_processes is None:
        env_num_processes = os.environ.get("KERAS_DISTRIBUTION_NUM_PROCESSES")
        if env_num_processes is not None:
            num_processes = int(env_num_processes)
    if process_id is None:
        env_process_id = os.environ.get("KERAS_DISTRIBUTION_PROCESS_ID")
        if env_process_id is not None:
            process_id = int(env_process_id)
    distribution_lib.initialize(job_addresses, num_processes, process_id)
@keras_export("keras.distribution.DeviceMesh")
class DeviceMesh:
    """A cluster of computation devices for distributed computation.

    This API is aligned with `jax.sharding.Mesh` and `tf.dtensor.Mesh`,
    both of which represent the computation devices in the global context.

    See more details in [jax.sharding.Mesh](
    https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.Mesh)
    and [tf.dtensor.Mesh](
    https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Mesh).

    Args:
        shape: tuple of list of integers. The shape of the overall mesh,
            e.g. `(8,)` for data-parallel-only distribution, or `(4, 2)`
            for a model+data parallel distribution.
        axis_names: List of string. The logical name of each mesh axis;
            its length must match the rank of `shape`. These names are
            used to match/create `TensorLayout`s when distributing data
            and variables.
        devices: Optional list of devices. Defaults to all devices found
            locally via `keras.distribution.list_devices()`.
    """

    def __init__(self, shape, axis_names, devices=None):
        # Reject empty configuration up front.
        if not shape or not axis_names:
            raise ValueError(
                "Shape and axis_names cannot be empty. Received: "
                f"shape={shape}, axis_names={axis_names}"
            )
        if len(shape) != len(axis_names):
            raise ValueError(
                "Shape and axis_names should have same size. "
                f"Received: shape={shape}, axis_names={axis_names}"
            )
        # Fall back to local device discovery when none were supplied.
        device_array = np.array(list_devices() if devices is None else devices)
        if np.prod(shape) != np.prod(device_array.shape):
            raise ValueError(
                "Shape does not match the number of devices. "
                f"Received: shape={shape}; devices.shape="
                f"{device_array.shape}"
            )
        self._shape = shape
        self._axis_names = axis_names
        # Arrange the flat device list into the requested mesh shape.
        self._devices = np.reshape(device_array, shape)

    @property
    def shape(self):
        return self._shape

    @property
    def axis_names(self):
        return self._axis_names

    @property
    def devices(self):
        return self._devices

    def __repr__(self):
        return (
            f"<{self.__class__.__name__} "
            f"shape={self.shape}, axis_names={self.axis_names}>"
        )

    def __str__(self):
        return repr(self)
@keras_export("keras.distribution.TensorLayout")
class TensorLayout:
    """A layout to apply to a tensor.

    This API is aligned with `jax.sharding.NamedSharding`
    and `tf.dtensor.Layout`.

    See more details in [jax.sharding.NamedSharding](
    https://jax.readthedocs.io/en/latest/jax.sharding.html#jax.sharding.NamedSharding)
    and [tf.dtensor.Layout](
    https://www.tensorflow.org/api_docs/python/tf/experimental/dtensor/Layout).

    Args:
        axes: tuple of strings that should map to the `axis_names` of a
            `DeviceMesh`. Use `None` as a placeholder for any dimension
            that does not need sharding.
        device_mesh: Optional `DeviceMesh` used to create the layout; the
            actual mapping of tensor to physical device is not known until
            the mesh is specified. It can also be attached later via the
            `device_mesh` setter (exactly once).
    """

    def __init__(self, axes, device_mesh=None):
        self._axes = tuple(axes)
        self._device_mesh = device_mesh
        self._validate_axes()

    @property
    def axes(self):
        return self._axes

    @property
    def device_mesh(self):
        return self._device_mesh

    @device_mesh.setter
    def device_mesh(self, device_mesh):
        # The mesh may be populated only once; re-binding is an error.
        if self._device_mesh is not None:
            raise ValueError(
                "Cannot override device mesh value. Existing "
                f"value is {self._device_mesh}"
            )
        self._device_mesh = device_mesh
        self._validate_axes()

    def _validate_axes(self):
        """Check that every non-`None` axis exists in the attached mesh."""
        if not self._device_mesh:
            return
        valid_axis_names = set(self._device_mesh.axis_names)
        axis_names = {axis for axis in self._axes if axis is not None}
        if axis_names - valid_axis_names:
            raise ValueError(
                "Invalid axis names for Layout. Valid axis "
                f"names: {valid_axis_names}, Got {axis_names}"
            )

    def __repr__(self):
        return (
            f"<{self.__class__.__name__} "
            f"axes={self.axes}, device_mesh={self.device_mesh}>"
        )

    def __str__(self):
        return repr(self)
class Distribution:
    """Base class for variable distribution strategies.

    A concrete `Distribution` answers three questions:

    1. How to distribute the model variables to a `DeviceMesh`.
    2. How to distribute the input data to a `DeviceMesh`.
    3. How to distribute an intermediate state tensor in the model.

    It also provides a context scope so that the framework can detect the
    active `Distribution` and place variables/data accordingly.

    Args:
        device_mesh: A `DeviceMesh` instance.
    """

    def __init__(self, device_mesh):
        self._device_mesh = device_mesh

    def get_data_layout(self, data_shape):
        """Retrieve the `TensorLayout` for the input data.

        Args:
            data_shape: shape of the input data, as a list or tuple.

        Returns:
            The `TensorLayout` for the data, usable by
            `backend.distribute_value()` to redistribute an input.
        """
        raise NotImplementedError()

    def get_variable_layout(self, variable):
        """Retrieve the `TensorLayout` for the variable.

        Args:
            variable: A `Variable` instance.

        Returns:
            The `TensorLayout` for the variable, usable by
            `backend.distribute_value()` to redistribute a variable.
        """
        raise NotImplementedError()

    def get_tensor_layout(self, path):
        """Retrieve the `TensorLayout` for an intermediate tensor.

        Args:
            path: string path identifying the tensor.

        Returns:
            The `TensorLayout` for the intermediate tensor, usable by
            `backend.relayout()` to reshard it, or `None`.
        """
        raise NotImplementedError()

    @contextlib.contextmanager
    def scope(self):
        """Context manager that makes this `Distribution` the current one."""
        previous_distribution = distribution()
        set_distribution(self)
        try:
            yield
        finally:
            # Always restore the prior global distribution, even on error.
            set_distribution(previous_distribution)

    @property
    def device_mesh(self):
        return self._device_mesh

    def distribute_dataset(self, dataset):
        """Create a distributed dataset from the original user dataset.

        Args:
            dataset: the original global dataset instance. Only
                `tf.data.Dataset` is supported at the moment.

        Returns:
            A sharded `tf.data.Dataset` instance that produces data for the
            current local worker/process.
        """
        raise NotImplementedError()

    def __repr__(self):
        return f"<{self.__class__.__name__} device_mesh={self.device_mesh}>"

    def __str__(self):
        return repr(self)
@keras_export("keras.distribution.DataParallel")
class DataParallel(Distribution):
    """Distribution for data parallelism.

    You can choose to create this instance by either specifying
    the `device_mesh` or `devices` arguments (but not both).

    The `device_mesh` argument is expected to be a `DeviceMesh` instance,
    and is expected to be 1D only. In case that the mesh has multiple axes,
    then the first axis will be treated as the data parallel dimension
    (and a warning will be raised).

    When a list of `devices` are provided, they will be used to construct a
    1D mesh.

    When both `mesh` and `devices` are absent, then `list_devices()`
    will be used to detect any available devices and create a 1D mesh from
    them.

    Args:
        device_mesh: Optional `DeviceMesh` instance.
        devices: Optional list of devices.
        auto_shard_dataset: Automatically shard the dataset amongst processes.
            Defaults to true.
    """

    def __init__(self, device_mesh=None, devices=None, auto_shard_dataset=True):
        # Exactly one initialization path runs: an explicit mesh wins over
        # an explicit device list, which wins over local auto-detection.
        if device_mesh:
            self._initialize_with_device_mesh(device_mesh)
        elif devices:
            self._initialize_mesh_from_devices(devices)
        else:
            self._initialize_mesh_from_list_devices()
        # The first mesh axis is used to shard the batch dimension.
        self._batch_dim_name = self.device_mesh.axis_names[0]
        # Those following attributes might get convert to public methods.
        self._num_process = distribution_lib.num_processes()
        self._process_id = distribution_lib.process_id()
        self._is_multi_process = self._num_process > 1
        self._auto_shard_dataset = auto_shard_dataset

    def _initialize_with_device_mesh(self, device_mesh):
        """Validate a user-provided `DeviceMesh` and adopt it."""
        if not isinstance(device_mesh, DeviceMesh):
            raise ValueError(
                "Expect `mesh` to be an instance of `DeviceMesh`. "
                f"Received: mesh={device_mesh} (of type {type(device_mesh)})"
            )
        super().__init__(device_mesh)
        if self.device_mesh.devices.ndim != 1:
            # Bug fix: `warnings.warn` does not %-format its message, and
            # its second positional argument is the warning *category*.
            # The previous call passed the ndim int there, which raises
            # "TypeError: category must be a Warning subclass" whenever
            # this path was hit. Format the message up front instead.
            warnings.warn(
                "Expect the input mesh to be 1D, but received "
                f"mesh.devices.ndim={device_mesh.devices.ndim}. "
                "The first axis will be used for data-parallel sharding."
            )

    def _initialize_mesh_from_devices(self, devices):
        """Build a 1D `DeviceMesh` from an explicit list of devices."""
        devices = np.array(devices)
        device_mesh = DeviceMesh(
            shape=devices.shape,
            axis_names=[DEFAULT_BATCH_DIM_NAME],
            devices=devices,
        )
        super().__init__(device_mesh)

    def _initialize_mesh_from_list_devices(self):
        """Build a 1D `DeviceMesh` from all locally detected devices."""
        devices = np.array(list_devices())
        device_mesh = DeviceMesh(
            shape=devices.shape,
            axis_names=[DEFAULT_BATCH_DIM_NAME],
            devices=devices,
        )
        super().__init__(device_mesh)

    def get_data_layout(self, data_shape):
        """Return a layout sharding dim 0 (batch) and replicating the rest."""
        data_shard_spec = [None] * len(data_shape)
        data_shard_spec[0] = self._batch_dim_name  # Shard on the first dim
        return TensorLayout(data_shard_spec, self.device_mesh)

    def get_variable_layout(self, variable):
        """Return a fully replicated layout: pure data parallelism never
        shards variables."""
        variable_shard_spec = [None] * len(variable.shape)
        return TensorLayout(variable_shard_spec, self.device_mesh)

    def get_tensor_layout(self, path):
        # For data parallel training, the intermediate state is not changed.
        return None

    def distribute_dataset(self, dataset):
        """Shard a `tf.data.Dataset` across processes for multi-process runs.

        Args:
            dataset: the global `tf.data.Dataset` instance.

        Returns:
            A sharded dataset feeding only the current process, or the
            input dataset unchanged when running single-process or when
            `auto_shard_dataset=False`.

        Raises:
            ValueError: if `dataset` is not a `tf.data.Dataset`, or if its
                batch size cannot be statically determined.
        """
        from tensorflow.python.data.experimental.ops import (
            distribute as tf_data_distribute,
        )

        from keras.src.utils.module_utils import tensorflow as tf

        if not isinstance(dataset, tf.data.Dataset):
            raise ValueError(
                "Only `tf.data.Dataset` is supported for "
                f"sharding, got {type(dataset)}"
            )
        if not self._is_multi_process or not self._auto_shard_dataset:
            return dataset

        # `compute_batch_size` reports a negative value when the batch size
        # cannot be statically determined from the dataset graph.
        batch_size = tf_data_distribute.compute_batch_size(dataset)
        if batch_size.numpy() < 0:
            raise ValueError(
                "The batch size of the input dataset is "
                "unknown. Please config the batch size for "
                "the input dataset, e.g via `dataset.batch(batch_size)`"
            )
        per_worker_batch_size = tf_data_distribute.batch_sizes_for_worker(
            global_batch_size=batch_size,
            num_workers=self._num_process,
            num_replicas_per_worker=1,  # We hard code this for now.
            worker_index=self._process_id,
        )
        # Rebatch to the per-worker size, then let tf.data pick this
        # worker's shard of the files/elements.
        distributed_dataset = dataset.rebatch(per_worker_batch_size)
        distributed_dataset = tf_data_distribute._AutoShardDataset(
            distributed_dataset,
            num_workers=self._num_process,
            index=self._process_id,
            num_replicas=self._num_process,
        )
        return distributed_dataset.prefetch(tf.data.AUTOTUNE)
@keras_export("keras.distribution.ModelParallel")
class ModelParallel(Distribution):
    """Distribution that shards model variables.
    Compare to `DataParallel` which replicates the variables across all devices,
    `ModelParallel` allows you to shard variables in addition to the input data.
    To construct a `ModelParallel` distribution, you need to provide a
    `DeviceMesh` and a `LayoutMap`.
    1. `DeviceMesh` contains physical device information. The axis names in
        the mesh will be used to map the variable and data layout.
    2. `LayoutMap` contains the mapping between variable paths to their
        corresponding `TensorLayout`.
    Example:
    ```python
    devices = list_devices()    # Assume there are 8 devices.
    # Create a mesh with 2 devices for data parallelism and 4 devices for
    # model parallelism.
    device_mesh = DeviceMesh(shape=(2, 4), axis_names=('batch', 'model'),
                             devices=devices)
    # Create a layout map that shard the `Dense` layer and `Conv2D`
    # layer variables on the last dimension.
    # Based on the `device_mesh`, this means the variables
    # will be split across 4 devices. Any other variable that doesn't
    # match any key in the layout map will be fully replicated.
    layout_map = LayoutMap(device_mesh)
    layout_map['dense.*kernel'] = (None, 'model')
    layout_map['dense.*bias'] = ('model',)
    layout_map['conv2d.*kernel'] = (None, None, None, 'model')
    layout_map['conv2d.*bias'] = ('model',)
    distribution = ModelParallel(
        layout_map=layout_map,
        batch_dim_name='batch',
    )
    # Set the global distribution, or via `with distribution.scope():`
    set_distribution(distribution)
    model = model_creation()
    model.compile()
    model.fit(data)
    ```
    You can quickly update the device mesh shape to change the sharding factor
    of the variables. E.g.
    ```python
    # With only the shape change for the device mesh, the variables will be
    # sharded across 8 devices instead of 4, which further reduces the memory
    # footprint of variables on each of the device.
    device_mesh = DeviceMesh(
        shape=(1, 8),
        axis_names=('batch', 'model'),
        devices=devices,
    )
    ```
    To figure out a proper layout mapping rule for all the model variables, you
    can first list out all the model variable paths, which will be used as the
    key to map the variables to `TensorLayout`.
    e.g.
    ```python
    model = create_model()
    for v in model.variables:
        print(v.path)
    ```
    Args:
        layout_map: `LayoutMap` instance which map the variable path to the
            corresponding tensor layout.
        batch_dim_name: Optional string, the axis name in the device mesh
            (of the `layout_map` object)
            that will be used to distribute data. If unspecified, the
            first axis from the device mesh will be used.
    """
    def __init__(self, *, layout_map=None, batch_dim_name=None, **kwargs):
        """Build the distribution from a required `LayoutMap` (keyword-only).

        The device mesh is taken from `layout_map.device_mesh`. A legacy
        `device_mesh` keyword argument is accepted but discarded; any other
        extra keyword arguments are silently ignored.
        """
        # Drop the legacy `device_mesh` kwarg; the mesh now always comes
        # from `layout_map.device_mesh`.
        kwargs.pop("device_mesh", None)
        if layout_map is None:
            raise ValueError("You must specify a layout_map argument.")
        if not isinstance(layout_map, LayoutMap):
            raise ValueError(
                "Argument `layout_map` must be a `LayoutMap` instance. "
                f"Received: layout_map={layout_map}"
            )
        device_mesh = layout_map.device_mesh
        super().__init__(device_mesh)
        self._layout_map = layout_map
        # Default the batch dimension to the first mesh axis.
        self._batch_dim_name = batch_dim_name or self.device_mesh.axis_names[0]
        # Those following attributes might get convert to public methods.
        self._num_process = distribution_lib.num_processes()
        self._process_id = distribution_lib.process_id()
        self._is_multi_process = self._num_process > 1
    def get_data_layout(self, data_shape):
        """Return a layout sharding dim 0 on the batch mesh axis."""
        data_shard_spec = [None] * len(data_shape)
        data_shard_spec[0] = self._batch_dim_name  # Shard on the first dim
        return TensorLayout(data_shard_spec, self.device_mesh)
    def get_variable_layout(self, variable):
        """Look up the variable's layout by path; fall back to replication."""
        variable_layout = self._layout_map[variable.path]
        if variable_layout is not None:
            return variable_layout
        # No rule matched: fully replicate the variable.
        variable_shard_spec = [None] * len(variable.shape)
        return TensorLayout(variable_shard_spec, self.device_mesh)
    def get_tensor_layout(self, path):
        """Return the layout mapped for `path`, or `None` if unmapped."""
        return self._layout_map[path]
    def distribute_dataset(self, dataset):
        """Shard/replicate a `tf.data.Dataset` across processes.

        The per-process batching depends on how many model replicas live on
        each process (derived from the batch mesh axis). Raises ValueError
        for non-`tf.data.Dataset` inputs or statically unknown batch sizes.
        """
        from tensorflow.python.data.experimental.ops import (
            distribute as tf_data_distribute,
        )
        from keras.src.utils.module_utils import tensorflow as tf
        if not isinstance(dataset, tf.data.Dataset):
            raise ValueError(
                "Only `tf.data.Dataset` is supported for "
                f"sharding, got {type(dataset)}"
            )
        if not self._is_multi_process:
            return dataset
        # `compute_batch_size` reports a negative value when the batch size
        # cannot be statically determined (see error below).
        global_batch_size = tf_data_distribute.compute_batch_size(dataset)
        if global_batch_size.numpy() < 0:
            raise ValueError(
                "The batch size of the input dataset is "
                "unknown. Please config the batch size for "
                "the input dataset, e.g via `dataset.batch(batch_size)`"
            )
        # We need to compute the per-process/worker/host batch size.
        # This will depend on how many model replicas we have on each process.
        # Note that this might be smaller than one if model replicas are sharded
        # across multiple processes.
        mesh_batch_dim_index = self.device_mesh.axis_names.index(
            self._batch_dim_name
        )
        num_model_replicas = self.device_mesh.shape[mesh_batch_dim_index]
        if num_model_replicas == 1:
            # No sharding is needed in this case. Each process will have the
            # global batch size, and data from the iterator will need to be
            # replicated across all processes.
            return dataset.prefetch(tf.data.AUTOTUNE)
        num_model_replicas_per_process = num_model_replicas / self._num_process
        if num_model_replicas_per_process >= 1:
            # Each process will have one or more full model replicas. Data will
            # be sharded across all processes without replication.
            if global_batch_size % self._num_process != 0:
                raise ValueError(
                    "Global batch size must be divisible by the number of "
                    f"processes. `global_batch_size`={global_batch_size} and "
                    f"`num_process`={self._num_process}"
                )
            per_process_batch_size = global_batch_size // self._num_process
            distributed_dataset = dataset.rebatch(per_process_batch_size)
            distributed_dataset = distributed_dataset.shard(
                num_shards=self._num_process,
                index=self._process_id,
            )
            return distributed_dataset.prefetch(tf.data.AUTOTUNE)
        else:
            # Model replicas are sharded across multiple processes. Data will be
            # sharded across model replicas, and replicated across processes
            # within the same model replica.
            if global_batch_size % num_model_replicas != 0:
                raise ValueError(
                    "Global batch size must be divisible by the number of "
                    f"replicas. `global_batch_size`={global_batch_size} and "
                    f"`num_model_replicas`={num_model_replicas}"
                )
            per_process_batch_size = global_batch_size // num_model_replicas
            distributed_dataset = dataset.rebatch(per_process_batch_size)
            processes_per_replica = self._num_process // num_model_replicas
            # TODO: Figure out what the convention is for data sharding id.
            data_shard_id = self._process_id % processes_per_replica
            distributed_dataset = distributed_dataset.shard(
                num_shards=num_model_replicas,
                index=data_shard_id,
            )
            return distributed_dataset.prefetch(tf.data.AUTOTUNE)
@keras_export("keras.distribution.LayoutMap")
class LayoutMap(collections.abc.MutableMapping):
    """A dict-like object that maps string to `TensorLayout` instances.

    `LayoutMap` uses a string as key and a `TensorLayout` as value. Unlike
    a normal Python dict, the string key is treated as a regex when
    retrieving the value; see the docstring of `get` for details.

    Typically you define a naming schema for the `TensorLayout`s and then
    retrieve the matching instance. The lookup key is usually
    `variable.path`, the identifier of the variable.

    As a shortcut, a tuple or list of axis names is also allowed when
    inserting a value, and will be converted to a `TensorLayout`.

    ```python
    layout_map = LayoutMap(device_mesh)
    layout_map['dense.*kernel'] = (None, 'model')
    layout_map['dense.*bias'] = ('model',)
    layout_map['conv2d.*kernel'] = (None, None, None, 'model')
    layout_map['conv2d.*bias'] = ('model',)
    layout_1 = layout_map['dense_1.kernel'] # layout_1 == layout_2d
    layout_2 = layout_map['dense_1.bias'] # layout_2 == layout_1d
    layout_3 = layout_map['dense_2.kernel'] # layout_3 == layout_2d
    layout_4 = layout_map['dense_2.bias'] # layout_4 == layout_1d
    layout_5 = layout_map['my_model/conv2d_123/kernel'] # layout_5 == layout_4d
    layout_6 = layout_map['my_model/conv2d_123/bias'] # layout_6 == layout_1d
    layout_7 = layout_map['my_model/conv3d_1/kernel'] # layout_7 == None
    layout_8 = layout_map['my_model/conv3d_1/bias'] # layout_8 == None
    ```

    Args:
        device_mesh: `keras.distribution.DeviceMesh` instance.
    """

    def __init__(self, device_mesh):
        # Ordered so that regex fallback iterates keys in insertion order.
        self._layout_map = collections.OrderedDict()
        self._device_mesh = device_mesh

    def __getitem__(self, key):
        """Retrieves the corresponding layout by the string key.

        When there isn't an exact match, every existing key in the map is
        treated as a regex and matched against the input key. If multiple
        regexes match, a `ValueError` is raised. Returns `None` when no
        match is found at all.

        Args:
            key: String key to query a layout.

        Returns:
            Corresponding layout based on the query.
        """
        if key in self._layout_map:
            return self._layout_map[key]

        matching_keys = [k for k in self._layout_map if re.search(k, key)]
        if len(matching_keys) > 1:
            raise ValueError(
                f"Path '{key}' matches multiple layout "
                f"specification keys: {matching_keys}. Please make "
                "sure each tensor/variable path only matches at most "
                "one layout specification key in the LayoutMap."
            )
        if matching_keys:
            return self._layout_map[matching_keys[0]]
        return None

    def __setitem__(self, key, layout):
        """Insert a `TensorLayout` into the map.

        Args:
            key: String key for the `TensorLayout`.
            layout: The `TensorLayout`. As a shortcut, a tuple of strings
                and `None`s is also accepted and converted.
        """
        if key in self._layout_map:
            raise ValueError(
                f"{key} already exist in the LayoutMap with "
                f"value {self._layout_map[key]}. Please make sure to "
                "not use duplicated keys."
            )
        if isinstance(layout, tuple):
            layout = TensorLayout(axes=layout, device_mesh=None)
        if not isinstance(layout, TensorLayout):
            raise ValueError(
                f"{layout} should be a TensorLayout type, got {type(layout)}"
            )
        # Attach our mesh to layouts that were created without one.
        self._maybe_populate_device_mesh(layout)
        self._layout_map[key] = layout

    def __delitem__(self, key):
        # let the dict to handle the key missing error
        return self._layout_map.pop(key)

    def __len__(self):
        return len(self._layout_map)

    def __iter__(self):
        return iter(self._layout_map)

    @property
    def device_mesh(self):
        return self._device_mesh

    def _maybe_populate_device_mesh(self, layout):
        if layout.device_mesh is None and self.device_mesh is not None:
            layout.device_mesh = self.device_mesh

LayoutMap.get.__doc__ = LayoutMap.__getitem__.__doc__
@keras_export("keras.distribution.distribute_tensor")
def distribute_tensor(tensor, layout):
    """Change the layout of a tensor value in the jit function execution.

    Args:
        tensor: a tensor whose layout should be changed.
        layout: `TensorLayout` to be applied to the value.

    Returns:
        A new value with the specified tensor layout.
    """
    # Symbolic `KerasTensor`s only exist for functional-model construction;
    # they carry no backing data whose layout/sharding could be altered.
    if isinstance(tensor, KerasTensor):
        return tensor
    return distribution_lib.distribute_tensor(tensor, layout)
@keras_export("keras.distribution.distribution")
def distribution():
    """Return the `Distribution` currently set in the global context, if any."""
    return global_state.get_global_attribute(GLOBAL_ATTRIBUTE_NAME)
@keras_export("keras.distribution.set_distribution")
def set_distribution(value):
    """Install `value` as the global distribution setting.

    Args:
        value: a `Distribution` instance.
    """
    global_state.set_global_attribute(GLOBAL_ATTRIBUTE_NAME, value)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@distribution@distribution_lib.py@.PATH_END.py
|
{
"filename": "test_coreBurkert.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/test/test_LensModel/test_Profiles/test_coreBurkert.py",
"type": "Python"
}
|
__author__ = "dgilman"
from lenstronomy.LensModel.Profiles.coreBurkert import CoreBurkert
import numpy as np
import numpy.testing as npt
import pytest
class TestcBurk(object):
    """Numerical self-consistency tests for the cored Burkert lens profile.

    (The old docstring said "Gaussian methods", which was a copy-paste
    leftover; this class exercises `CoreBurkert` only.)
    """

    def setup_method(self):
        self.cb = CoreBurkert()

    def _kappa_integrand(self, x, y, Rs, m0, r_core):
        # Integrand of the projected mass: 2*pi*R * Sigma(R).
        return 2 * np.pi * x * self.cb.density_2d(x, y, Rs, m0, r_core)

    def test_mproj(self):
        # d(M2D)/dR computed numerically must equal 2*pi*R*Sigma(R).
        rs = 10
        core_radius = 0.7 * rs
        norm = 1
        radii = np.linspace(0.5 * rs, 1.5 * rs, 1000000)
        step = radii[1] - radii[0]
        projected_mass = self.cb.mass_2d(radii, rs, norm, core_radius)
        numeric_integrand = np.gradient(projected_mass, step)
        analytic_integrand = self._kappa_integrand(
            radii, 0, rs, norm, core_radius
        )
        npt.assert_almost_equal(numeric_integrand, analytic_integrand, decimal=3)

    def test_potential(self):
        # dPsi/dR computed numerically must match the analytic deflection.
        rs = 10
        density_norm = 1
        core_radius = 0.6 * rs
        radii = np.linspace(0.1 * rs, 2 * rs, 1000000)
        psi = self.cb.function(radii, 0, rs, density_norm, core_radius)
        numeric_alpha = np.gradient(psi, radii[1] - radii[0])
        analytic_alpha = self.cb.derivatives(
            radii, 0, rs, density_norm, core_radius
        )[0]
        npt.assert_almost_equal(numeric_alpha, analytic_alpha, decimal=4)

    def test_derivatives(self):
        # The deflection must satisfy alpha(R) = M2D(R) / (pi * R).
        rs = 10
        density_norm = 1
        core_radius = 7
        radii = np.linspace(0.1 * rs, 4 * rs, 1000)
        alpha = self.cb.coreBurkAlpha(
            radii, rs, density_norm, core_radius, radii, 0
        )[0]
        alpha_expected = (
            self.cb.mass_2d(radii, rs, density_norm, core_radius) / np.pi / radii
        )
        npt.assert_almost_equal(alpha / alpha_expected, 1)

    def test_rho_angle_transform(self):
        # rho0 <-> alpha_Rs conversions must be mutually consistent.
        rs = float(10)
        density_norm = float(1)
        core_radius = float(7)
        alpha_rs = self.cb._rho2alpha(density_norm, rs, core_radius)
        alpha_rs_direct = self.cb.coreBurkAlpha(
            rs, rs, density_norm, core_radius, rs, 0
        )[0]
        npt.assert_almost_equal(alpha_rs * alpha_rs_direct**-1, 1)
        recovered_norm = self.cb._alpha2rho0(alpha_rs, rs, core_radius)
        npt.assert_almost_equal(density_norm, recovered_norm)
if __name__ == "__main__":
    # Allow running this test module directly, outside of a pytest session.
    pytest.main()
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@test@test_LensModel@test_Profiles@test_coreBurkert.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/utils/__init__.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2014-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Miscellaneous utilties for GWpy
"""
from sys import stdout
from .misc import (
gprint,
if_not_none,
null_context,
round_to_power,
unique,
)
__author__ = 'Duncan Macleod <duncan.macleod@ligo.org>'
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@utils@__init__.py@.PATH_END.py
|
{
"filename": "10_AF_Lep_MCMC.ipynb",
"repo_name": "markusbonse/fours",
"repo_path": "fours_extracted/fours-main/docs/source/04_use_the_fours/paper_experiments/10_AF_Lep_MCMC.ipynb",
"type": "Jupyter Notebook"
}
|
# MCMC for AF Lep b
## 1. Imports
```python
import os
os.environ['OMP_NUM_THREADS'] = '5'
```
```python
from pathlib import Path
from tqdm import tqdm
import numpy as np
# Plotting
import cmocean
import matplotlib.pylab as plt
color_map = cmocean.cm.ice
import seaborn as sns
from matplotlib.lines import Line2D
# Methods
import pynpoint as pp
from pynpoint.core.dataio import OutputPort, InputPort
from pynpoint.readwrite.hdf5reading import Hdf5ReadingModule
from fours.utils.pca import pca_psf_subtraction_gpu
from fours.utils.data_handling import read_fours_root_dir
# Evaluation
from applefy.detections.uncertainty import compute_detection_uncertainty
from applefy.utils.photometry import AperturePhotometryMode
from applefy.statistics import TTest
```
## 2. All directories needed
```python
root_dir = Path(read_fours_root_dir())
```
```python
experiment_root_dir = root_dir / Path("70_results/x2_af_lep/MCMC/")
```
```python
fwhm = 3.6
```
## 3. Estimate the Photometry and astrometry with PynPoint
### 3.1 Create the PynPoint database
```python
pipeline = pp.Pypeline(
working_place_in=str(experiment_root_dir),
input_place_in=str(experiment_root_dir),
output_place_in=str(experiment_root_dir))
```
================
PynPoint v0.10.1
================
A new version (0.11.0) is available!
Want to stay informed about updates, bug fixes, and new features?
Please consider using the 'Watch' button on the Github page:
https://github.com/PynPoint/PynPoint
Working place: /home/ipa/quanz/user_accounts/mbonse/2023_S4/70_results/x2_af_lep/MCMC
Input place: /home/ipa/quanz/user_accounts/mbonse/2023_S4/70_results/x2_af_lep/MCMC
Output place: /home/ipa/quanz/user_accounts/mbonse/2023_S4/70_results/x2_af_lep/MCMC
Database: /home/ipa/quanz/user_accounts/mbonse/2023_S4/70_results/x2_af_lep/MCMC/PynPoint_database.hdf5
Configuration: /home/ipa/quanz/user_accounts/mbonse/2023_S4/70_results/x2_af_lep/MCMC/PynPoint_config.ini
Number of CPUs: 32
Number of threads: 5
### 3.2 Read in the HDF5 file
```python
read_data_module = Hdf5ReadingModule(
name_in="01_read_data",
input_filename="HD35850_294_088_C-0085_A_.hdf5",
input_dir=str(root_dir / Path("30_data")),
tag_dictionary={"object_selected" : "object_selected",
"psf_selected" : "psf_selected"})
```
```python
pipeline.add_module(read_data_module)
```
```python
pipeline.run_module("01_read_data")
```
-----------------
Hdf5ReadingModule
-----------------
Module name: 01_read_data
Reading HDF5 file... [DONE]
Output ports: object_selected (13809, 165, 165), psf_selected (1526, 23, 23)
### 3.3 Cut and stack the science data for faster PCA iterations
```python
science_data = pipeline.get_data("object_selected")
angles = pipeline.get_attribute("object_selected", "PARANG", static=False)
print(science_data.shape)
```
(13809, 165, 165)
(13809,)
```python
science_data_cut = science_data[:, 25:-25, 25:-25]
```
```python
binning = 5 # stack every 5 frames
angles_stacked = np.array([
np.mean(i)
for i in np.array_split(angles, int(len(angles) / binning))])
science_stacked = np.array([
np.mean(i, axis=0)
for i in np.array_split(science_data_cut, int(len(angles) / binning))])
```
```python
obj_port = OutputPort(
"01_science_prep",
pipeline.m_data_storage)
obj_port.set_all(science_stacked)
```
```python
input_port = InputPort(
"object_selected",
pipeline.m_data_storage)
obj_port.copy_attributes(input_port)
```
```python
pipeline.set_attribute("01_science_prep", "PARANG", angles_stacked, False)
```
### 3.4 Find the best number of PCA components
```python
science_data_prep = pipeline.get_data("01_science_prep")
angles_prep = pipeline.get_attribute("01_science_prep", "PARANG", static=False)
angles_prep = np.deg2rad(angles_prep)
```
```python
print(science_data_prep.shape)
print(angles_prep.shape)
```
(2761, 115, 115)
(2761,)
```python
num_components = np.arange(5, 200, 10)
```
```python
pca_residuals = pca_psf_subtraction_gpu(
images=science_data_prep,
angles=angles_prep,
pca_numbers=num_components,
device="cpu",
approx_svd=2000,
verbose = True)
```
Compute PCA basis ...[DONE]
Compute PCA residuals ...
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20/20 [00:10<00:00, 1.83it/s]
[DONE]
```python
init_planet_position = np.array((68.55, 54.78)) # Result from previous mcmc
# Use pixel values spaced by the FWHM
photometry_mode_planet = AperturePhotometryMode("AS", search_area=0.5, psf_fwhm_radius=fwhm/2)
photometry_mode_noise = AperturePhotometryMode("AS", psf_fwhm_radius=fwhm/2)
```
```python
all_snr_pca = []
for i in tqdm(range(len(num_components))):
_, _, snr_mean = compute_detection_uncertainty(
frame=pca_residuals[i],
planet_position=init_planet_position,
statistical_test=TTest(),
psf_fwhm_radius=fwhm,
photometry_mode_planet=photometry_mode_planet,
photometry_mode_noise=photometry_mode_noise,
safety_margin=1.,
num_rot_iter=50)
all_snr_pca.append(np.round(np.mean(snr_mean), 1))
```
100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20/20 [00:06<00:00, 2.89it/s]
```python
plt.plot(num_components, all_snr_pca)
plt.ylabel("S/N", fontsize=12)
plt.xlabel("PCA components", fontsize=12)
plt.title("Find the best S/N of PCA", fontsize=14)
plt.xlim(0, 200)
plt.ylim(2, 5)
plt.grid()
```

```python
best_num_pca = num_components[np.argmax(all_snr_pca)]
print("The best number of components is: " + str(best_num_pca))
print("The peak S/N is : " + str(np.max(all_snr_pca)))
```
The best number of components is: 65
The peak S/N is : 4.7
```python
pca_residual_frame = pca_residuals[np.argmax(all_snr_pca)]
median= np.median(pca_residual_frame)
scale = np.max(np.abs(pca_residual_frame))
zoom = 15
plt.imshow(
pca_residual_frame[zoom:-zoom, zoom:-zoom],
cmap=color_map,
vmin=median - scale*0.5, vmax=median + scale*0.8,
origin="lower")
plt.axis("off")
plt.title("Optimal PCA (65 components)", fontsize=14,y=1.01)
```
Text(0.5, 1.01, 'Optimal PCA (65 components)')

### 3.5 Create a Master Template for the PSF
```python
psf_frames = pipeline.get_data("psf_selected")
```
```python
psf_template = np.median(psf_frames, axis=0)
```
```python
# pad the psf template
padded_psf = np.pad(psf_template,
pad_width=((46, 46), (46, 46)),
mode='constant',
constant_values=0)
plt.imshow(padded_psf)
plt.axis("off")
```
(-0.5, 114.5, 114.5, -0.5)

```python
psf_port = OutputPort("01_psf_padded",
pipeline.m_data_storage)
psf_port.set_all(np.expand_dims(padded_psf, axis=0))
```
### 3.6 Simplex minimization, used as initialization for MCMC
```python
simplex_module = pp.SimplexMinimizationModule(
name_in='03_simplex',
image_in_tag='01_science_prep',
psf_in_tag='01_psf_padded',
res_out_tag='03_simplex_residual',
flux_position_tag='03_simplex_result',
position=(init_planet_position[0], init_planet_position[1]),
magnitude=9.5,
psf_scaling=-1/0.0179, # The data is taken with an ND filter
merit='hessian',
tolerance=0.1,
pca_number=int(best_num_pca),
residuals='mean',
offset=2)
```
```python
pipeline.add_module(simplex_module)
pipeline.run_module('03_simplex')
```
-------------------------
SimplexMinimizationModule
-------------------------
Module name: 03_simplex
Input ports: 01_science_prep (2761, 115, 115), 01_psf_padded (1, 115, 115)
Input parameters:
- Number of principal components = [65]
- Figure of merit = hessian
- Residuals type = mean
- Absolute tolerance (pixels/mag) = 0.1
- Maximum offset = 2
- Guessed position (x, y) = (68.55, 54.78)
- Aperture position (x, y) = (69, 55)
- Aperture radius (pixels) = 3
Image center (y, x) = (57.0, 57.0)
Simplex minimization... 65 PC - chi^2 = 1.52e+01 [DONE]
Best-fit parameters:
- Position (x, y) = (68.78, 54.75)
- Separation (mas) = 323.70
- Position angle (deg) = 259.19
- Contrast (mag) = 9.98
Output ports: 03_simplex_residual (41, 115, 115), 03_simplex_result (41, 6)
```python
simplex_results = pipeline.get_data('03_simplex_result')
```
```python
# Simplex Error
plt.plot(simplex_results[:, -1])
plt.yscale("log")
plt.xlabel("Iterations", fontsize=12)
plt.ylabel("Hessian Error", fontsize=12)
plt.title("Simplex minimization", fontsize=14)
plt.xlim(0, 40)
plt.grid()
```

```python
best_idx = np.argmin(simplex_results[:, -1])
best_idx
```
40
```python
simplex_best_result = simplex_results[best_idx, :]
simplex_best_result
```
array([ 68.77608758, 54.75116433, 0.32370005, 259.18860748,
9.98117147, 15.16769487])
```python
residual_no_planet = pipeline.get_data('03_simplex_residual')[best_idx]
```
```python
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 5))
# Plot the original residual
median= np.median(pca_residual_frame)
scale = np.max(np.abs(pca_residual_frame))
zoom = 15
ax1.imshow(
pca_residual_frame[zoom:-zoom, zoom:-zoom],
cmap=color_map,
vmin=median - scale*0.5, vmax=median + scale*0.8,
origin="lower")
ax1.axis("off")
ax1.set_title("Optimal PCA (65 components)", fontsize=14,y=1.01)
# Plot the residual without the planet
ax2.imshow(
residual_no_planet[zoom:-zoom, zoom:-zoom],
cmap=color_map,
vmin=median - scale*0.5, vmax=median + scale*0.8,
origin="lower")
ax2.axis("off")
ax2.set_title("Optimal PCA - After subtraction", fontsize=14,y=1.01)
plt.tight_layout()
```

### 3.7 Run MCMC to get an estimate of the Error
```python
# Bounds for the MCMC
# Separations +- 100 mas
# Angle +- 10 deg
# Contrast +- 1 mag
dsep, dphi, dcontrast = 0.1, 10, 1.0
mcmc_module = pp.MCMCsamplingModule(
name_in='04_MCMC_planet',
image_in_tag='01_science_prep',
psf_in_tag='01_psf_padded',
chain_out_tag='04_MCMC_chain',
param=tuple(simplex_best_result[2:5]),
bounds=((simplex_best_result[2]-dsep, simplex_best_result[2]+dsep),
(simplex_best_result[3]-dphi, simplex_best_result[3]+dphi),
(simplex_best_result[4]-dcontrast, simplex_best_result[4]+dcontrast)),
nwalkers=100,
nsteps=500,
psf_scaling=-1/0.0179, # ND filter
pca_number=int(best_num_pca),
mask=None,
extra_rot=0.0,
merit='hessian',
residuals='mean',
resume=True)
```
```python
pipeline.add_module(mcmc_module)
pipeline.run_module('04_MCMC_planet')
```
### 3.8 Plot the result
```python
mcmc_results = pipeline.get_data('04_MCMC_chain')
```
```python
mcmc_angles = mcmc_results[-50:,:, 1].flatten()
mcmc_separations = mcmc_results[-50:,:, 0].flatten() * 1000
mcmc_contrast = mcmc_results[-50:,:, 2].flatten()
```
The true north in NACO can be off by about 0.5 deg. Rameau et al. (2013) calibrated the true north for our dataset.
We use their calibration to update the position angles.
```python
mcmc_angles = mcmc_angles - 0.38 # the error is negligible
```
```python
def get_statistic(sample_in):
median = np.median(sample_in)
low = np.quantile(sample_in, 0.16)
high = np.quantile(sample_in, 0.84)
plus = high - median
minus = low - median
return np.round(median, 2), np.round(plus, 2), np.round(minus, 2)
```
```python
print(get_statistic(mcmc_angles))
median_angle, plus_angle, minus_angle = get_statistic(mcmc_angles)
```
(258.81, 0.53, -0.59)
```python
print(get_statistic(mcmc_separations))
median_separations, plus_separations, minus_separations = get_statistic(mcmc_separations)
```
(323.24, 6.71, -6.44)
```python
x = mcmc_angles
y = mcmc_separations
# Draw a combo histogram and scatterplot with density contours
f, ax = plt.subplots(figsize=(6, 6))
sns.scatterplot(
x=mcmc_angles,
y=mcmc_separations,
s=100,
color=".15",
alpha=0.05,
label='MCMC planet position sample')
ax.errorbar(
x=median_angle,
xerr=np.array([plus_angle, -minus_angle]).reshape(2, 1),
y=median_separations,
yerr=np.array([plus_separations, -minus_separations]).reshape(2, 1),
c='darkorange',
lw=2,
capsize=5)
legend_elements = [Line2D([0], [0], marker='o', color='w', label='MCMC Planet Position Sample',
markerfacecolor='k', markersize=10, alpha=0.2),
Line2D([0], [0], color='darkorange', lw=2, label='Error [16%, 84%]')]
ax.legend(handles=legend_elements, loc='best')
ax.ticklabel_format(useOffset=False)
ax.set_xlabel('Separation (arcsec)')
ax.set_ylabel(r'Position angle ($^\circ$)')
ax.set_title('AF Lep b - NACO position estimate Oct 2011')
f.savefig('final_plots/x1_AF_Lep_NACO_position_estimate.pdf', bbox_inches='tight')
plt.show()
```

```python
print(get_statistic(mcmc_contrast))
median_contrast, plus_contrast, minus_contrast = get_statistic(mcmc_contrast)
```
(10.03, 0.13, -0.12)
```python
host_mag = 4.93
host_error = 0.01
```
```python
apparent_mag = np.round(median_contrast + host_mag, 2)
apparent_mag
```
14.96
```python
Abs_mag = 12.81
```
|
markusbonseREPO_NAMEfoursPATH_START.@fours_extracted@fours-main@docs@source@04_use_the_fours@paper_experiments@10_AF_Lep_MCMC.ipynb@.PATH_END.py
|
{
"filename": "CalcOrbitalElements.ipynb",
"repo_name": "bradkav/BlackHolesDarkDress",
"repo_path": "BlackHolesDarkDress_extracted/BlackHolesDarkDress-master/Nbody/CalcOrbitalElements.ipynb",
"type": "Jupyter Notebook"
}
|
# Calculating the orbital elements for simulations
```python
%matplotlib inline
from __future__ import division
from pygadgetreader import *
import numpy as np
import matplotlib.pyplot as pl
import matplotlib as mpl
from tqdm import tqdm
import sys
#----- MATPLOTLIB paramaters ---------
mpl.rcParams.update({'font.size': 18,'font.family':'serif'})
mpl.rcParams['xtick.major.size'] = 7
mpl.rcParams['xtick.major.width'] = 1
mpl.rcParams['xtick.minor.size'] = 3
mpl.rcParams['xtick.minor.width'] = 1
mpl.rcParams['ytick.major.size'] = 7
mpl.rcParams['ytick.major.width'] = 1
mpl.rcParams['ytick.minor.size'] = 3
mpl.rcParams['ytick.minor.width'] = 1
mpl.rcParams['ytick.right'] = True
mpl.rc('text', usetex=True)
#--------------------------------------
from scipy.interpolate import interp1d
from scipy.integrate import quad
```
### Some constants and parameters
You should specify the name of the simulation with `simID`, as well as the total number of snapshots - this varies between simulations, so check the 'out/' folder of each simulation to find it.
```python
G_N = 4.302e-3 #(pc/solar mass) (km/s)^2
simID = "M30_a0.01_lo_bin_e0.995_full"
Nframes = 2001
#simID = "M1_a0.002_hi_bin_e0.995"
#Nframes = 550
rootdir = "sims/" + simID + "/out/"
print "Loading output from folder: ", rootdir
M_PBH = readsnap(rootdir+"snapshot_000", "mass", 1, suppress=1)[0]
```
Loading output from folder: sims/M30_a0.01_lo_bin_e0.995_full/out/
```python
#Read in the first header file
part_string = ["dmcount", "diskcount","bulgecount", "starcount","bndrycount"]
nParticles = np.zeros(5, dtype='int')
for i in range(5):
nParticles[i] = readheader(rootdir+"snapshot_000", part_string[i])
nPBH = nParticles[0]
nDM = nParticles[1:]
print " Number of PBHs:", nPBH
print " Number of DM particles:", nDM
mDM = np.zeros(4)
for i in range(4):
#print nDM[i]
if (nDM[i] > 0):
#print readsnap(rootdir+"snapshot_000", "mass", i+2, suppress=1)[0]
mDM[i] = readsnap(rootdir+"snapshot_000", "mass", i+2, suppress=1)[0]
print " DM particle masses:", mDM
r_tr = 1.0
aPBH = 1.0
scaling = 1e-5
```
Number of PBHs: 2
Number of DM particles: [18080 0 0 0]
DM particle masses: [ 0.00034321 0. 0. 0. ]
#### Functions for reading in and calculating simulation properties
```python
def Getv_PBH(fid):
id = format(fid, '03d')
vPBH = readsnap(rootdir+"snapshot_" + str(id), "vel", 1, suppress=1)
return vPBH[0,:] - vPBH[1,:]
def Getr_PBH(fid):
id = format(fid, '03d')
xPBH = scaling*readsnap(rootdir+"snapshot_" + str(id), "pos", 1, suppress=1)
return xPBH[0,:] - xPBH[1,:]
```
#### Functions for calculating the semi-major axis and eccentricity
```python
mu1 = 2.0*G_N*M_PBH
def specific_energy(r, v_abs):
return 0.5*(v_abs)**2 - (mu1/r)
def specific_angmom(x,v):
h = np.cross(x,v)
return np.sqrt(np.sum(h**2))
def semimajoraxis(x,v):
r = np.sqrt(np.sum(x**2))*1
v_abs = np.sqrt(np.sum(v**2))
eps = specific_energy(r, v_abs)
return -0.5*mu1/eps
def ecc(x, v):
r0 = 1
r = np.sqrt(np.sum(x**2))*r0
v_abs = np.sqrt(np.sum(v**2))
eps = specific_energy(r, v_abs)
h = specific_angmom(x*r0,v)
return np.sqrt(1.0+2.0*eps*h**2/mu1**2)
```
#### Get times of each snapshot
```python
dt = 9.785e-3 #kyr
def GetSimTime(fid):
id = format(fid, '03d')
t = readheader(rootdir+"snapshot_" + id, "time")
return t
tlist = np.zeros(Nframes)
for i in tqdm(range(Nframes)):
tlist[i] = GetSimTime(i)
tvals = tlist*dt
```
100%|██████████| 2001/2001 [00:01<00:00, 1920.09it/s]
#### Calculating + plotting $a$ and $e$ of the PBH pair
Note that here `e` is the eccentricity of the PBH-PBH system (i.e. as if there were no DM)...
```python
e_list = np.zeros(Nframes)
a_list = np.zeros(Nframes)
for i in tqdm(range(Nframes)):
v1 = Getv_PBH(i)
r1 = Getr_PBH(i)
a_list[i] = semimajoraxis(r1,v1)
e_list[i] = ecc(r1, v1)
print "Final semi-major axis [pc]:", np.mean(a_list[-10:])
print "Final eccentricity:", np.mean(e_list[-10:])
```
100%|██████████| 2001/2001 [00:03<00:00, 662.70it/s]
Final semi-major axis [pc]: 0.00129788226136
Final eccentricity: 0.960376444091
```python
fig, ax1 = pl.subplots(figsize=(7.5,5))
ax1.semilogy(tvals,a_list)
#ax1.set_ylim(5e-4, 5e-2)
ax1.set_xlabel(r"Simulation time [kyr]")
ax1.set_ylabel(r"Semi-major axis [pc]")
pl.tight_layout()
```

```python
fig, ax1 = pl.subplots(figsize=(7.5,5))
ax1.semilogy(tvals,1-e_list)
#ax1.set_ylim(1e-3, 1.0)
ax1.set_xlabel(r"Simulation time [kyr]")
ax1.set_ylabel(r"$(1-e)")
pl.tight_layout()
```

|
bradkavREPO_NAMEBlackHolesDarkDressPATH_START.@BlackHolesDarkDress_extracted@BlackHolesDarkDress-master@Nbody@CalcOrbitalElements.ipynb@.PATH_END.py
|
{
"filename": "_colorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattercarpet/hoverlabel/font/_colorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``colorsrc`` property of
    ``scattercarpet.hoverlabel.font``."""

    def __init__(
        self,
        plotly_name="colorsrc",
        parent_name="scattercarpet.hoverlabel.font",
        **kwargs
    ):
        # Extract overridable defaults before forwarding to the base class.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(ColorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattercarpet@hoverlabel@font@_colorsrc.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymap/hoverlabel/font/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the ``color`` property of
    ``densitymap.hoverlabel.font``."""

    def __init__(
        self, plotly_name="color", parent_name="densitymap.hoverlabel.font", **kwargs
    ):
        # Extract overridable defaults before forwarding to the base class.
        array_ok = kwargs.pop("array_ok", True)
        edit_type = kwargs.pop("edit_type", "none")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=array_ok,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymap@hoverlabel@font@_color.py@.PATH_END.py
|
{
"filename": "numba_convolution.py",
"repo_name": "sibirrer/lenstronomy",
"repo_path": "lenstronomy_extracted/lenstronomy-main/lenstronomy/ImSim/Numerics/numba_convolution.py",
"type": "Python"
}
|
import numpy as np
from lenstronomy.Util import numba_util
from lenstronomy.ImSim.Numerics.partial_image import PartialImage
from lenstronomy.Util import image_util
__all__ = ["NumbaConvolution"]
class NumbaConvolution(object):
    """Class to convolve explicit pixels only.

    The convolution is inspired by pyautolens:
    https://github.com/Jammy2211/PyAutoLens
    """

    def __init__(
        self,
        kernel,
        conv_pixels,
        compute_pixels=None,
        nopython=True,
        cache=True,
        parallel=False,
        memory_raise=True,
    ):
        """
        :param kernel: convolution kernel in units of the image pixels provided, odd length per axis
        :param conv_pixels: bool array same size as data, pixels to be convolved and their light to be blurred
        :param compute_pixels: bool array of size of image, these pixels (if True) will get blurred light from other
            pixels
        :param nopython: bool, numba jit setting to use python or compiled.
        :param cache: bool, numba jit setting to use cache
        :param parallel: bool, numba jit setting to use parallel mode
        :param memory_raise: bool, if True, checks whether memory required to store the convolution kernel is within
            certain bounds
        """
        # NOTE(review): the numba settings below are currently not forwarded;
        # the jit decorators on the static methods use numba_util defaults.
        # numba_util.nopython = nopython
        # numba_util.cache = cache
        # numba_util.parallel = parallel
        self._memory_raise = memory_raise
        self._kernel = kernel
        self._conv_pixels = conv_pixels
        self._nx, self._ny = np.shape(conv_pixels)
        if compute_pixels is None:
            # Default: every pixel of the image may receive blurred light.
            compute_pixels = np.ones_like(conv_pixels)
        compute_pixels = np.array(compute_pixels, dtype=bool)
        assert np.shape(conv_pixels) == np.shape(compute_pixels)
        self._mask = compute_pixels
        # PartialImage instances map between the full 2d grid and compact 1d
        # arrays of the selected (True) pixels.
        self._partialInput = PartialImage(partial_read_bools=conv_pixels)
        self._partialOutput = PartialImage(partial_read_bools=compute_pixels)
        index_array_out = self._partialOutput.index_array
        index_array_input = self._partialInput.index_array
        kernel_shape = kernel.shape
        # Upper bound on the number of output pixels one input pixel can touch.
        self.kernel_max_size = kernel_shape[0] * kernel_shape[1]
        image_index = 0
        if (
            self._partialInput.num_partial * self.kernel_max_size > 10**9
            and self._memory_raise is True
        ):
            raise ValueError(
                "kernel length %s combined with data size %s requires %s memory elements, which might"
                "exceed the memory limit and thus gives a raise. If you wish to ignore this raise, set"
                " memory_raise=False"
                % (
                    self.kernel_max_size,
                    self._partialInput.num_partial,
                    self._partialInput.num_partial * self.kernel_max_size,
                )
            )
        # Pre-computed per-input-pixel response: which 1d output indexes receive
        # which kernel weights, and how many of the allocated slots are used.
        self._image_frame_indexes = np.zeros(
            (self._partialInput.num_partial, self.kernel_max_size), dtype="int"
        )
        self._image_frame_psfs = np.zeros(
            (self._partialInput.num_partial, self.kernel_max_size)
        )
        self._image_frame_lengths = np.zeros(
            (self._partialInput.num_partial), dtype="int"
        )
        # Fill the frame tables in the same order PartialImage enumerates the
        # selected pixels (row-major scan over conv_pixels).
        for x in range(index_array_input.shape[0]):
            for y in range(index_array_input.shape[1]):
                if conv_pixels[x][y]:
                    (
                        image_frame_psfs,
                        image_frame_indexes,
                        frame_length,
                    ) = self._pre_compute_frame_kernel(
                        (x, y), self._kernel[:, :], compute_pixels, index_array_out
                    )
                    self._image_frame_indexes[image_index, :] = image_frame_indexes
                    self._image_frame_psfs[image_index, :] = image_frame_psfs
                    self._image_frame_lengths[image_index] = frame_length
                    image_index += 1

    def convolve2d(self, image):
        """2d convolution.

        :param image: 2d numpy array, image to be convolved
        :return: convolved image, 2d numpy array
        """
        # Reduce the image to the 1d array of pixels selected for convolution.
        image_array_partial = self._partialInput.partial_array(image)
        conv_array = self._convolve_jit(
            image_array_partial,
            num_data=self._partialOutput.num_partial,
            image_frame_kernels=self._image_frame_psfs,
            image_frame_indexes=self._image_frame_indexes,
            image_frame_lengths=self._image_frame_lengths,
        )
        # Expand the 1d result back onto the full 2d grid.
        conv_image = self._partialOutput.image_from_partial(conv_array)
        return conv_image

    @staticmethod
    @numba_util.jit()
    def _pre_compute_frame_kernel(image_index, kernel, mask, index_array):
        """Pre-compute the output response of one input pixel.

        :param image_index: (int, int) index of pixels
        :param kernel: kernel, 2d rectangular array
        :param mask: mask (size of full image)
        :param index_array: 2d array mapping image coordinates to 1d output indexes
        :return:
            frame_kernels: values of kernel
            frame_indexes: (int) 1d index corresponding to the pixel receiving the kernel value
            frame_counter: number of pixels with non-zero addition due to kernel convolution
        """
        kernel_shape = kernel.shape
        i0, j0 = image_index
        kx, ky = kernel_shape[0], kernel_shape[1]
        mask_shape = index_array.shape
        nx, ny = mask_shape[0], mask_shape[1]
        # Half-widths of the (odd-sized) kernel.
        kx2 = int((kx - 1) / 2)
        ky2 = int((ky - 1) / 2)
        frame_counter = 0
        frame_kernels = np.zeros(kx * ky)
        frame_indexes = np.zeros(kx * ky)
        for i in range(kx):
            for j in range(ky):
                # Image coordinates that kernel element (i, j) falls on.
                x = i0 + i - kx2
                y = j0 + j - ky2
                # Keep only in-bounds positions that are flagged for output.
                if 0 <= x < nx and 0 <= y < ny:
                    if mask[x, y]:
                        frame_indexes[frame_counter] = index_array[x, y]
                        frame_kernels[frame_counter] = kernel[i, j]
                        frame_counter += 1
        return frame_kernels, frame_indexes, frame_counter

    @staticmethod
    @numba_util.jit()
    def _convolve_jit(
        image_array,
        num_data,
        image_frame_kernels,
        image_frame_indexes,
        image_frame_lengths,
    ):
        """Scatter each input pixel's flux onto its pre-computed output indexes.

        :param image_array: selected subset of image in 1d array conventions
        :param num_data: number of 1d data that get convolved light and are output
        :param image_frame_kernels: kernel weights per input pixel
        :param image_frame_indexes: 1d output indexes that have a response for a certain pixel
        :param image_frame_lengths: number of used entries per row of the frame tables
        :return: 1d convolved output array of length num_data
        """
        conv_array = np.zeros(num_data)
        for image_index in range(
            len(image_array)
        ):  # loop through pixels that are to be blurred
            value = image_array[image_index]  # value of pixel that gets blurred
            frame_length = image_frame_lengths[
                image_index
            ]  # number of pixels that gets assigned a fraction of the convolution
            frame_indexes = image_frame_indexes[
                image_index
            ]  # list of 1d indexes that get added flux from the blurred image
            frame_kernels = image_frame_kernels[
                image_index
            ]  # values of kernel for each frame indexes
            for kernel_index in range(
                frame_length
            ):  # loop through all pixels that are impacted by the kernel of the pixel being blurred
                vector_index = frame_indexes[
                    kernel_index
                ]  # 1d coordinate of pixel to be added value
                kernel = frame_kernels[kernel_index]  # kernel response of pixel
                conv_array[vector_index] += value * kernel  # add value to pixel
        return conv_array
class SubgridNumbaConvolution(object):
    """Class that inputs a supersampled grid and convolution kernel and computes
    the response on the regular grid.

    This makes use of the regular NumbaConvolution class in a loop through the
    different sub-pixel positions.
    """

    def __init__(
        self,
        kernel_super,
        supersampling_factor,
        conv_pixels,
        compute_pixels=None,
        kernel_size=None,
        nopython=True,
        cache=True,
        parallel=False,
    ):
        """
        :param kernel_super: convolution kernel in units of super sampled pixels provided, odd length per axis
        :param supersampling_factor: factor of supersampling relative to pixel grid
        :param conv_pixels: bool array same size as data, pixels to be convolved and their light to be blurred
        :param compute_pixels: bool array of size of image, these pixels (if True) will get blurred light from other
            pixels
        :param kernel_size: if not None, the per-subpixel effective kernel is cut to this size (pixels per axis)
        :param nopython: bool, numba jit setting to use python or compiled.
        :param cache: bool, numba jit setting to use cache
        :param parallel: bool, numba jit setting to use parallel mode
        """
        self._nx, self._ny = conv_pixels.shape
        self._supersampling_factor = supersampling_factor
        # loop through the different supersampling sectors, one NumbaConvolution
        # instance per sub-pixel offset (i, j)
        self._numba_conv_list = []
        if compute_pixels is None:
            compute_pixels = np.ones_like(conv_pixels)
        compute_pixels = np.array(compute_pixels, dtype=bool)
        for i in range(supersampling_factor):
            for j in range(supersampling_factor):
                # compute shifted psf kernel for this sub-pixel position
                kernel = self._partial_kernel(kernel_super, i, j)
                if kernel_size is not None:
                    kernel = image_util.cut_edges(kernel, kernel_size)
                numba_conv = NumbaConvolution(
                    kernel,
                    conv_pixels,
                    compute_pixels=compute_pixels,
                    nopython=nopython,
                    cache=cache,
                    parallel=parallel,
                )
                self._numba_conv_list.append(numba_conv)

    def convolve2d(self, image_high_res):
        """
        :param image_high_res: supersampled image/model to be convolved and re-binned to regular resolution
        :return: convolved and re-binned image
        """
        conv_image = np.zeros((self._nx, self._ny))
        count = 0
        # Sum the contribution of every sub-pixel offset; order matches the
        # (i, j) loop used to build self._numba_conv_list.
        for i in range(self._supersampling_factor):
            for j in range(self._supersampling_factor):
                image_select = self._partial_image(image_high_res, i, j)
                conv_image += self._numba_conv_list[count].convolve2d(image_select)
                count += 1
        return conv_image

    def _partial_image(self, image_high_res, i, j):
        """
        :param image_high_res: 2d array supersampled
        :param i: index of super-sampled position in first axis
        :param j: index of super-sampled position in second axis
        :return: 2d array only selected the specific supersampled position within a regular pixel
        """
        # Strided slice picks out one sub-pixel phase per regular pixel.
        return image_high_res[
            i :: self._supersampling_factor, j :: self._supersampling_factor
        ]

    def _partial_kernel(self, kernel_super, i, j):
        """
        :param kernel_super: supersampled kernel
        :param i: index of super-sampled position in first axis
        :param j: index of super-sampled position in second axis
        :return: effective kernel re-binned to the regular grid resulting from the supersampled position (i, j)
        """
        n = len(kernel_super)
        # Regular-grid kernel size large enough to hold the shifted kernel,
        # forced to an odd number of pixels.
        kernel_size = int(round(n / float(self._supersampling_factor) + 1.5))
        if kernel_size % 2 == 0:
            kernel_size += 1
        n_match = kernel_size * self._supersampling_factor
        kernel_super_match = np.zeros((n_match, n_match))
        # Offset that centers the (i, j)-shifted kernel in the padded array.
        delta = int((n_match - n - self._supersampling_factor) / 2) + 1
        i0 = delta  # index where to start kernel for i=0
        j0 = delta  # index where to start kernel for j=0 (should be symmetric)
        kernel_super_match[i0 + i : i0 + i + n, j0 + j : j0 + j + n] = kernel_super
        # kernel_super_match = image_util.cut_edges(kernel_super_match, numPix=n)
        kernel = image_util.re_size(
            kernel_super_match, factor=self._supersampling_factor
        )
        return kernel
|
sibirrerREPO_NAMElenstronomyPATH_START.@lenstronomy_extracted@lenstronomy-main@lenstronomy@ImSim@Numerics@numba_convolution.py@.PATH_END.py
|
{
"filename": "APTProgram_simulator_use_examples.ipynb",
"repo_name": "spacetelescope/mirage",
"repo_path": "mirage_extracted/mirage-master/examples/APTProgram_simulator_use_examples.ipynb",
"type": "Jupyter Notebook"
}
|
# Simulating APT Programs with MIRaGe
---
In this notebook, we will demonstrate how to use MIRAGE (Multi-Instrument Ramp Generator) to simulate exposures from Astronomer's Proposal Tool ([APT](https://jwst-docs.stsci.edu/display/JPP/JWST+Astronomers+Proposal+Tool+Overview)) programs using the `mirage.apt` module. This module extracts the instrument, detector, pointing, and filter information from APT program output files, rather than requiring that the user defines them all manually.
The process for APT simulations with MIRaGe is as follows:
- Download needed files from the APT program, and parse those files to access the details and structure of a given program
- Generate catalogs for all exposures in the program
- Generate MIRaGe YAML input files that contain the specifications for each exposure simulation
- Use the `mirage.imaging_simulator` to create seed images and add dark exposures and detector effects (See the [Imaging Simulator example](Imaging_simulator_use_examples.ipynb) for more detail on that process)
### Table of Contents:
1. [Export program information from APT](#export_apt)
2. [Generate catalog files of sources in FOV](#query_cat)
3. [Create YAML files from APT files](#yaml)
4. [Generate the simulated image](#simulate_images)
Appendix A: [Generating data for an entire observation](#simulate_whole_obs)
## Import necessary packages and modules
```python
# Standard Library Imports
from glob import glob
import os
import shutil
import urllib
# Third Party Imports
import pysiaf
from astropy.io import ascii as asc
from astropy.io import fits
from matplotlib import cm
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
# Local Imports (from nircam_simulator package)
from mirage import imaging_simulator
from mirage.catalogs import create_catalog
from mirage.utils.utils import ensure_dir_exists
from mirage.yaml import yaml_generator
# View matplotlib plots inline
%matplotlib inline
```
---
<a id='export_apt'></a>
# Export Program Information from APT
MIRaGe requires APT program output files in order to generate data with unstacked PSFs.
## Get needed files from APT program
For this example, we are using APT output from a parallel NIRCam-FGS commissioning program: program 1071, "NIRCam Absolute FGS-NIRCam Alignment".
```python
# Define the proposal ID
prop_id = 1071
# Where the pointing and XML file for this particular OTE CAR are
input_dir = os.path.abspath('./apt_data/')
ensure_dir_exists(input_dir)
# Define the names of the pointing and XML files
# NOTE: Change the root if you name your files differently.
root = 'apt_{}'.format(prop_id)
pointing_file = os.path.join(input_dir, '{}.pointing'.format(root))
xml_file = os.path.join(input_dir, '{}.xml'.format(root))
```
### Option 1: Manually download the `.pointing` and `.xml` files
Open the APT file for the program you want to simulate. If you don't have the file locally, you can load this program in APT by selecting `File > Retrieve from STScI > Retrieve Using Proposal ID` and then entering the program ID (e.g. 1140). (You must be running APT in STScI mode for this retrieval method to be available.)
Export the `.pointing` and `.xml` files for your given proposal in APT by selecting `File > Export...` and selecting both the xml and pointing file options.
### Option 2: Automatically download the `.pointing` and `.xml` files
If you don't want to bother opening APT and your program is publicly accessible, you can use the below code to download APT files.
*Note that you must have APT installed for the below command to work.*
```python
# Build temporary directory
temp_apt_dir = os.path.abspath('./temp_apt/')
if not os.path.isdir(temp_apt_dir):
os.mkdir(temp_apt_dir)
print('Create temporary directory to download APT files.')
# Download the APT file
apt_file = os.path.join(temp_apt_dir, '{}.aptx'.format(prop_id))
urllib.request.urlretrieve(
'http://www.stsci.edu/jwst/phase2-public/{}.aptx'.format(prop_id),
apt_file
)
print('Downloaded temporary APT file:', apt_file)
# Determine the user's installation of APT
apt_app = sorted(glob('/Applications/APT*'))[-1].replace(' ', '\ ')
print('Will export pointing and XML files using the following installation of APT:', apt_app)
# Export the APT XML and pointing files
os.system(
"{}/bin/apt -nogui -export xml,pointing {}".format(apt_app, apt_file)
)
# Move the XML and pointing files to the apt_data directory
os.rename(os.path.join(temp_apt_dir, '{}.xml'.format(prop_id)),
xml_file)
os.rename(os.path.join(temp_apt_dir, '{}.pointing'.format(prop_id)),
pointing_file)
print('Downloaded APT pointing file:', pointing_file)
print('Downloaded APT XML file:', xml_file)
# Tear down temporary directory
shutil.rmtree(temp_apt_dir)
print('Deleted temporary APT file and directory.')
```
And before we move on, let's just make sure that worked:
```python
# Make sure the pointing and XML files exist
assert os.path.exists(pointing_file)
assert os.path.exists(xml_file)
```
### Define location of output files
The process of generating simulated images with MIRaGe produces a lot of files:
- YAML files carrying the OTE mirror state
- YAML files carrying the specifications for simulations
- FITS files of the simulated seed, dark, and compiled images
Additionally, we must create FITS library files of the segment PSF images in order to simulate images with nonnominal PSFs.
Let's define the directories to save these output files to:
```python
# Where to save MIRaGe output
out_dir = os.path.join(input_dir, 'output')
```
---
<a id='query_cat'></a>
# Query online catalogs to generate catalog files of sources in FOV
Next, we need to generate catalog files for the sources around the target in this proposal.
Mirage contains the `create_catalog.for_proposal` function that can be used to create point source and galaxy catalogs from an APT file. This function collects the target RA and Dec values from the proposal, as well as the list of instruments and filters used for the observations. It then runs `mirage.catalog.create_catalog.get_all_catalogs` and `mirage.catalog.create_catalog.galaxy_background` to produce point source and galaxy catalogs. These catalogs can then be used as input when producing the yaml files needed to run Mirage.
```python
catalog_out = create_catalog.for_proposal(xml_file, pointing_file,
point_source=True,
extragalactic=True,
catalog_splitting_threshold=0.12,
email='someone@somewhere.edu',
out_dir=out_dir,
save_catalogs=True)
ptsrc_cat, gal_cat, ptsrc_names, gal_names, pmap, gmap = catalog_out
cat_dict = {'ptsrc': ptsrc_names[0],
'gal': gal_names[0]}
```
## Plot all sources in catalogs
Let's see how many sources we're dealing with here.
```python
# Plot all queried sources
for catalog_filename in ptsrc_names:
target_catalog = asc.read(catalog_filename)
plt.scatter(target_catalog['x_or_RA'], target_catalog['y_or_Dec'], s=1, alpha=.7, label='Point Source Catalog')
for catalog_filename in gal_names:
target_catalog = asc.read(catalog_filename)
plt.scatter(target_catalog['x_or_RA'], target_catalog['y_or_Dec'], s=1, alpha=.7, label='Galactic Catalog')
plt.xlabel('Right Ascension [degrees]')
plt.ylabel('Declination [degrees]')
plt.legend()
plt.show()
```
---
<a id='yaml'></a>
# Create YAML files from APT files
Next, we need to make the YAML files that include all of the parameters for MIRaGe to run.
Use `mirage.yaml.yaml_generator` to make all of the YAML files for the given APT program - one file per exposure.
```python
# Create a series of data simulator input yaml files from APT files
yaml_dir = os.path.join(out_dir, 'yamls')
yam = yaml_generator.SimInput(input_xml=xml_file, pointing_file=pointing_file,
catalogs=cat_dict,
verbose=True, output_dir=yaml_dir, simdata_output_dir=out_dir)
# Create all input YAML files (one per each exposure)
yam.create_inputs()
```
## Choose which yaml (visit/tile) to use
Now that we've generated all of the needed YAML files, we need to choose one to simulate images with. MIRaGe can only generate one simulated exposure at a time, so we need to choose one YAML file in our yamls directory that we will use to produce an image. (See [Appendix A](#simulate_whole_obs) for how to use a wrapper to simulate multiple exposures at once with MIRaGe.)
Not every exposure necessarily has the same pointing, so we should choose an exposure that places the target star in the desired detector field-of-view.
### Examine target pointings relative to apertures and V2/V3 references
Looking at the `.pointing` file, let's plot where the target will appear relative to the NIRCam apertures for each unique pointing.
```python
# Examine apertures and V2/V3 references for each array/subarray
nc_siaf = pysiaf.Siaf('NIRCam')
nc_full = nc_siaf['NRCA1_FULL']
plt.figure(figsize=(15,10))
for apername in sorted(nc_siaf.apernames):
a = apername
if ('_FULL' in a) and ('OSS' not in a) and ('MASK' not in a) and (a[-1] != 'P'):
nc_siaf[a].plot(frame='tel', label=True, fill_color='white')
plt.gca().invert_xaxis()
# Compare V2/V3 of targets (from .pointing file)
all_pointings = set([(v2, v3, filename) for v2, v3, filename in zip(yam.info['v2'],
yam.info['v3'],
yam.info['yamlfile'])])
print('Example files for each pointing:')
print('--------------------------------')
plotted_points = []
for i_point, (v2, v3, filename) in enumerate(all_pointings):
if (v2, v3) not in plotted_points:
plotted_points.append((v2, v3))
plt.scatter(v2, v3, marker='*', s=500,
label='Pointing {}/{}'.format(i_point + 1, len(all_pointings)))
print('{}. {}'.format(i_point + 1, filename))
plt.legend()
plt.show()
```
### Select the YAML to generate an image from
Looking at the pointing figure above, choose one YAML file that we will create a seed image with MIRaGe for. (Be sure to choose a YAML that has a detector and filter that matches the library files you have created so far.)
*See [JDox](https://jwst-docs.stsci.edu/display/JDAT/File+Naming+Conventions+and+Data+Products) for a detailed explanation of the MIRaGe YAML file name format.*
```python
# Select one YAML to estimate where the sources will be
test_yaml_filename = 'jw01071001001_01103_00001_nrca4.yaml'
test_yaml = os.path.join(yaml_dir, test_yaml_filename)
assert os.path.isfile(test_yaml)
print(test_yaml)
```
---
<a id='simulate_images'></a>
# Simulate image with MIRaGe
Finally, we can run MIRaGe to generate a seed image simulation of our unstacked mirror state during OTE-01.
From here on out, from the user perspective, the simulation process is identical to that of nominal imaging cases (see the [imaging example notebook](Imaging_simulator_use_examples.ipynb)). To reiterate, it is the specifications made in the YAML files that enable the simulation of unstacked mirror simulations with MIRaGe.
```python
# Run the image simulator using the input defined in test_yaml
img_sim = imaging_simulator.ImgSim()
img_sim.paramfile = test_yaml
img_sim.create()
```
```python
# Plot the seed image, dark image, and final exposure simulation
fig, [ax1, ax2, ax3] = plt.subplots(1, 3, figsize=(20, 7))
plt.tight_layout()
# Define scale limits and colormap
clim=(0.001, 0.1)
cmap = cm.get_cmap('viridis')
cmap.set_bad(cmap(0))
# Plot seed image
fitsplot = ax1.imshow(img_sim.seedimage, clim=clim, cmap=cmap)
ax1.set_title('Seed Image', size=24)
ax1.invert_xaxis()
ax1.invert_yaxis()
# Plot dark current
ax2.imshow(img_sim.linDark.data[0,-1,:,:] - img_sim.linDark.data[0,0,:,:], clim=clim, cmap=cmap)
ax2.set_title('Dark Current', size=24)
ax2.invert_xaxis()
ax2.invert_yaxis()
# Plot final exposure
file_root = os.path.basename(test_yaml_filename).split('.yaml')[0]
linear_output = os.path.join(out_dir, '{}_linear.fits'.format(file_root))
with fits.open(linear_output) as h:
lindata = h[1].data
header = h[0].header
exptime = header['EFFINTTM']
diffdata = (lindata[0,-1,:,:] - lindata[0,0,:,:]) / exptime
ax3.imshow(diffdata, clim=clim, cmap=cmap)
ax3.set_title('Final Exposure Simulation', size=24)
ax3.invert_xaxis()
ax3.invert_yaxis()
# Define the colorbar
cbar_ax = fig.add_axes([1, 0.09, 0.03, 0.87])
cbar = plt.colorbar(fitsplot, cbar_ax)
cbar.set_label('Count Rate', rotation=270, labelpad=30, size=24)
```
---
<a id='simulate_whole_obs'></a>
# Appendix A: Simulating many exposures at once
Chances are, you don't want to simulate just one exposure from one detector. In order to simulate all of the exposures from a given observation, write a for loop to iterate over all the YAMLs. We include an example for program 1134 observation 1 below.
```python
from mirage import imaging_simulator
# Get all the 1134 Obs 1 NRCA3 yamls
all_yaml_files = glob(os.path.join(yaml_dir, 'jw01134001*.yaml'))
n_yamls = len(all_yaml_files)
print('{} FITS files will be generated.'.format(n_yamls))
# Run imaging_simulator for all YAMLs
for i_yaml, yaml in enumerate(all_yaml_files):
print('*** SIMULATING YAML {}/{}: {} ***'.format(i_yaml+1, n_yamls, yaml))
img_sim = imaging_simulator.ImgSim()
img_sim.paramfile = yaml
img_sim.create()
```
(If you are impatient and ambitious, you can use Python's `multiprocessing` module to make the simulation go faster. Even better on a server with more processors!)
|
spacetelescopeREPO_NAMEmiragePATH_START.@mirage_extracted@mirage-master@examples@APTProgram_simulator_use_examples.ipynb@.PATH_END.py
|
{
"filename": "mode_data.py",
"repo_name": "n-claes/legolas",
"repo_path": "legolas_extracted/legolas-master/post_processing/pylbo/visualisation/modes/mode_data.py",
"type": "Python"
}
|
from __future__ import annotations
import difflib
from typing import List, Union
import numpy as np
from pylbo.data_containers import LegolasDataSet
from pylbo.exceptions import BackgroundNotPresent
from pylbo.utilities.logger import pylboLogger
from pylbo.visualisation.utils import ef_name_to_latex, validate_ef_name
class ModeVisualisationData:
    """
    Class that contains the data used for eigenmode visualisations.
    Parameters
    ----------
    ds : ~pylbo.data_containers.LegolasDataSet
        The dataset containing the eigenfunctions and modes to visualise.
    omega : list[complex]
        The (approximate) eigenvalue(s) of the mode(s) to visualise.
    ef_name : str
        The name of the eigenfunction to visualise.
    use_real_part : bool
        Whether to use the real part of the eigenmode solution.
    complex_factor : complex
        A complex factor to multiply the eigenmode solution with.
    add_background : bool
        Whether to add the equilibrium background to the eigenmode solution.
    Attributes
    ----------
    ds : ~pylbo.data_containers.LegolasDataSet
        The dataset containing the eigenfunctions and modes to visualise.
    omega : list[complex]
        The (approximate) eigenvalue(s) of the mode(s) to visualise.
    eigenfunction : list[np.ndarray]
        The eigenfunction of the mode(s) to visualise.
    use_real_part : bool
        Whether to use the real part of the eigenmode solution.
    complex_factor : complex
        The complex factor to multiply the eigenmode solution with.
    add_background : bool
        Whether to add the equilibrium background to the eigenmode solution.
    """
    def __init__(
        self,
        ds: LegolasDataSet,
        omega: list[complex],
        ef_name: str = None,
        use_real_part: bool = True,
        complex_factor: complex = None,
        add_background: bool = False,
    ) -> None:
        self.ds = ds
        self.use_real_part = use_real_part
        # A complex_factor of None is normalised to 1 so it can always be
        # multiplied into the solution without a None-check.
        self.complex_factor = self._validate_complex_factor(complex_factor)
        # Fail early: adding a background requires the dataset to contain one.
        if add_background and not ds.has_background:
            raise BackgroundNotPresent(ds.datfile, "add background to solution")
        self.add_background = add_background
        # Guards the info-level logging in the background helpers; nothing in
        # this class sets it False, so presumably callers toggle it -- TODO confirm.
        self._print_bg_info = True
        self._ef_name = None if ef_name is None else validate_ef_name(ds, ef_name)
        self._ef_name_latex = None if ef_name is None else self.get_ef_name_latex()
        # One dict of (possibly merged regular + derived) eigenfunctions per
        # requested eigenvalue; omega/eigenfunction are unpacked from these.
        self._all_efs = self._get_all_efs(ds, omega)
        self.omega = [all_efs.get("eigenvalue") for all_efs in self._all_efs]
        self.eigenfunction = [all_efs.get(self._ef_name) for all_efs in self._all_efs]
    @property
    def k2(self) -> float:
        """The k2 wave number of the eigenmode solution."""
        return self.ds.parameters["k2"]
    @property
    def k3(self) -> float:
        """The k3 wave number of the eigenmode solution."""
        return self.ds.parameters["k3"]
    @property
    def part_name(self) -> str:
        """
        Returns the name of the part of the eigenmode solution to use, i.e.
        'real' or 'imag'.
        """
        return "real" if self.use_real_part else "imag"
    def _get_all_efs(self, ds: LegolasDataSet, omega: List[complex]) -> np.ndarray:
        """
        Returns an array of dicts with all eigenfunctions for every eigenvalue.
        The dictionaries will be updated with the derived eigenfunctions if they
        are available in the dataset.
        Parameters
        ----------
        ds : ~pylbo.data_containers.LegolasDataSet
            The dataset containing the eigenfunctions.
        omega : list[complex]
            The (approximate) eigenvalue(s) of the mode(s) to retrieve the
            eigenfunctions from.
        Returns
        -------
        np.ndarray
            An array of dicts with all eigenfunctions for every eigenvalue.
        """
        arr1 = ds.get_eigenfunctions(omega)
        # Without derived eigenfunctions the regular ones are returned as-is.
        if not ds.has_derived_efs:
            return arr1
        arr2 = ds.get_derived_eigenfunctions(omega)
        # Object array that will hold one merged dict per eigenvalue.
        arr = np.empty(len(omega), dtype=dict)
        for i, (dict1, dict2) in enumerate(zip(arr1, arr2)):
            ev1 = dict1.get("eigenvalue")
            ev2 = dict2.get("eigenvalue")
            # Consistency check: both arrays must refer to the same eigenvalue;
            # on mismatch we fall back to the regular eigenfunctions entirely.
            if not np.isclose(ev1, ev2, atol=1e-12):
                pylboLogger.warning(
                    f"The eigenvalue of the eigenfunction {ev1:.6e} and the derived "
                    f"eigenfunction {ev2:.6e} do not match. Using eigenfunctions only."
                )
                return arr1
            # Merge regular and derived eigenfunctions; derived keys win on clash.
            arr[i] = {**dict1, **dict2}
        return arr
    def get_ef_name_latex(self) -> str:
        """Returns the latex representation of the eigenfunction name."""
        return ef_name_to_latex(
            self._ef_name, geometry=self.ds.geometry, real_part=self.use_real_part
        )
    def _validate_complex_factor(self, complex_factor: complex) -> complex:
        """
        Validates the complex factor.
        Parameters
        ----------
        complex_factor : complex
            The complex factor to validate.
        Returns
        -------
        complex
            The complex factor if it is valid, otherwise 1.
        """
        return complex_factor if complex_factor is not None else 1
    def get_mode_solution(
        self,
        ef: np.ndarray,
        omega: complex,
        u2: Union[float, np.ndarray],
        u3: Union[float, np.ndarray],
        t: Union[float, np.ndarray],
    ) -> np.ndarray:
        """
        Calculates the full eigenmode solution for given coordinates and time.
        If a complex factor was given, the eigenmode solution is multiplied with the
        complex factor. If :attr:`use_real_part` is True the real part of the eigenmode
        solution is returned, otherwise the complex part.
        Parameters
        ----------
        ef : np.ndarray
            The eigenfunction to use.
        omega : complex
            The eigenvalue to use.
        u2 : Union[float, np.ndarray]
            The y coordinate(s) of the eigenmode solution.
        u3 : Union[float, np.ndarray]
            The z coordinate(s) of the eigenmode solution.
        t : Union[float, np.ndarray]
            The time(s) of the eigenmode solution.
        Returns
        -------
        np.ndarray
            The real or imaginary part of the eigenmode solution for the given
            set of coordinate(s) and time(s).
        """
        # Plane-wave ansatz: ef * exp(i k2 u2 + i k3 u3 - i omega t),
        # optionally scaled by the user-supplied complex factor.
        solution = (
            self.complex_factor
            * ef
            * np.exp(1j * self.k2 * u2 + 1j * self.k3 * u3 - 1j * omega * t)
        )
        # 'real' or 'imag' attribute of the complex array, per use_real_part.
        return getattr(solution, self.part_name)
    def get_background(self, shape: tuple[int, ...], name=None) -> np.ndarray:
        """
        Returns the background of the eigenmode solution.
        Parameters
        ----------
        shape : tuple[int, ...]
            The shape of the eigenmode solution.
        name : str
            The name of the background to use. If None, the background name
            will be inferred from the eigenfunction name.
        Returns
        -------
        np.ndarray
            The background of the eigenmode solution, sampled on the eigenfunction
            grid and broadcasted to the same shape as the eigenmode solution.
        """
        if name is None:
            name = self._get_background_name()
        bg = self.ds.equilibria[name]
        bg_sampled = self._sample_background_on_ef_grid(bg)
        if self._print_bg_info:
            pylboLogger.info(f"background {name} broadcasted to shape {shape}")
        # Broadcast along the reversed shape, then transpose back, so the final
        # array has exactly `shape` with the grid axis leading.
        return np.broadcast_to(bg_sampled, shape=reversed(shape)).transpose()
    def _sample_background_on_ef_grid(self, bg: np.ndarray) -> np.ndarray:
        """
        Samples the background array on the eigenfunction grid.
        Parameters
        ----------
        bg : np.ndarray
            The background array with Gaussian grid spacing
        Returns
        -------
        np.ndarray
            The background array with eigenfunction grid spacing
        """
        if self._print_bg_info:
            pylboLogger.info(
                f"sampling background [{len(bg)}] on eigenfunction grid "
                f"[{len(self.ds.ef_grid)}]"
            )
        # Linear interpolation from the Gaussian grid onto the ef grid.
        return np.interp(self.ds.ef_grid, self.ds.grid_gauss, bg)
    def _get_background_name(self) -> str:
        """
        Returns the name of the background.
        Returns
        -------
        str
            The closest match between the eigenfunction name and the equilibrium
            name.
        Raises
        ------
        ValueError
            If the eigenfunction name is a magnetic vector potential component.
        """
        # Vector potential components have no equilibrium counterpart.
        if self._ef_name in ("a1", "a2", "a3"):
            raise ValueError(
                "Unable to add a background to the magnetic vector potential."
            )
        # Fuzzy-match the ef name against the known equilibrium names; the
        # 1-tuple unpacking raises if no close match exists.
        (name,) = difflib.get_close_matches(self._ef_name, self.ds.eq_names, 1)
        if self._print_bg_info:
            pylboLogger.info(
                f"adding background for '{self._ef_name}', closest match is '{name}'"
            )
        return name
|
n-claesREPO_NAMElegolasPATH_START.@legolas_extracted@legolas-master@post_processing@pylbo@visualisation@modes@mode_data.py@.PATH_END.py
|
{
"filename": "_selected.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/box/_selected.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class SelectedValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Compound validator for the ``box.selected`` property."""

    def __init__(self, plotly_name="selected", parent_name="box", **kwargs):
        # Use the generated defaults unless the caller explicitly overrides them.
        data_class_str = kwargs.pop("data_class_str", "Selected")
        data_docs = kwargs.pop(
            "data_docs",
            """
            marker
            :class:`plotly.graph_objects.box.selected.Marke
            r` instance or dict with compatible properties
""",
        )
        super(SelectedValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=data_class_str,
            data_docs=data_docs,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@box@_selected.py@.PATH_END.py
|
{
"filename": "hwp_pcu.py",
"repo_name": "simonsobs/socs",
"repo_path": "socs_extracted/socs-main/socs/agents/hwp_pcu/drivers/hwp_pcu.py",
"type": "Python"
}
|
import time
import serial
# Relay patterns for each operation mode, in relay-channel order.
patterns = {
    'off': [0, 0, 0, 0, 0, 0],
    'on_1': [1, 1, 1, 0, 0, 0],
    'on_2': [1, 1, 1, 1, 1, 1],
    'stop': [0, 1, 1, 1, 0, 0],
}

# Relay channels wired to the PCU, in the same order as the pattern entries.
_RELAY_CHANNELS = [0, 1, 2, 5, 6, 7]


class PCU:
    """Class to communicate with the phase compensation unit.

    Args:
        port (str): Path to USB device in '/dev/'

    Attributes:
        status (str): The status of the unit (off/on_1/on_2/stop)
    """

    def __init__(self, port):
        # 19200 baud, 8 data bits, 1 stop bit, 1 s read timeout.
        self.port = serial.Serial(
            port,
            baudrate=19200,
            timeout=1,
            bytesize=serial.EIGHTBITS,
            stopbits=serial.STOPBITS_ONE,
        )

    def close(self):
        """Release the serial port."""
        self.port.close()

    def sleep(self):
        """Short pause between serial transactions."""
        time.sleep(0.1)

    def read(self):
        """Read one line (terminated by CR/LF pair) and return it stripped and decoded."""
        raw = self.port.read_until(b'\n\r')
        return raw.strip().decode()

    def clear_buffer(self):
        """Drain pending responses until a read comes back empty."""
        while self.read():
            pass

    def _relay_command(self, action, channel):
        """Send ``relay <action> <channel>`` over the serial line, then pause."""
        cmd = "relay " + action + " " + str(channel) + "\n\r"
        self.port.write(cmd.encode('utf-8'))
        self.sleep()

    def relay_on(self, channel):
        """Switch relay ``channel`` on."""
        self._relay_command("on", channel)

    def relay_off(self, channel):
        """Switch relay ``channel`` off."""
        self._relay_command("off", channel)

    def relay_read(self, channel):
        """Query relay ``channel``; returns 1 (on), 0 (off) or -1 (unexpected reply)."""
        self._relay_command("read", channel)
        self.read()  # first line echoes the command; discard it
        answer = self.read()
        if answer == "on":
            return 1
        if answer == "off":
            return 0
        return -1

    def send_command(self, command):
        """Drive every relay to match the pattern for ``command`` (a key of ``patterns``)."""
        pattern = patterns[command]
        for channel, state in zip(_RELAY_CHANNELS, pattern):
            (self.relay_on if state else self.relay_off)(channel)
        self.sleep()
        self.read()

    def get_status(self):
        """get_status()

        **Task** - Get the operation mode of the phase compensation unit.
        off: The compensation phase is zero.
        on_1: The compensation phase is +120 deg.
        on_2: The compensation phase is -120 deg.
        stop: Stop the HWP spin.
        """
        switches = [self.relay_read(ch) for ch in _RELAY_CHANNELS]
        # Any unexpected reply invalidates the whole readout.
        if -1 in switches:
            return 'failed'
        for command, pattern in patterns.items():
            if switches == pattern:
                return command
        return 'undefined'
|
simonsobsREPO_NAMEsocsPATH_START.@socs_extracted@socs-main@socs@agents@hwp_pcu@drivers@hwp_pcu.py@.PATH_END.py
|
{
"filename": "_y.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/streamtube/lightposition/_y.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class YValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for ``streamtube.lightposition.y``."""

    def __init__(
        self, plotly_name="y", parent_name="streamtube.lightposition", **kwargs
    ):
        # Collect the generated defaults; any remaining kwargs pass through.
        opts = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "max": kwargs.pop("max", 100000),
            "min": kwargs.pop("min", -100000),
            "role": kwargs.pop("role", "style"),
        }
        opts.update(kwargs)
        super(YValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **opts
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@streamtube@lightposition@_y.py@.PATH_END.py
|
{
"filename": "prolog.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/Pygments/py3/pygments/lexers/prolog.py",
"type": "Python"
}
|
"""
pygments.lexers.prolog
~~~~~~~~~~~~~~~~~~~~~~
Lexers for Prolog and Prolog-like languages.
:copyright: Copyright 2006-2024 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import RegexLexer, bygroups
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Number, Punctuation
__all__ = ['PrologLexer', 'LogtalkLexer']
class PrologLexer(RegexLexer):
    """
    Lexer for Prolog files.
    """
    name = 'Prolog'
    aliases = ['prolog']
    filenames = ['*.ecl', '*.prolog', '*.pro', '*.pl']
    mimetypes = ['text/x-prolog']
    url = 'https://en.wikipedia.org/wiki/Prolog'
    version_added = ''
    # Two states: 'root' tokenises everything except /* ... */ comments,
    # which may nest and are therefore tracked in 'nested-comment'.
    tokens = {
        'root': [
            (r'/\*', Comment.Multiline, 'nested-comment'),
            (r'%.*', Comment.Single),
            # character literal
            (r'0\'.', String.Char),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            # literal with prepended base
            (r'\d\d?\'[a-zA-Z0-9]+', Number.Integer),
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
            (r'\d+', Number.Integer),
            (r'[\[\](){}|.,;!]', Punctuation),
            (r':-|-->', Punctuation),
            (r'"(?:\\x[0-9a-fA-F]+\\|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|'
             r'\\[0-7]+\\|\\["\\abcefnrstv]|[^\\"])*"', String.Double),
            (r"'(?:''|[^'])*'", String.Atom), # quoted atom
            # Needs to not be followed by an atom.
            # (r'=(?=\s|[a-zA-Z\[])', Operator),
            (r'is\b', Operator),
            (r'(<|>|=<|>=|==|=:=|=|/|//|\*|\+|-)(?=\s|[a-zA-Z0-9\[])',
             Operator),
            (r'(mod|div|not)\b', Operator),
            (r'_', Keyword), # The don't-care variable
            (r'([a-z]+)(:)', bygroups(Name.Namespace, Punctuation)),
            # The wide \u ranges below admit accented/CJK identifier characters.
            (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             r'(\s*)(:-|-->)',
             bygroups(Name.Function, Text, Operator)), # function defn
            (r'([a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*)'
             r'(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-z\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]'
             r'[\w$\u00c0-\u1fff\u3040-\ud7ff\ue000-\uffef]*',
             String.Atom), # atom, characters
            # This one includes !
            (r'[#&*+\-./:<=>?@\\^~\u00a1-\u00bf\u2010-\u303f]+',
             String.Atom), # atom, graphics
            (r'[A-Z_]\w*', Name.Variable),
            (r'\s+|[\u2000-\u200f\ufff0-\ufffe\uffef]', Text),
        ],
        'nested-comment': [
            (r'\*/', Comment.Multiline, '#pop'),
            (r'/\*', Comment.Multiline, '#push'),
            (r'[^*/]+', Comment.Multiline),
            (r'[*/]', Comment.Multiline),
        ],
    }
    def analyse_text(text):
        """Competes with IDL and Visual Prolog on *.pro"""
        if ':-' in text:
            # Visual Prolog also uses :-
            return 0.5
        else:
            return 0
class LogtalkLexer(RegexLexer):
    """
    For Logtalk source code.
    """
    name = 'Logtalk'
    url = 'http://logtalk.org/'
    aliases = ['logtalk']
    filenames = ['*.lgt', '*.logtalk']
    mimetypes = ['text/x-logtalk']
    version_added = '0.10'
    # States: 'root' (general code), 'quoted_atom' / 'double_quoted_term'
    # (string-like terms), 'directive' (after a leading ':-') and
    # 'entityrelations' (inside an entity-opening directive).
    tokens = {
        'root': [
            # Directives
            (r'^\s*:-\s', Punctuation, 'directive'),
            # Comments
            (r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
            # Whitespace
            (r'\n', Text),
            (r'\s+', Text),
            # Numbers
            (r"0'[\\]?.", Number),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
            # Variables
            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
            # Event handlers
            (r'(after|before)(?=[(])', Keyword),
            # Message forwarding handler
            (r'forward(?=[(])', Keyword),
            # Execution-context methods
            (r'(context|parameter|this|se(lf|nder))(?=[(])', Keyword),
            # Reflection
            (r'(current_predicate|predicate_property)(?=[(])', Keyword),
            # DCGs and term expansion
            (r'(expand_(goal|term)|(goal|term)_expansion|phrase)(?=[(])', Keyword),
            # Entity
            (r'(abolish|c(reate|urrent))_(object|protocol|category)(?=[(])', Keyword),
            (r'(object|protocol|category)_property(?=[(])', Keyword),
            # Entity relations
            (r'co(mplements_object|nforms_to_protocol)(?=[(])', Keyword),
            (r'extends_(object|protocol|category)(?=[(])', Keyword),
            (r'imp(lements_protocol|orts_category)(?=[(])', Keyword),
            (r'(instantiat|specializ)es_class(?=[(])', Keyword),
            # Events
            (r'(current_event|(abolish|define)_events)(?=[(])', Keyword),
            # Flags
            (r'(create|current|set)_logtalk_flag(?=[(])', Keyword),
            # Compiling, loading, and library paths
            (r'logtalk_(compile|l(ibrary_path|oad|oad_context)|make(_target_action)?)(?=[(])', Keyword),
            (r'\blogtalk_make\b', Keyword),
            # Database
            (r'(clause|retract(all)?)(?=[(])', Keyword),
            (r'a(bolish|ssert(a|z))(?=[(])', Keyword),
            # Control constructs
            (r'(ca(ll|tch)|throw)(?=[(])', Keyword),
            (r'(fa(il|lse)|true|(instantiation|system)_error)\b', Keyword),
            (r'(uninstantiation|type|domain|existence|permission|representation|evaluation|resource|syntax)_error(?=[(])', Keyword),
            # All solutions
            (r'((bag|set)of|f(ind|or)all)(?=[(])', Keyword),
            # Multi-threading predicates
            (r'threaded(_(ca(ll|ncel)|once|ignore|exit|peek|wait|notify))?(?=[(])', Keyword),
            # Engine predicates
            (r'threaded_engine(_(create|destroy|self|next|next_reified|yield|post|fetch))?(?=[(])', Keyword),
            # Term unification
            (r'(subsumes_term|unify_with_occurs_check)(?=[(])', Keyword),
            # Term creation and decomposition
            (r'(functor|arg|copy_term|numbervars|term_variables)(?=[(])', Keyword),
            # Evaluable functors
            (r'(div|rem|m(ax|in|od)|abs|sign)(?=[(])', Keyword),
            (r'float(_(integer|fractional)_part)?(?=[(])', Keyword),
            (r'(floor|t(an|runcate)|round|ceiling)(?=[(])', Keyword),
            # Other arithmetic functors
            (r'(cos|a(cos|sin|tan|tan2)|exp|log|s(in|qrt)|xor)(?=[(])', Keyword),
            # Term testing
            (r'(var|atom(ic)?|integer|float|c(allable|ompound)|n(onvar|umber)|ground|acyclic_term)(?=[(])', Keyword),
            # Term comparison
            (r'compare(?=[(])', Keyword),
            # Stream selection and control
            (r'(curren|se)t_(in|out)put(?=[(])', Keyword),
            (r'(open|close)(?=[(])', Keyword),
            (r'flush_output(?=[(])', Keyword),
            (r'(at_end_of_stream|flush_output)\b', Keyword),
            (r'(stream_property|at_end_of_stream|set_stream_position)(?=[(])', Keyword),
            # Character and byte input/output
            (r'(nl|(get|peek|put)_(byte|c(har|ode)))(?=[(])', Keyword),
            (r'\bnl\b', Keyword),
            # Term input/output
            (r'read(_term)?(?=[(])', Keyword),
            (r'write(q|_(canonical|term))?(?=[(])', Keyword),
            (r'(current_)?op(?=[(])', Keyword),
            (r'(current_)?char_conversion(?=[(])', Keyword),
            # Atomic term processing
            (r'atom_(length|c(hars|o(ncat|des)))(?=[(])', Keyword),
            (r'(char_code|sub_atom)(?=[(])', Keyword),
            (r'number_c(har|ode)s(?=[(])', Keyword),
            # Implementation defined hooks functions
            (r'(se|curren)t_prolog_flag(?=[(])', Keyword),
            (r'\bhalt\b', Keyword),
            (r'halt(?=[(])', Keyword),
            # Message sending operators
            (r'(::|:|\^\^)', Operator),
            # External call
            (r'[{}]', Keyword),
            # Logic and control
            (r'(ignore|once)(?=[(])', Keyword),
            (r'\brepeat\b', Keyword),
            # Sorting
            (r'(key)?sort(?=[(])', Keyword),
            # Bitwise functors
            (r'(>>|<<|/\\|\\\\|\\)', Operator),
            # Predicate aliases
            (r'\bas\b', Operator),
            # Arithmetic evaluation
            (r'\bis\b', Keyword),
            # Arithmetic comparison
            (r'(=:=|=\\=|<|=<|>=|>)', Operator),
            # Term creation and decomposition
            (r'=\.\.', Operator),
            # Term unification
            (r'(=|\\=)', Operator),
            # Term comparison
            (r'(==|\\==|@=<|@<|@>=|@>)', Operator),
            # Evaluable functors
            (r'(//|[-+*/])', Operator),
            (r'\b(e|pi|div|mod|rem)\b', Operator),
            # Other arithmetic functors
            (r'\b\*\*\b', Operator),
            # DCG rules
            (r'-->', Operator),
            # Control constructs
            (r'([!;]|->)', Operator),
            # Logic and control
            (r'\\+', Operator),
            # Mode operators
            (r'[?@]', Operator),
            # Existential quantifier
            (r'\^', Operator),
            # Punctuation
            (r'[()\[\],.|]', Text),
            # Atoms
            (r"[a-z][a-zA-Z0-9_]*", Text),
            (r"'", String, 'quoted_atom'),
            # Double-quoted terms
            (r'"', String, 'double_quoted_term'),
        ],
        'quoted_atom': [
            (r"''", String),
            (r"'", String, '#pop'),
            (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
            (r"[^\\'\n]+", String),
            (r'\\', String),
        ],
        'double_quoted_term': [
            (r'""', String),
            (r'"', String, '#pop'),
            (r'\\([\\abfnrtv"\']|(x[a-fA-F0-9]+|[0-7]+)\\)', String.Escape),
            (r'[^\\"\n]+', String),
            (r'\\', String),
        ],
        # NOTE(review): these rules push 'root' rather than popping back to it,
        # so the state stack grows per directive; appears intentional upstream.
        'directive': [
            # Conditional compilation directives
            (r'(el)?if(?=[(])', Keyword, 'root'),
            (r'(e(lse|ndif))(?=[.])', Keyword, 'root'),
            # Entity directives
            (r'(category|object|protocol)(?=[(])', Keyword, 'entityrelations'),
            (r'(end_(category|object|protocol))(?=[.])', Keyword, 'root'),
            # Predicate scope directives
            (r'(public|protected|private)(?=[(])', Keyword, 'root'),
            # Other directives
            (r'e(n(coding|sure_loaded)|xport)(?=[(])', Keyword, 'root'),
            (r'in(clude|itialization|fo)(?=[(])', Keyword, 'root'),
            (r'(built_in|dynamic|synchronized|threaded)(?=[.])', Keyword, 'root'),
            (r'(alias|d(ynamic|iscontiguous)|m(eta_(non_terminal|predicate)|ode|ultifile)|s(et_(logtalk|prolog)_flag|ynchronized))(?=[(])', Keyword, 'root'),
            (r'op(?=[(])', Keyword, 'root'),
            (r'(c(alls|oinductive)|module|reexport|use(s|_module))(?=[(])', Keyword, 'root'),
            (r'[a-z][a-zA-Z0-9_]*(?=[(])', Text, 'root'),
            (r'[a-z][a-zA-Z0-9_]*(?=[.])', Text, 'root'),
        ],
        'entityrelations': [
            (r'(complements|extends|i(nstantiates|mp(lements|orts))|specializes)(?=[(])', Keyword),
            # Numbers
            (r"0'[\\]?.", Number),
            (r'0b[01]+', Number.Bin),
            (r'0o[0-7]+', Number.Oct),
            (r'0x[0-9a-fA-F]+', Number.Hex),
            (r'\d+\.?\d*((e|E)(\+|-)?\d+)?', Number),
            # Variables
            (r'([A-Z_][a-zA-Z0-9_]*)', Name.Variable),
            # Atoms
            (r"[a-z][a-zA-Z0-9_]*", Text),
            (r"'", String, 'quoted_atom'),
            # Double-quoted terms
            (r'"', String, 'double_quoted_term'),
            # End of entity-opening directive
            (r'([)]\.)', Text, 'root'),
            # Scope operator
            (r'(::)', Operator),
            # Punctuation
            (r'[()\[\],.|]', Text),
            # Comments
            (r'%.*?\n', Comment),
            (r'/\*(.|\n)*?\*/', Comment),
            # Whitespace
            (r'\n', Text),
            (r'\s+', Text),
        ]
    }
    def analyse_text(text):
        """Score the likelihood that *text* is Logtalk rather than plain Prolog."""
        if ':- object(' in text:
            return 1.0
        elif ':- protocol(' in text:
            return 1.0
        elif ':- category(' in text:
            return 1.0
        elif re.search(r'^:-\s[a-z]', text, re.M):
            return 0.9
        else:
            return 0.0
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@Pygments@py3@pygments@lexers@prolog.py@.PATH_END.py
|
{
"filename": "exceptions.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/attrs/py2/attr/exceptions.py",
"type": "Python"
}
|
from __future__ import absolute_import, division, print_function
class FrozenError(AttributeError):
    """
    Raised when code attempts to modify a frozen/immutable instance or
    attribute.

    Subclassing `AttributeError` and reusing its message mirrors the
    behavior of ``namedtuples``.

    .. versionadded:: 20.1.0
    """

    msg = "can't set attribute"
    args = [msg]
class FrozenInstanceError(FrozenError):
    """
    Raised on an attempt to modify a frozen instance.

    .. versionadded:: 16.1.0
    """
class FrozenAttributeError(FrozenError):
    """
    Raised on an attempt to modify a frozen attribute.

    .. versionadded:: 20.1.0
    """
class AttrsAttributeNotFoundError(ValueError):
    """
    Raised when an ``attrs`` function is asked for an attribute that
    does not exist.

    .. versionadded:: 16.2.0
    """
class NotAnAttrsClassError(ValueError):
    """
    Raised when a class that is not an ``attrs`` class is passed into an
    ``attrs`` function.

    .. versionadded:: 16.2.0
    """
class DefaultAlreadySetError(RuntimeError):
    """
    Raised when a default set via ``attr.ib()`` is re-set through the
    decorator.

    .. versionadded:: 17.1.0
    """
class UnannotatedAttributeError(RuntimeError):
    """
    Raised when a class using ``auto_attribs=True`` contains an
    ``attr.ib()`` that lacks a type annotation.

    .. versionadded:: 17.3.0
    """
class PythonTooOldError(RuntimeError):
    """
    Raised when an ``attrs`` feature requiring a newer Python version is
    used on an older one.

    .. versionadded:: 18.2.0
    """
class NotCallableError(TypeError):
    """
    Raised when a value that must be callable (e.g. for ``attr.ib()``)
    is not callable.

    .. versionadded:: 19.2.0
    """

    def __init__(self, msg, value):
        # Deliberately skip TypeError.__init__ and go straight to its
        # base, storing both pieces in ``args`` (mirrors upstream attrs).
        super(TypeError, self).__init__(msg, value)
        self.value = value
        self.msg = msg

    def __str__(self):
        # Only the message is shown; the offending value stays in args.
        return str(self.msg)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@attrs@py2@attr@exceptions.py@.PATH_END.py
|
{
"filename": "demo_CH_coords.py",
"repo_name": "projectchrono/chrono",
"repo_path": "chrono_extracted/chrono-main/src/demos/python/core/demo_CH_coords.py",
"type": "Python"
}
|
# =============================================================================
# PROJECT CHRONO - http://projectchrono.org
#
# Copyright (c) 2014 projectchrono.org
# All rights reserved.
#
# Use of this source code is governed by a BSD-style license that can be found
# in the LICENSE file at the top level of the distribution and at
# http://projectchrono.org/license-chrono.txt.
#
# =============================================================================
# Demo walkthrough: prints the results of basic PyChrono math-object
# operations (vectors, quaternions, matrices, frames, functions).
print ("First tutorial for PyChrono: vectors, matrices etc.");
# Load the Chrono::Engine core module!
import pychrono as chrono
try:
    import numpy as np
    from numpy import linalg as LA
except ImportError:
    # NOTE(review): execution continues after this message, so the later
    # NumPy calls (np.asarray, LA.eig) will raise NameError if NumPy is
    # actually missing -- consider exiting here instead.
    print("You need NumPy to run this demo!")
# Test logging
print("-----------------------------")
print("result is: ", 11+1.5)
print("-----------------------------")
# Test vectors
# Build a vector component-by-component ...
my_vect1 = chrono.ChVector3d()
my_vect1.x=5
my_vect1.y=2
my_vect1.z=3
# ... or directly from its three components.
my_vect2 = chrono.ChVector3d(3,4,5)
my_vect4 = my_vect1*10 + my_vect2
my_len = my_vect4.Length()
print ('vect sum =', my_vect1 + my_vect2)
# '%' is overloaded as the cross product, '^' as the dot product.
print ('vect cross =', my_vect1 % my_vect2)
print ('vect dot =', my_vect1 ^ my_vect2)
# Test quaternions
my_quat = chrono.ChQuaterniond(1,2,3,4)
# '~' is overloaded as quaternion conjugation.
my_qconjugate = ~my_quat
print ('quat. conjugate =', my_qconjugate)
print ('quat. dot product=', my_qconjugate ^ my_quat)
print ('quat. product=', my_qconjugate * my_quat)
# Test matrices and NumPy interoperability
mlist = [[1,2,3,4], [5,6,7,8], [9,10,11,12], [13,14,15,16]]
ma = chrono.ChMatrixDynamicd()
ma.SetMatr(mlist)   # Create a Matrix from a list. Size is adjusted automatically.
npmat = np.asarray(ma.GetMatr()) # Create a 2D npy array from the list extracted from ChMatrixDynamic
w, v = LA.eig(npmat)  # get eigenvalues and eigenvectors using numpy
mb = chrono.ChMatrixDynamicd(4,4)
prod = v * npmat   # you can perform linear algebra operations with numpy and then feed results into a ChMatrixDynamicd using SetMatr
mb.SetMatr(v.tolist())   # create a ChMatrixDynamicd from the numpy eigenvectors
mr = chrono.ChMatrix33d()
mr.SetMatr([[1,2,3], [4,5,6], [7,8,9]])
# Matrix * vector product.
print (mr*my_vect1);
# Test frames -
#  create a frame representing a translation and a rotation
#  of 20 degrees on X axis
my_frame = chrono.ChFramed(my_vect2, chrono.QuatFromAngleAxis(20*chrono.CH_DEG_TO_RAD, chrono.ChVector3d(1,0,0)))
# '>>' transforms a vector by a frame (local -> parent coordinates).
my_vect5 = my_vect1 >> my_frame
# Use the ChFunction classes
my_funct = chrono.ChFunctionSine(3.0,0.5)
print ('function f(0.2)=', my_funct.GetVal(0.2) )
# Inherit from the ChFunction, from the Python side,
# (do not forget the __init__ constructor)
class MySquareFunct (chrono.ChFunction):
    # Python-side ChFunction returning y = x*x; presumably dispatched
    # back from the C++ side through SWIG -- TODO confirm.
    def __init__(self):
        # Base-class constructor call is required for the C++ wrapper.
        chrono.ChFunction.__init__(self)
    def GetVal(self,x):
        # Value of the function at abscissa x.
        return x*x
# Evaluate the custom function and its derivative at x = 3.
# Fix: the label previously said 'f(2)' while the call evaluated at 3.
my_funct2 = MySquareFunct()
print('function f(3) =', my_funct2.GetVal(3) )
print('function df/dx=', my_funct2.GetDer(3) )
|
projectchronoREPO_NAMEchronoPATH_START.@chrono_extracted@chrono-main@src@demos@python@core@demo_CH_coords.py@.PATH_END.py
|
{
"filename": "starproperties.py",
"repo_name": "pmaxted/pycheops",
"repo_path": "pycheops_extracted/pycheops-master/pycheops/starproperties.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
#
# pycheops - Tools for the analysis of data from the ESA CHEOPS mission
#
# Copyright (C) 2018 Dr Pierre Maxted, Keele University
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""
StarProperties
==============
Object class to obtain/store observed properties of a star and to infer
parameters such as radius and density.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
from astropy.table import Table
from astropy.coordinates import SkyCoord
import requests
from .core import load_config
from pathlib import Path
from time import localtime, mktime
from uncertainties import ufloat, UFloat
from .ld import stagger_power2_interpolator, atlas_h1h2_interpolator
from .ld import phoenix_h1h2_interpolator
from numpy.random import normal
import os
from astropy.io.ascii import convert_numpy
from contextlib import redirect_stderr
from dace_query.cheops import Cheops
class StarProperties(object):
    """
    CHEOPS StarProperties object
    The observed properties T_eff, log_g and [Fe/H] are obtained from
    DACE or SWEET-Cat, or can be specified by the user.
    Set match_arcsec=None to skip extraction of parameters from SWEET-Cat.
    By default properties are obtained from SWEET-Cat.
    Set dace=True to obtain parameters from the stellar properties table at
    DACE.
    User-defined properties are specified either as a ufloat or as a 2-tuple
    (value, error), e.g., teff=(5000,100).
    User-defined properties over-write values obtained from SWEET-Cat or DACE.
    The stellar density is estimated using an linear relation between log(rho)
    and log(g) derived using the method of Moya et al. (2018ApJS..237...21M)
    Limb darkening parameters in the CHEOPS band are interpolated from Table 2
    of Maxted (2018A&A...616A..39M). The error on these parameters is
    propogated from the errors in Teff, log_g and [Fe/H] plus an additional
    error of 0.01 for h_1 and 0.05 for h_2, as recommended in Maxted (2018).
    If [Fe/H] for the star is not specified, the value 0.0 +/- 0.3 is assumed.
    If the stellar parameters are outside the range covered by Table 2 of
    Maxted (2018), then the results from ATLAS model from Table 10 of Claret
    (2019RNAAS...3...17C) are used instead. For stars cooler than 3500K the
    PHOENIX models for solar metalicity from Table 5 of Claret (2019) are
    used. The parameters h_1 and h_2 are both given nominal errors of 0.1 for
    both ATLAS model, and 0.15 for PHOENIX models.
    """
    def __init__(self, identifier, force_download=False, dace=False,
            match_arcsec=5, configFile=None,
            teff=None, logg=None, metal=None,
            verbose=True):
        # Resolve the target name to sky coordinates (uses the Sesame
        # name resolver, so network access is required here).
        self.identifier = identifier
        coords = SkyCoord.from_name(identifier)
        self.ra = coords.ra.to_string(precision=2,unit='hour',sep=':',pad=True)
        self.dec = coords.dec.to_string(precision=1,sep=':',unit='degree',
                alwayssign=True,pad=True)
        # --- Decide whether the cached SWEET-Cat CSV needs downloading.
        config = load_config(configFile)
        _cache_path = config['DEFAULT']['data_cache_path']
        sweetCatPath = Path(_cache_path,'sweetcat.csv')
        if force_download:
            download_sweetcat = True
        elif dace:
            # Parameters will come from DACE, so the catalogue is unused.
            download_sweetcat = False
        elif sweetCatPath.is_file():
            # Refresh only if the cache is older than the configured
            # update interval (seconds).
            file_age = mktime(localtime())-os.path.getmtime(sweetCatPath)
            if file_age > int(config['SWEET-Cat']['update_interval']):
                download_sweetcat = True
            else:
                download_sweetcat = False
        else:
            download_sweetcat = True
        if download_sweetcat:
            url = config['SWEET-Cat']['download_url']
            try:
                req=requests.post(url)
            except:
                # NOTE(review): bare except that retries with SSL
                # verification disabled -- insecure; consider narrowing
                # to requests.exceptions.SSLError.
                req=requests.post(url,verify=False)
            with open(sweetCatPath, 'wb') as file:
                file.write(req.content)
            if verbose:
                print('SWEET-Cat data downloaded from \n {}'.format(url))
        if dace:
            # --- Teff/log g/[Fe/H] from the DACE stellar-properties
            # table, matched by position on the sky.
            db = Cheops.query_catalog("stellar")
            cat_c = SkyCoord(db['obj_pos_ra_deg'], db['obj_pos_dec_deg'],
                    unit='degree,degree')
            idx, sep, _ = coords.match_to_catalog_sky(cat_c)
            if sep.arcsec[0] > match_arcsec:
                raise ValueError(
                        'No matching star in DACE stellar properties table')
            # Fixed error estimates (99 K, 0.09 dex) since the DACE
            # table does not provide uncertainties.
            self.teff = ufloat(db['obj_phys_teff_k'][idx],99)
            self.teff_note = "DACE"
            self.logg = ufloat(db['obj_phys_logg'][idx],0.09)
            self.logg_note = "DACE"
            self.metal = ufloat(db['obj_phys_feh'][idx],0.09)
            self.metal_note = "DACE"
            self.gaiadr2 = db['obj_id_gaiadr2'][idx]
        else:
            # --- Teff/log g/[Fe/H] from the cached SWEET-Cat catalogue.
            # Gaia IDs are too long for the default reader; force int64.
            converters={'gaia_dr2': [convert_numpy(np.int64)],
                        'gaia_dr3': [convert_numpy(np.int64)] }
            sweetCat = Table.read(sweetCatPath, encoding='UTF-8',
                    format='csv', converters=converters)
            # Use NaN for masked values
            sweetCat = sweetCat.filled(fill_value=np.nan)
            if match_arcsec is None:
                entry = None
            else:
                cat_c = SkyCoord(sweetCat['RA'],sweetCat['DEC'],
                        unit='hour,degree')
                idx, sep, _ = coords.match_to_catalog_sky(cat_c)
                if sep.arcsec[0] > match_arcsec:
                    raise ValueError('No matching star in SWEET-Cat')
                entry = sweetCat[idx]
            # NOTE(review): when match_arcsec is None, entry is None and
            # the bare excepts below silently set each property to None.
            try:
                self.teff = ufloat(entry['Teff'],entry['eTeff'])
                self.teff_note = "SWEET-Cat"
            except:
                self.teff = None
            try:
                # Prefer the Gaia-based log g when it is present (> 0).
                if entry['Logg_gaia'] > 0:
                    self.logg = ufloat(entry['Logg_gaia'],entry['eLogg_gaia'])
                    self.logg_note = "SWEET-Cat (gaia)"
                else:
                    self.logg = ufloat(entry['Logg'],entry['eLogg'])
                    self.logg_note = "SWEET-Cat (spec)"
            except:
                self.logg = None
            try:
                self.metal=ufloat(entry['[Fe/H]'], entry['e[Fe/H]'] )
                self.metal_note = "SWEET-Cat"
            except:
                self.metal = None
        # User defined values
        # (ufloat passed through as-is; a 2-tuple is unpacked into
        # ufloat(value, error))
        if teff:
            self.teff = teff if isinstance(teff, UFloat) else ufloat(*teff)
            self.teff_note = "User"
        if logg:
            self.logg = logg if isinstance(logg, UFloat) else ufloat(*logg)
            self.logg_note = "User"
        if metal:
            self.metal = metal if isinstance(metal, UFloat) else ufloat(*metal)
            self.metal_note = "User"
        # log rho from log g using method of Moya et al.
        # (2018ApJS..237...21M). Accuracy is 4.4%
        # NOTE(review): `if self.logg:` relies on UFloat truthiness;
        # `is not None` would be safer if log g could be exactly 0.
        self.logrho = None
        if self.logg:
            # Relation only calibrated for 3.697 < log g < 4.65.
            if (self.logg.n > 3.697) and (self.logg.n < 4.65):
                logrho = -7.352 + 1.6580*self.logg
                self.logrho = ufloat(logrho.n, np.hypot(logrho.s, 0.044))
        # --- Power-2 limb-darkening parameters h_1, h_2 in the CHEOPS
        # band: try Stagger grid first, then ATLAS, then PHOENIX-COND.
        self.h_1 = None
        self.h_2 = None
        self.ld_ref = None
        if self.teff and self.logg:
            # Assume solar metallicity +/- 0.3 when [Fe/H] is unknown.
            metal = self.metal if self.metal else ufloat(0,0.3)
            power2 = stagger_power2_interpolator()
            _,_,h_1,h_2 = power2(self.teff.n,self.logg.n,metal.n)
            if not np.isnan(h_1):
                self.ld_ref = 'Stagger'
                # Monte-Carlo propagation of the Teff/logg/[Fe/H]
                # errors through the interpolator (256 samples).
                Xteff = normal(self.teff.n, self.teff.s, 256)
                Xlogg = normal(self.logg.n, self.logg.s, 256)
                Xmetal = normal(metal.n, metal.s, 256)
                X = power2(Xteff,Xlogg,Xmetal)
                # Additional error derived in Maxted, 2019
                e_h_1 = np.hypot(0.01,np.sqrt(np.nanmean((X[:,2]-h_1)**2)))
                e_h_2 = np.hypot(0.05,np.sqrt(np.nanmean((X[:,3]-h_2)**2)))
                self.h_1 = ufloat(round(h_1,3),round(e_h_1,3))
                self.h_2 = ufloat(round(h_2,3),round(e_h_2,3))
            if self.ld_ref is None:
                # Fallback: ATLAS models, nominal error 0.1.
                atlas = atlas_h1h2_interpolator()
                h_1,h_2 = atlas(self.teff.n,self.logg.n,metal.n)
                if not np.isnan(h_1):
                    self.h_1 = ufloat(round(h_1,3),0.1)
                    self.h_2 = ufloat(round(h_2,3),0.1)
                    self.ld_ref = 'ATLAS'
            if self.ld_ref is None:
                # Last resort: PHOENIX-COND models (solar metallicity
                # only, hence no metal argument), nominal error 0.15.
                phoenix = phoenix_h1h2_interpolator()
                h_1,h_2 = phoenix(self.teff.n,self.logg.n)
                if not np.isnan(h_1):
                    self.h_1 = ufloat(round(h_1,3),0.15)
                    self.h_2 = ufloat(round(h_2,3),0.15)
                    self.ld_ref = 'PHOENIX-COND'
    def __repr__(self):
        """Return a human-readable summary of the stored properties."""
        s =  'Identifier : {}\n'.format(self.identifier)
        s += 'Coordinates: {} {}\n'.format(self.ra, self.dec)
        if self.teff:
            s += 'T_eff : {:5.0f} +/- {:3.0f} K   [{}]\n'.format(
                    self.teff.n, self.teff.s,self.teff_note)
        if self.logg:
            s += 'log g : {:5.2f} +/- {:0.2f}     [{}]\n'.format(
                    self.logg.n, self.logg.s, self.logg_note)
        if self.metal:
            s += '[M/H] : {:+5.2f} +/- {:0.2f}     [{}]\n'.format(
                    self.metal.n, self.metal.s, self.metal_note)
        if self.logrho:
            s += 'log rho : {:5.2f} +/- {:0.2f}     (solar units)\n'.format(
                    self.logrho.n, self.logrho.s)
        if self.ld_ref:
            s += 'h_1 : {:5.3f} +/- {:0.3f}    [{}]\n'.format(
                    self.h_1.n, self.h_1.s,self.ld_ref)
            s += 'h_2 : {:5.3f} +/- {:0.3f}    [{}]\n'.format(
                    self.h_2.n, self.h_2.s,self.ld_ref)
        return s
|
pmaxtedREPO_NAMEpycheopsPATH_START.@pycheops_extracted@pycheops-master@pycheops@starproperties.py@.PATH_END.py
|
{
"filename": "_bgcolorsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram2dcontour/hoverlabel/_bgcolorsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``histogram2dcontour.hoverlabel.bgcolorsrc``
    source-reference property."""

    def __init__(
        self,
        plotly_name="bgcolorsrc",
        parent_name="histogram2dcontour.hoverlabel",
        **kwargs
    ):
        # Pull the defaults out of kwargs so explicit caller values win.
        edit_type = kwargs.pop("edit_type", "none")
        role = kwargs.pop("role", "info")
        super(BgcolorsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram2dcontour@hoverlabel@_bgcolorsrc.py@.PATH_END.py
|
{
"filename": "plot_utils.py",
"repo_name": "matt77hias/fibpy",
"repo_path": "fibpy_extracted/fibpy-master/src/plot_utils.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
###############################################################################
## Plot Utilities 2D
###############################################################################
def set_equal_aspect_ratio_2D(ax, xs, ys, alpha=1.5, delta=0.0):
    """Center *ax* on the data with an equal aspect ratio.

    The view becomes a square window centered on the bounding box of
    (xs, ys), with half-width alpha * (largest half-extent) + delta.
    """
    ax.set_aspect('equal')
    lo = np.array([xs.min(), ys.min()])
    hi = np.array([xs.max(), ys.max()])
    half = 0.5 * (hi - lo)
    center = lo + half
    radius = alpha * np.max(half) + delta
    ax.set_xlim(center[0] - radius, center[0] + radius)
    ax.set_ylim(center[1] - radius, center[1] + radius)
def vis_samples_2D(ss, fname=None):
    """Scatter-plot 2D samples ``ss`` (shape (n, 2)).

    Shows the figure interactively when *fname* is None, otherwise
    saves it to *fname*. The figure is closed afterwards either way.
    """
    plt.figure()
    ax = plt.gca()
    xs, ys = ss[:,0], ss[:,1]
    ax.scatter(xs, ys)
    set_equal_aspect_ratio_2D(ax, xs, ys)
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    if fname is not None:
        plt.savefig(fname)
    else:
        plt.show()
    plt.close()
###############################################################################
## Plot Utilities 3D
###############################################################################
def set_equal_aspect_ratio_3D(ax, xs, ys, zs, alpha=1.5, delta=0.0):
    """Center *ax* on the data with an equal aspect ratio (3D).

    The view becomes a cube centered on the bounding box of
    (xs, ys, zs), with half-width alpha * (largest half-extent) + delta.
    NOTE(review): set_aspect('equal') raises on genuine 3D axes in some
    Matplotlib versions -- verify against the versions in use.
    """
    ax.set_aspect('equal')
    lo = np.array([xs.min(), ys.min(), zs.min()])
    hi = np.array([xs.max(), ys.max(), zs.max()])
    half = 0.5 * (hi - lo)
    center = lo + half
    radius = alpha * np.max(half) + delta
    ax.set_xlim(center[0] - radius, center[0] + radius)
    ax.set_ylim(center[1] - radius, center[1] + radius)
    ax.set_zlim(center[2] - radius, center[2] + radius)
def vis_samples_3D(ss, fname=None):
    """Scatter-plot 3D samples ``ss`` (shape (n, 3)).

    Shows the figure interactively when *fname* is None, otherwise
    saves it to *fname*. The figure is closed afterwards either way.
    """
    # Fix: plt.gca(projection='3d') was deprecated in Matplotlib 3.4
    # and removed in 3.6; request the 3D projection via add_subplot.
    fig = plt.figure()
    ax = fig.add_subplot(projection='3d')
    ax.scatter(ss[:,0], ss[:,1], ss[:,2])
    set_equal_aspect_ratio_3D(ax, ss[:,0], ss[:,1], ss[:,2])
    ax.set_xlabel("x")
    ax.set_ylabel("y")
    ax.set_zlabel("z")
    if fname is None:
        plt.show()
    else:
        plt.savefig(fname)
    plt.close()
|
matt77hiasREPO_NAMEfibpyPATH_START.@fibpy_extracted@fibpy-master@src@plot_utils.py@.PATH_END.py
|
{
"filename": "java-reference_catboostpredictions__get.md",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/catboost/docs/en/concepts/java-reference_catboostpredictions__get.md",
"type": "Markdown"
}
|
# get
```java
public double get(int objectIndex,
int predictionIndex)
```
#### {{ dl--purpose }}
{% include [reusage-java-java-reference_catboostpredictions__get__desc](../_includes/work_src/reusage-java/java-reference_catboostpredictions__get__desc.md) %}
#### {{ dl--parameters }}
**objectIndex**
The index of the object to get the prediction for.
**predictionIndex**
The index of the prediction dimension.
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@catboost@docs@en@concepts@java-reference_catboostpredictions__get.md@.PATH_END.py
|
{
"filename": "_funnelarea.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/_funnelarea.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceType as _BaseTraceType
import copy as _copy
class Funnelarea(_BaseTraceType):
# class properties
# --------------------
_parent_path_str = ""
_path_str = "funnelarea"
_valid_props = {
"aspectratio",
"baseratio",
"customdata",
"customdatasrc",
"dlabel",
"domain",
"hoverinfo",
"hoverinfosrc",
"hoverlabel",
"hovertemplate",
"hovertemplatesrc",
"hovertext",
"hovertextsrc",
"ids",
"idssrc",
"insidetextfont",
"label0",
"labels",
"labelssrc",
"legendgroup",
"marker",
"meta",
"metasrc",
"name",
"opacity",
"scalegroup",
"showlegend",
"stream",
"text",
"textfont",
"textinfo",
"textposition",
"textpositionsrc",
"textsrc",
"texttemplate",
"texttemplatesrc",
"title",
"type",
"uid",
"uirevision",
"values",
"valuessrc",
"visible",
}
# aspectratio
# -----------
@property
def aspectratio(self):
"""
Sets the ratio between height and width
The 'aspectratio' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["aspectratio"]
@aspectratio.setter
def aspectratio(self, val):
self["aspectratio"] = val
# baseratio
# ---------
@property
def baseratio(self):
"""
Sets the ratio between bottom length and maximum top length.
The 'baseratio' property is a number and may be specified as:
- An int or float in the interval [0, 1]
Returns
-------
int|float
"""
return self["baseratio"]
@baseratio.setter
def baseratio(self, val):
self["baseratio"] = val
# customdata
# ----------
@property
def customdata(self):
"""
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note that,
"scatter" traces also appends customdata items in the markers
DOM elements
The 'customdata' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["customdata"]
@customdata.setter
def customdata(self, val):
self["customdata"] = val
# customdatasrc
# -------------
@property
def customdatasrc(self):
"""
Sets the source reference on Chart Studio Cloud for customdata
.
The 'customdatasrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["customdatasrc"]
@customdatasrc.setter
def customdatasrc(self, val):
self["customdatasrc"] = val
# dlabel
# ------
@property
def dlabel(self):
"""
Sets the label step. See `label0` for more info.
The 'dlabel' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["dlabel"]
@dlabel.setter
def dlabel(self, val):
self["dlabel"] = val
# domain
# ------
@property
def domain(self):
"""
The 'domain' property is an instance of Domain
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnelarea.Domain`
- A dict of string/value properties that will be passed
to the Domain constructor
Supported dict properties:
column
If there is a layout grid, use the domain for
this column in the grid for this funnelarea
trace .
row
If there is a layout grid, use the domain for
this row in the grid for this funnelarea trace
.
x
Sets the horizontal domain of this funnelarea
trace (in plot fraction).
y
Sets the vertical domain of this funnelarea
trace (in plot fraction).
Returns
-------
plotly.graph_objs.funnelarea.Domain
"""
return self["domain"]
@domain.setter
def domain(self, val):
self["domain"] = val
# hoverinfo
# ---------
@property
def hoverinfo(self):
"""
Determines which trace information appear on hover. If `none`
or `skip` are set, no information is displayed upon hovering.
But, if `none` is set, click and hover events are still fired.
The 'hoverinfo' property is a flaglist and may be specified
as a string containing:
- Any combination of ['label', 'text', 'value', 'percent', 'name'] joined with '+' characters
(e.g. 'label+text')
OR exactly one of ['all', 'none', 'skip'] (e.g. 'skip')
- A list or array of the above
Returns
-------
Any|numpy.ndarray
"""
return self["hoverinfo"]
@hoverinfo.setter
def hoverinfo(self, val):
self["hoverinfo"] = val
# hoverinfosrc
# ------------
@property
def hoverinfosrc(self):
"""
Sets the source reference on Chart Studio Cloud for hoverinfo
.
The 'hoverinfosrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hoverinfosrc"]
@hoverinfosrc.setter
def hoverinfosrc(self, val):
self["hoverinfosrc"] = val
# hoverlabel
# ----------
@property
def hoverlabel(self):
"""
The 'hoverlabel' property is an instance of Hoverlabel
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnelarea.Hoverlabel`
- A dict of string/value properties that will be passed
to the Hoverlabel constructor
Supported dict properties:
align
Sets the horizontal alignment of the text
content within hover label box. Has an effect
only if the hover label text spans more two or
more lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
Returns
-------
plotly.graph_objs.funnelarea.Hoverlabel
"""
return self["hoverlabel"]
@hoverlabel.setter
def hoverlabel(self, val):
self["hoverlabel"] = val
# hovertemplate
# -------------
@property
def hovertemplate(self):
"""
Template string used for rendering the information that appear
on hover box. Note that this will override `hoverinfo`.
Variables are inserted using %{variable}, for example "y:
%{y}". Numbers are formatted using d3-format's syntax
%{variable:d3-format}, for example "Price: %{y:$.2f}".
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for details on
the formatting syntax. Dates are formatted using d3-time-
format's syntax %{variable|d3-time-format}, for example "Day:
%{2019-01-01|%A}". https://github.com/d3/d3-time-
format#locale_format for details on the date formatting syntax.
The variables available in `hovertemplate` are the ones emitted
as event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-data.
Additionally, every attributes that can be specified per-point
(the ones that are `arrayOk: true`) are available. variables
`label`, `color`, `value`, `text` and `percent`. Anything
contained in tag `<extra>` is displayed in the secondary box,
for example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag `<extra></extra>`.
The 'hovertemplate' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertemplate"]
@hovertemplate.setter
def hovertemplate(self, val):
self["hovertemplate"] = val
# hovertemplatesrc
# ----------------
@property
def hovertemplatesrc(self):
"""
Sets the source reference on Chart Studio Cloud for
hovertemplate .
The 'hovertemplatesrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertemplatesrc"]
@hovertemplatesrc.setter
def hovertemplatesrc(self, val):
self["hovertemplatesrc"] = val
# hovertext
# ---------
@property
def hovertext(self):
"""
Sets hover text elements associated with each sector. If a
single string, the same string appears for all data points. If
an array of string, the items are mapped in order of this
trace's sectors. To be seen, trace `hoverinfo` must contain a
"text" flag.
The 'hovertext' property is a string and must be specified as:
- A string
- A number that will be converted to a string
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
str|numpy.ndarray
"""
return self["hovertext"]
@hovertext.setter
def hovertext(self, val):
self["hovertext"] = val
# hovertextsrc
# ------------
@property
def hovertextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for hovertext
.
The 'hovertextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["hovertextsrc"]
@hovertextsrc.setter
def hovertextsrc(self, val):
self["hovertextsrc"] = val
# ids
# ---
@property
def ids(self):
"""
Assigns id labels to each datum. These ids for object constancy
of data points during animation. Should be an array of strings,
not numbers or any other type.
The 'ids' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ids"]
@ids.setter
def ids(self, val):
self["ids"] = val
# idssrc
# ------
@property
def idssrc(self):
"""
Sets the source reference on Chart Studio Cloud for ids .
The 'idssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["idssrc"]
@idssrc.setter
def idssrc(self, val):
self["idssrc"] = val
# insidetextfont
# --------------
@property
def insidetextfont(self):
"""
Sets the font used for `textinfo` lying inside the sector.
The 'insidetextfont' property is an instance of Insidetextfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.funnelarea.Insidetextfont`
- A dict of string/value properties that will be passed
to the Insidetextfont constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for color .
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for family .
size
sizesrc
Sets the source reference on Chart Studio Cloud
for size .
Returns
-------
plotly.graph_objs.funnelarea.Insidetextfont
"""
return self["insidetextfont"]
@insidetextfont.setter
def insidetextfont(self, val):
self["insidetextfont"] = val
# label0
# ------
@property
def label0(self):
"""
Alternate to `labels`. Builds a numeric set of labels. Use with
`dlabel` where `label0` is the starting label and `dlabel` the
step.
The 'label0' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["label0"]
@label0.setter
def label0(self, val):
self["label0"] = val
# labels
# ------
@property
def labels(self):
"""
Sets the sector labels. If `labels` entries are duplicated, we
sum associated `values` or simply count occurrences if `values`
is not provided. For other array attributes (including color)
we use the first non-empty entry among all occurrences of the
label.
The 'labels' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["labels"]
@labels.setter
def labels(self, val):
self["labels"] = val
# labelssrc
# ---------
@property
def labelssrc(self):
"""
Sets the source reference on Chart Studio Cloud for labels .
The 'labelssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["labelssrc"]
@labelssrc.setter
def labelssrc(self, val):
self["labelssrc"] = val
# legendgroup
# -----------
@property
def legendgroup(self):
"""
Sets the legend group for this trace. Traces part of the same
legend group hide/show at the same time when toggling legend
items.
The 'legendgroup' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["legendgroup"]
@legendgroup.setter
def legendgroup(self, val):
self["legendgroup"] = val
    # --- Property accessors --------------------------------------------
    # Every accessor below delegates to the base class's __getitem__ /
    # __setitem__, which validate and coerce values against the
    # funnelarea trace schema.

    @property
    def marker(self):
        """
        Marker settings for the sectors.

        May be given as a :class:`plotly.graph_objs.funnelarea.Marker`
        instance or as a dict of compatible properties (``colors``,
        ``colorssrc``, ``line``).

        Returns
        -------
        plotly.graph_objs.funnelarea.Marker
        """
        return self["marker"]

    @marker.setter
    def marker(self, val):
        self["marker"] = val

    @property
    def meta(self):
        """
        Extra meta information associated with this trace, usable in
        text attributes (trace `name`, titles, annotation text, slider
        labels, ...) via ``%{meta[i]}`` template references. Accepts
        values of any type.

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["meta"]

    @meta.setter
    def meta(self, val):
        self["meta"] = val

    @property
    def metasrc(self):
        """
        Chart Studio Cloud source reference for `meta` (a string or a
        plotly.grid_objs.Column object).

        Returns
        -------
        str
        """
        return self["metasrc"]

    @metasrc.setter
    def metasrc(self, val):
        self["metasrc"] = val

    @property
    def name(self):
        """
        Trace name, shown as the legend item and on hover. A string, or
        a number that will be converted to a string.

        Returns
        -------
        str
        """
        return self["name"]

    @name.setter
    def name(self, val):
        self["name"] = val

    @property
    def opacity(self):
        """
        Opacity of the trace: an int or float in the interval [0, 1].

        Returns
        -------
        int|float
        """
        return self["opacity"]

    @opacity.setter
    def opacity(self, val):
        self["opacity"] = val

    @property
    def scalegroup(self):
        """
        Group id linking multiple funnelareas that should be sized
        according to their totals; a non-empty string (or number
        converted to a string) shared by every trace in the group.

        Returns
        -------
        str
        """
        return self["scalegroup"]

    @scalegroup.setter
    def scalegroup(self, val):
        self["scalegroup"] = val

    @property
    def showlegend(self):
        """
        Whether an item corresponding to this trace is shown in the
        legend (bool).

        Returns
        -------
        bool
        """
        return self["showlegend"]

    @showlegend.setter
    def showlegend(self, val):
        self["showlegend"] = val

    @property
    def stream(self):
        """
        Streaming configuration, as a
        :class:`plotly.graph_objs.funnelarea.Stream` instance or a dict
        of compatible properties (``maxpoints``, ``token``).

        Returns
        -------
        plotly.graph_objs.funnelarea.Stream
        """
        return self["stream"]

    @stream.setter
    def stream(self, val):
        self["stream"] = val

    @property
    def text(self):
        """
        Text elements associated with each sector (tuple, list, numpy
        array or pandas Series). Shown on the chart when `textinfo`
        contains a "text" flag, and in hover labels when `hoverinfo`
        contains "text" and `hovertext` is unset.

        Returns
        -------
        numpy.ndarray
        """
        return self["text"]

    @text.setter
    def text(self, val):
        self["text"] = val

    @property
    def textfont(self):
        """
        Font used for `textinfo`, as a
        :class:`plotly.graph_objs.funnelarea.Textfont` instance or a
        dict of compatible properties (``color``, ``colorsrc``,
        ``family``, ``familysrc``, ``size``, ``sizesrc``).

        Returns
        -------
        plotly.graph_objs.funnelarea.Textfont
        """
        return self["textfont"]

    @textfont.setter
    def textfont(self, val):
        self["textfont"] = val

    @property
    def textinfo(self):
        """
        Which trace information appears on the graph: a flaglist string
        combining 'label', 'text', 'value', 'percent' with '+'
        characters (e.g. 'label+text'), or exactly 'none'.

        Returns
        -------
        Any
        """
        return self["textinfo"]

    @textinfo.setter
    def textinfo(self, val):
        self["textinfo"] = val

    @property
    def textposition(self):
        """
        Location of the `textinfo`: 'inside' or 'none', or an array of
        those values.

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["textposition"]

    @textposition.setter
    def textposition(self, val):
        self["textposition"] = val

    @property
    def textpositionsrc(self):
        """
        Chart Studio Cloud source reference for `textposition`.

        Returns
        -------
        str
        """
        return self["textpositionsrc"]

    @textpositionsrc.setter
    def textpositionsrc(self, val):
        self["textpositionsrc"] = val

    @property
    def textsrc(self):
        """
        Chart Studio Cloud source reference for `text`.

        Returns
        -------
        str
        """
        return self["textsrc"]

    @textsrc.setter
    def textsrc(self, val):
        self["textsrc"] = val

    @property
    def texttemplate(self):
        """
        Template string for the text rendered on points; overrides
        `textinfo`. Variables are inserted with %{variable} and
        formatted with d3-format / d3-time-format syntax; the per-point
        variables `label`, `color`, `value`, `text` and `percent` are
        available. A string, a number, or an array of those.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["texttemplate"]

    @texttemplate.setter
    def texttemplate(self, val):
        self["texttemplate"] = val

    @property
    def texttemplatesrc(self):
        """
        Chart Studio Cloud source reference for `texttemplate`.

        Returns
        -------
        str
        """
        return self["texttemplatesrc"]

    @texttemplatesrc.setter
    def texttemplatesrc(self, val):
        self["texttemplatesrc"] = val

    @property
    def title(self):
        """
        Chart title, as a :class:`plotly.graph_objs.funnelarea.Title`
        instance or a dict of compatible properties (``font``,
        ``position``, ``text``).

        Returns
        -------
        plotly.graph_objs.funnelarea.Title
        """
        return self["title"]

    @title.setter
    def title(self, val):
        self["title"] = val

    @property
    def uid(self):
        """
        Id assigned to this trace, used for object constancy across
        animations and transitions. A string, or a number converted to
        a string.

        Returns
        -------
        str
        """
        return self["uid"]

    @uid.setter
    def uid(self, val):
        self["uid"] = val

    @property
    def uirevision(self):
        """
        Controls persistence of user-driven changes tied to this trace
        (defaults to `layout.uirevision`). Changes are tracked by
        `uid`, falling back on trace index when no `uid` is given.
        Accepts values of any type.

        Returns
        -------
        Any
        """
        return self["uirevision"]

    @uirevision.setter
    def uirevision(self, val):
        self["uirevision"] = val

    @property
    def values(self):
        """
        Values of the sectors (tuple, list, numpy array or pandas
        Series). If omitted, occurrences of each label are counted.

        Returns
        -------
        numpy.ndarray
        """
        return self["values"]

    @values.setter
    def values(self, val):
        self["values"] = val

    @property
    def valuessrc(self):
        """
        Chart Studio Cloud source reference for `values`.

        Returns
        -------
        str
        """
        return self["valuessrc"]

    @valuessrc.setter
    def valuessrc(self, val):
        self["valuessrc"] = val

    @property
    def visible(self):
        """
        Whether this trace is visible: True, False or 'legendonly'
        (trace not drawn, but still listed in the legend).

        Returns
        -------
        Any
        """
        return self["visible"]

    @visible.setter
    def visible(self, val):
        self["visible"] = val

    @property
    def type(self):
        # Read-only trace type literal ("funnelarea"), set in __init__;
        # no setter is generated for it.
        return self._props["type"]
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # Human-readable description of every settable property. This
        # is runtime data (consumed by the plotly base machinery when
        # building docstrings and error messages), not a docstring, so
        # the string literal must stay intact.
        return """\
        aspectratio
            Sets the ratio between height and width
        baseratio
            Sets the ratio between bottom length and maximum top
            length.
        customdata
            Assigns extra data each datum. This may be useful when
            listening to hover, click and selection events. Note
            that, "scatter" traces also appends customdata items in
            the markers DOM elements
        customdatasrc
            Sets the source reference on Chart Studio Cloud for
            customdata .
        dlabel
            Sets the label step. See `label0` for more info.
        domain
            :class:`plotly.graph_objects.funnelarea.Domain`
            instance or dict with compatible properties
        hoverinfo
            Determines which trace information appear on hover. If
            `none` or `skip` are set, no information is displayed
            upon hovering. But, if `none` is set, click and hover
            events are still fired.
        hoverinfosrc
            Sets the source reference on Chart Studio Cloud for
            hoverinfo .
        hoverlabel
            :class:`plotly.graph_objects.funnelarea.Hoverlabel`
            instance or dict with compatible properties
        hovertemplate
            Template string used for rendering the information that
            appear on hover box. Note that this will override
            `hoverinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}".
            https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. The variables
            available in `hovertemplate` are the ones emitted as
            event data described at this link
            https://plotly.com/javascript/plotlyjs-events/#event-
            data. Additionally, every attributes that can be
            specified per-point (the ones that are `arrayOk: true`)
            are available. variables `label`, `color`, `value`,
            `text` and `percent`. Anything contained in tag
            `<extra>` is displayed in the secondary box, for
            example "<extra>{fullData.name}</extra>". To hide the
            secondary box completely, use an empty tag
            `<extra></extra>`.
        hovertemplatesrc
            Sets the source reference on Chart Studio Cloud for
            hovertemplate .
        hovertext
            Sets hover text elements associated with each sector.
            If a single string, the same string appears for all
            data points. If an array of string, the items are
            mapped in order of this trace's sectors. To be seen,
            trace `hoverinfo` must contain a "text" flag.
        hovertextsrc
            Sets the source reference on Chart Studio Cloud for
            hovertext .
        ids
            Assigns id labels to each datum. These ids for object
            constancy of data points during animation. Should be an
            array of strings, not numbers or any other type.
        idssrc
            Sets the source reference on Chart Studio Cloud for
            ids .
        insidetextfont
            Sets the font used for `textinfo` lying inside the
            sector.
        label0
            Alternate to `labels`. Builds a numeric set of labels.
            Use with `dlabel` where `label0` is the starting label
            and `dlabel` the step.
        labels
            Sets the sector labels. If `labels` entries are
            duplicated, we sum associated `values` or simply count
            occurrences if `values` is not provided. For other
            array attributes (including color) we use the first
            non-empty entry among all occurrences of the label.
        labelssrc
            Sets the source reference on Chart Studio Cloud for
            labels .
        legendgroup
            Sets the legend group for this trace. Traces part of
            the same legend group hide/show at the same time when
            toggling legend items.
        marker
            :class:`plotly.graph_objects.funnelarea.Marker`
            instance or dict with compatible properties
        meta
            Assigns extra meta information associated with this
            trace that can be used in various text attributes.
            Attributes such as trace `name`, graph, axis and
            colorbar `title.text`, annotation `text`
            `rangeselector`, `updatemenues` and `sliders` `label`
            text all support `meta`. To access the trace `meta`
            values in an attribute in the same trace, simply use
            `%{meta[i]}` where `i` is the index or key of the
            `meta` item in question. To access trace `meta` in
            layout attributes, use `%{data[n[.meta[i]}` where `i`
            is the index or key of the `meta` and `n` is the trace
            index.
        metasrc
            Sets the source reference on Chart Studio Cloud for
            meta .
        name
            Sets the trace name. The trace name appear as the
            legend item and on hover.
        opacity
            Sets the opacity of the trace.
        scalegroup
            If there are multiple funnelareas that should be sized
            according to their totals, link them by providing a
            non-empty group id here shared by every trace in the
            same group.
        showlegend
            Determines whether or not an item corresponding to this
            trace is shown in the legend.
        stream
            :class:`plotly.graph_objects.funnelarea.Stream`
            instance or dict with compatible properties
        text
            Sets text elements associated with each sector. If
            trace `textinfo` contains a "text" flag, these elements
            will be seen on the chart. If trace `hoverinfo`
            contains a "text" flag and "hovertext" is not set,
            these elements will be seen in the hover labels.
        textfont
            Sets the font used for `textinfo`.
        textinfo
            Determines which trace information appear on the graph.
        textposition
            Specifies the location of the `textinfo`.
        textpositionsrc
            Sets the source reference on Chart Studio Cloud for
            textposition .
        textsrc
            Sets the source reference on Chart Studio Cloud for
            text .
        texttemplate
            Template string used for rendering the information text
            that appear on points. Note that this will override
            `textinfo`. Variables are inserted using %{variable},
            for example "y: %{y}". Numbers are formatted using
            d3-format's syntax %{variable:d3-format}, for example
            "Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
            reference/blob/master/Formatting.md#d3_format for
            details on the formatting syntax. Dates are formatted
            using d3-time-format's syntax %{variable|d3-time-
            format}, for example "Day: %{2019-01-01|%A}".
            https://github.com/d3/d3-time-format#locale_format for
            details on the date formatting syntax. Every attributes
            that can be specified per-point (the ones that are
            `arrayOk: true`) are available. variables `label`,
            `color`, `value`, `text` and `percent`.
        texttemplatesrc
            Sets the source reference on Chart Studio Cloud for
            texttemplate .
        title
            :class:`plotly.graph_objects.funnelarea.Title` instance
            or dict with compatible properties
        uid
            Assign an id to this trace, Use this to provide object
            constancy between traces during animations and
            transitions.
        uirevision
            Controls persistence of some user-driven changes to the
            trace: `constraintrange` in `parcoords` traces, as well
            as some `editable: true` modifications such as `name`
            and `colorbar.title`. Defaults to `layout.uirevision`.
            Note that other user-driven trace attribute changes are
            controlled by `layout` attributes: `trace.visible` is
            controlled by `layout.legend.uirevision`,
            `selectedpoints` is controlled by
            `layout.selectionrevision`, and `colorbar.(x|y)`
            (accessible with `config: {editable: true}`) is
            controlled by `layout.editrevision`. Trace changes are
            tracked by `uid`, which only falls back on trace index
            if no `uid` is provided. So if your app can add/remove
            traces before the end of the `data` array, such that
            the same trace has a different index, you can still
            preserve user-driven changes if you give each trace a
            `uid` that stays with it as it moves.
        values
            Sets the values of the sectors. If omitted, we count
            occurrences of each label.
        valuessrc
            Sets the source reference on Chart Studio Cloud for
            values .
        visible
            Determines whether or not this trace is visible. If
            "legendonly", the trace is not drawn, but can appear as
            a legend item (provided that the legend itself is
            visible).
        """
def __init__(
self,
arg=None,
aspectratio=None,
baseratio=None,
customdata=None,
customdatasrc=None,
dlabel=None,
domain=None,
hoverinfo=None,
hoverinfosrc=None,
hoverlabel=None,
hovertemplate=None,
hovertemplatesrc=None,
hovertext=None,
hovertextsrc=None,
ids=None,
idssrc=None,
insidetextfont=None,
label0=None,
labels=None,
labelssrc=None,
legendgroup=None,
marker=None,
meta=None,
metasrc=None,
name=None,
opacity=None,
scalegroup=None,
showlegend=None,
stream=None,
text=None,
textfont=None,
textinfo=None,
textposition=None,
textpositionsrc=None,
textsrc=None,
texttemplate=None,
texttemplatesrc=None,
title=None,
uid=None,
uirevision=None,
values=None,
valuessrc=None,
visible=None,
**kwargs
):
"""
Construct a new Funnelarea object
Visualize stages in a process using area-encoded trapezoids.
This trace can be used to show data in a part-to-whole
representation similar to a "pie" trace, wherein each item
appears in a single stage. See also the "funnel" trace type for
a different approach to visualizing funnel data.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of :class:`plotly.graph_objs.Funnelarea`
aspectratio
Sets the ratio between height and width
baseratio
Sets the ratio between bottom length and maximum top
length.
customdata
Assigns extra data each datum. This may be useful when
listening to hover, click and selection events. Note
that, "scatter" traces also appends customdata items in
the markers DOM elements
customdatasrc
Sets the source reference on Chart Studio Cloud for
customdata .
dlabel
Sets the label step. See `label0` for more info.
domain
:class:`plotly.graph_objects.funnelarea.Domain`
instance or dict with compatible properties
hoverinfo
Determines which trace information appear on hover. If
`none` or `skip` are set, no information is displayed
upon hovering. But, if `none` is set, click and hover
events are still fired.
hoverinfosrc
Sets the source reference on Chart Studio Cloud for
hoverinfo .
hoverlabel
:class:`plotly.graph_objects.funnelarea.Hoverlabel`
instance or dict with compatible properties
hovertemplate
Template string used for rendering the information that
appear on hover box. Note that this will override
`hoverinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. The variables
available in `hovertemplate` are the ones emitted as
event data described at this link
https://plotly.com/javascript/plotlyjs-events/#event-
data. Additionally, every attributes that can be
specified per-point (the ones that are `arrayOk: true`)
are available. variables `label`, `color`, `value`,
`text` and `percent`. Anything contained in tag
`<extra>` is displayed in the secondary box, for
example "<extra>{fullData.name}</extra>". To hide the
secondary box completely, use an empty tag
`<extra></extra>`.
hovertemplatesrc
Sets the source reference on Chart Studio Cloud for
hovertemplate .
hovertext
Sets hover text elements associated with each sector.
If a single string, the same string appears for all
data points. If an array of string, the items are
mapped in order of this trace's sectors. To be seen,
trace `hoverinfo` must contain a "text" flag.
hovertextsrc
Sets the source reference on Chart Studio Cloud for
hovertext .
ids
Assigns id labels to each datum. These ids for object
constancy of data points during animation. Should be an
array of strings, not numbers or any other type.
idssrc
Sets the source reference on Chart Studio Cloud for
ids .
insidetextfont
Sets the font used for `textinfo` lying inside the
sector.
label0
Alternate to `labels`. Builds a numeric set of labels.
Use with `dlabel` where `label0` is the starting label
and `dlabel` the step.
labels
Sets the sector labels. If `labels` entries are
duplicated, we sum associated `values` or simply count
occurrences if `values` is not provided. For other
array attributes (including color) we use the first
non-empty entry among all occurrences of the label.
labelssrc
Sets the source reference on Chart Studio Cloud for
labels .
legendgroup
Sets the legend group for this trace. Traces part of
the same legend group hide/show at the same time when
toggling legend items.
marker
:class:`plotly.graph_objects.funnelarea.Marker`
instance or dict with compatible properties
meta
Assigns extra meta information associated with this
trace that can be used in various text attributes.
Attributes such as trace `name`, graph, axis and
colorbar `title.text`, annotation `text`
`rangeselector`, `updatemenues` and `sliders` `label`
text all support `meta`. To access the trace `meta`
values in an attribute in the same trace, simply use
`%{meta[i]}` where `i` is the index or key of the
`meta` item in question. To access trace `meta` in
layout attributes, use `%{data[n[.meta[i]}` where `i`
is the index or key of the `meta` and `n` is the trace
index.
metasrc
Sets the source reference on Chart Studio Cloud for
meta .
name
Sets the trace name. The trace name appear as the
legend item and on hover.
opacity
Sets the opacity of the trace.
scalegroup
If there are multiple funnelareas that should be sized
according to their totals, link them by providing a
non-empty group id here shared by every trace in the
same group.
showlegend
Determines whether or not an item corresponding to this
trace is shown in the legend.
stream
:class:`plotly.graph_objects.funnelarea.Stream`
instance or dict with compatible properties
text
Sets text elements associated with each sector. If
trace `textinfo` contains a "text" flag, these elements
will be seen on the chart. If trace `hoverinfo`
contains a "text" flag and "hovertext" is not set,
these elements will be seen in the hover labels.
textfont
Sets the font used for `textinfo`.
textinfo
Determines which trace information appear on the graph.
textposition
Specifies the location of the `textinfo`.
textpositionsrc
Sets the source reference on Chart Studio Cloud for
textposition .
textsrc
Sets the source reference on Chart Studio Cloud for
text .
texttemplate
Template string used for rendering the information text
that appear on points. Note that this will override
`textinfo`. Variables are inserted using %{variable},
for example "y: %{y}". Numbers are formatted using
d3-format's syntax %{variable:d3-format}, for example
"Price: %{y:$.2f}". https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format for
details on the formatting syntax. Dates are formatted
using d3-time-format's syntax %{variable|d3-time-
format}, for example "Day: %{2019-01-01|%A}".
https://github.com/d3/d3-time-format#locale_format for
details on the date formatting syntax. Every attributes
that can be specified per-point (the ones that are
`arrayOk: true`) are available. variables `label`,
`color`, `value`, `text` and `percent`.
texttemplatesrc
Sets the source reference on Chart Studio Cloud for
texttemplate .
title
:class:`plotly.graph_objects.funnelarea.Title` instance
or dict with compatible properties
uid
Assign an id to this trace, Use this to provide object
constancy between traces during animations and
transitions.
uirevision
Controls persistence of some user-driven changes to the
trace: `constraintrange` in `parcoords` traces, as well
as some `editable: true` modifications such as `name`
and `colorbar.title`. Defaults to `layout.uirevision`.
Note that other user-driven trace attribute changes are
controlled by `layout` attributes: `trace.visible` is
controlled by `layout.legend.uirevision`,
`selectedpoints` is controlled by
`layout.selectionrevision`, and `colorbar.(x|y)`
(accessible with `config: {editable: true}`) is
controlled by `layout.editrevision`. Trace changes are
tracked by `uid`, which only falls back on trace index
if no `uid` is provided. So if your app can add/remove
traces before the end of the `data` array, such that
the same trace has a different index, you can still
preserve user-driven changes if you give each trace a
`uid` that stays with it as it moves.
values
Sets the values of the sectors. If omitted, we count
occurrences of each label.
valuessrc
Sets the source reference on Chart Studio Cloud for
values .
visible
Determines whether or not this trace is visible. If
"legendonly", the trace is not drawn, but can appear as
a legend item (provided that the legend itself is
visible).
Returns
-------
Funnelarea
"""
super(Funnelarea, self).__init__("funnelarea")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.Funnelarea
constructor must be a dict or
an instance of :class:`plotly.graph_objs.Funnelarea`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("aspectratio", None)
_v = aspectratio if aspectratio is not None else _v
if _v is not None:
self["aspectratio"] = _v
_v = arg.pop("baseratio", None)
_v = baseratio if baseratio is not None else _v
if _v is not None:
self["baseratio"] = _v
_v = arg.pop("customdata", None)
_v = customdata if customdata is not None else _v
if _v is not None:
self["customdata"] = _v
_v = arg.pop("customdatasrc", None)
_v = customdatasrc if customdatasrc is not None else _v
if _v is not None:
self["customdatasrc"] = _v
_v = arg.pop("dlabel", None)
_v = dlabel if dlabel is not None else _v
if _v is not None:
self["dlabel"] = _v
_v = arg.pop("domain", None)
_v = domain if domain is not None else _v
if _v is not None:
self["domain"] = _v
_v = arg.pop("hoverinfo", None)
_v = hoverinfo if hoverinfo is not None else _v
if _v is not None:
self["hoverinfo"] = _v
_v = arg.pop("hoverinfosrc", None)
_v = hoverinfosrc if hoverinfosrc is not None else _v
if _v is not None:
self["hoverinfosrc"] = _v
_v = arg.pop("hoverlabel", None)
_v = hoverlabel if hoverlabel is not None else _v
if _v is not None:
self["hoverlabel"] = _v
_v = arg.pop("hovertemplate", None)
_v = hovertemplate if hovertemplate is not None else _v
if _v is not None:
self["hovertemplate"] = _v
_v = arg.pop("hovertemplatesrc", None)
_v = hovertemplatesrc if hovertemplatesrc is not None else _v
if _v is not None:
self["hovertemplatesrc"] = _v
_v = arg.pop("hovertext", None)
_v = hovertext if hovertext is not None else _v
if _v is not None:
self["hovertext"] = _v
_v = arg.pop("hovertextsrc", None)
_v = hovertextsrc if hovertextsrc is not None else _v
if _v is not None:
self["hovertextsrc"] = _v
_v = arg.pop("ids", None)
_v = ids if ids is not None else _v
if _v is not None:
self["ids"] = _v
_v = arg.pop("idssrc", None)
_v = idssrc if idssrc is not None else _v
if _v is not None:
self["idssrc"] = _v
_v = arg.pop("insidetextfont", None)
_v = insidetextfont if insidetextfont is not None else _v
if _v is not None:
self["insidetextfont"] = _v
_v = arg.pop("label0", None)
_v = label0 if label0 is not None else _v
if _v is not None:
self["label0"] = _v
_v = arg.pop("labels", None)
_v = labels if labels is not None else _v
if _v is not None:
self["labels"] = _v
_v = arg.pop("labelssrc", None)
_v = labelssrc if labelssrc is not None else _v
if _v is not None:
self["labelssrc"] = _v
_v = arg.pop("legendgroup", None)
_v = legendgroup if legendgroup is not None else _v
if _v is not None:
self["legendgroup"] = _v
_v = arg.pop("marker", None)
_v = marker if marker is not None else _v
if _v is not None:
self["marker"] = _v
_v = arg.pop("meta", None)
_v = meta if meta is not None else _v
if _v is not None:
self["meta"] = _v
_v = arg.pop("metasrc", None)
_v = metasrc if metasrc is not None else _v
if _v is not None:
self["metasrc"] = _v
_v = arg.pop("name", None)
_v = name if name is not None else _v
if _v is not None:
self["name"] = _v
_v = arg.pop("opacity", None)
_v = opacity if opacity is not None else _v
if _v is not None:
self["opacity"] = _v
_v = arg.pop("scalegroup", None)
_v = scalegroup if scalegroup is not None else _v
if _v is not None:
self["scalegroup"] = _v
_v = arg.pop("showlegend", None)
_v = showlegend if showlegend is not None else _v
if _v is not None:
self["showlegend"] = _v
_v = arg.pop("stream", None)
_v = stream if stream is not None else _v
if _v is not None:
self["stream"] = _v
_v = arg.pop("text", None)
_v = text if text is not None else _v
if _v is not None:
self["text"] = _v
_v = arg.pop("textfont", None)
_v = textfont if textfont is not None else _v
if _v is not None:
self["textfont"] = _v
_v = arg.pop("textinfo", None)
_v = textinfo if textinfo is not None else _v
if _v is not None:
self["textinfo"] = _v
_v = arg.pop("textposition", None)
_v = textposition if textposition is not None else _v
if _v is not None:
self["textposition"] = _v
_v = arg.pop("textpositionsrc", None)
_v = textpositionsrc if textpositionsrc is not None else _v
if _v is not None:
self["textpositionsrc"] = _v
_v = arg.pop("textsrc", None)
_v = textsrc if textsrc is not None else _v
if _v is not None:
self["textsrc"] = _v
_v = arg.pop("texttemplate", None)
_v = texttemplate if texttemplate is not None else _v
if _v is not None:
self["texttemplate"] = _v
_v = arg.pop("texttemplatesrc", None)
_v = texttemplatesrc if texttemplatesrc is not None else _v
if _v is not None:
self["texttemplatesrc"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("uid", None)
_v = uid if uid is not None else _v
if _v is not None:
self["uid"] = _v
_v = arg.pop("uirevision", None)
_v = uirevision if uirevision is not None else _v
if _v is not None:
self["uirevision"] = _v
_v = arg.pop("values", None)
_v = values if values is not None else _v
if _v is not None:
self["values"] = _v
_v = arg.pop("valuessrc", None)
_v = valuessrc if valuessrc is not None else _v
if _v is not None:
self["valuessrc"] = _v
_v = arg.pop("visible", None)
_v = visible if visible is not None else _v
if _v is not None:
self["visible"] = _v
# Read-only literals
# ------------------
self._props["type"] = "funnelarea"
arg.pop("type", None)
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@_funnelarea.py@.PATH_END.py
|
{
"filename": "downsample.py",
"repo_name": "AlexandreAdam/score_models",
"repo_path": "score_models_extracted/score_models-master/score_models/layers/downsample.py",
"type": "Python"
}
|
import torch
import torch.nn.functional as F
from .conv_layers import conv3x3
from .up_or_downsampling import downsample
from .style_gan_conv import StyleGANConv
from ..definitions import default_init
AVGPOOL_FUNC = {1: F.avg_pool1d,
                2: F.avg_pool2d,
                3: F.avg_pool3d}


class DownsampleLayer(torch.nn.Module):
    """Spatially downsample a feature map by a factor of 2.

    Three mutually exclusive strategies, chosen at construction time:
      * plain average pooling                 (with_conv=False, fir=False)
      * zero-padded stride-2 3x3 convolution  (with_conv=True,  fir=False)
      * FIR-filtered resampling, optionally fused with a StyleGAN-style
        convolution                           (fir=True)
    """

    def __init__(
        self,
        in_ch=None,
        out_ch=None,
        with_conv=False,
        fir=False,
        fir_kernel=(1, 3, 3, 1),
        dimensions: int = 2,
    ):
        super().__init__()
        if out_ch is None:
            out_ch = in_ch
        # Changing the channel count is only possible through a convolution.
        if out_ch != in_ch:
            assert with_conv
        self.dimensions = dimensions
        self.with_conv = with_conv
        self.out_ch = out_ch
        self.fir = fir
        self.fir_kernel = fir_kernel
        if with_conv:
            if fir:
                self.Conv_0 = StyleGANConv(
                    in_ch, out_ch,
                    kernel=3, down=True,
                    resample_kernel=fir_kernel,
                    use_bias=True,
                    kernel_init=default_init(),
                    dimensions=dimensions,
                )
            else:
                self.Conv_0 = conv3x3(in_ch, out_ch, stride=2, dimensions=dimensions)

    def forward(self, x):
        if self.fir:
            if self.with_conv:
                return self.Conv_0(x)
            return downsample(x, self.fir_kernel, factor=2,
                              dimensions=self.dimensions)
        if self.with_conv:
            # Pad each spatial dimension by one on its trailing side so the
            # stride-2 convolution also covers odd-sized inputs.
            x = F.pad(x, [0, 1] * self.dimensions)
            return self.Conv_0(x)
        return AVGPOOL_FUNC[self.dimensions](x, 2, stride=2)
|
AlexandreAdamREPO_NAMEscore_modelsPATH_START.@score_models_extracted@score_models-master@score_models@layers@downsample.py@.PATH_END.py
|
{
"filename": "fhd_cal.py",
"repo_name": "RadioAstronomySoftwareGroup/pyuvdata",
"repo_path": "pyuvdata_extracted/pyuvdata-main/src/pyuvdata/uvcal/fhd_cal.py",
"type": "Python"
}
|
# Copyright (c) 2018 Radio Astronomy Software Group
# Licensed under the 2-clause BSD License
"""Class for reading FHD calibration save files."""
import os
import warnings
import numpy as np
from astropy import units
from astropy.coordinates import EarthLocation
from docstring_parser import DocstringStyle
from scipy.io import readsav
from .. import utils
from ..docstrings import copy_replace_short_description
from ..utils.io import fhd as fhd_utils
from . import UVCal
__all__ = ["FHDCal"]
class FHDCal(UVCal):
    """
    Defines a FHD-specific subclass of UVCal for reading FHD calibration save files.

    This class should not be interacted with directly, instead use the read_fhd_cal
    method on the UVCal class.
    """

    @copy_replace_short_description(UVCal.read_fhd_cal, style=DocstringStyle.NUMPYDOC)
    def read_fhd_cal(
        self,
        *,
        cal_file,
        obs_file,
        layout_file=None,
        settings_file=None,
        raw=True,
        read_data=True,
        background_lsts=True,
        extra_history=None,
        run_check=True,
        check_extra=True,
        run_check_acceptability=True,
        use_future_array_shapes=None,
        astrometry_library=None,
    ):
        """Read data from an FHD cal.sav file."""
        self._set_future_array_shapes(use_future_array_shapes=use_future_array_shapes)

        if not read_data and settings_file is None:
            raise ValueError("A settings_file must be provided if read_data is False.")

        filenames = fhd_utils.fhd_filenames(
            obs_file=obs_file,
            layout_file=layout_file,
            settings_file=settings_file,
            cal_file=cal_file,
        )
        self.filename = filenames
        # keep the parameter's expected shape in sync with the file list
        self._filename.form = (len(self.filename),)

        this_dict = readsav(obs_file, python_dict=True)
        obs_data = this_dict["obs"]
        bl_info = obs_data["BASELINE_INFO"][0]
        astrometry = obs_data["ASTR"][0]

        self.Nspws = 1
        self.spw_array = np.array([0])

        self.Nfreqs = int(obs_data["N_FREQ"][0])
        self.freq_array = np.zeros(len(bl_info["FREQ"][0]), dtype=np.float64)
        self.freq_array[:] = bl_info["FREQ"][0]
        self.channel_width = np.full(self.Nfreqs, float(obs_data["FREQ_RES"][0]))
        self.flex_spw_id_array = np.zeros(self.Nfreqs, dtype=int)

        # FHD only calculates one calibration over all the times.
        # obs_data.n_time /cal_data.n_times gives the number of times that goes into
        # that one calibration, UVCal.Ntimes gives the number of separate calibrations
        # along the time axis.
        self.Ntimes = 1
        time_array = bl_info["jdate"][0]

        # this is generated in FHD by subtracting the JD of neighboring
        # integrations. This can have limited accuracy, so it can be slightly
        # off the actual value.
        # (e.g. 1.999426... rather than 2)
        # time_res is constrained to be a scalar currently
        self.integration_time = np.array([np.float64(obs_data["TIME_RES"][0])])

        # array of used frequencies (1: used, 0: flagged)
        freq_use = bl_info["freq_use"][0]
        # array of used antennas (1: used, 0: flagged)
        ant_use = bl_info["tile_use"][0]
        # array of used times (1: used, 0: flagged)
        time_use = bl_info["time_use"][0]

        time_array_use = time_array[np.where(time_use > 0)]

        # extend the range by 1/4 the integration time in each direction
        # to make sure that the original data times are covered by the range.
        # Note that this leaves gaps between adjacent files in principle, but
        # using 1/2 the integration time occasionally led to time_ranges overlapping
        # slightly because of precision issues.
        intime_jd = self.integration_time / (24.0 * 3600.0)
        self.time_range = np.reshape(
            np.asarray(
                [
                    np.min(time_array_use) - intime_jd / 4.0,
                    np.max(time_array_use) + intime_jd / 4.0,
                ]
            ),
            (1, 2),
        )

        self.telescope.name = obs_data["instrument"][0].decode("utf8")
        latitude = np.deg2rad(float(obs_data["LAT"][0]))
        longitude = np.deg2rad(float(obs_data["LON"][0]))
        altitude = float(obs_data["ALT"][0])

        # This is a bit of a kludge because nothing like a phase center name
        # exists in FHD files.
        # At least for the MWA, obs.ORIG_PHASERA and obs.ORIG_PHASEDEC specify
        # the field the telescope was nominally pointing at
        # (May need to be revisited, but probably isn't too important)
        cat_name = (
            "Field RA(deg): "
            + str(obs_data["ORIG_PHASERA"][0])
            + ", Dec:"
            + str(obs_data["ORIG_PHASEDEC"][0])
        )
        # For the MWA, this can sometimes be converted to EoR fields
        if (
            self.telescope.name.lower() == "mwa"
            and np.isclose(obs_data["ORIG_PHASERA"][0], 0)
            and np.isclose(obs_data["ORIG_PHASEDEC"][0], -27)
        ):
            cat_name = "EoR 0 Field"

        cat_id = self._add_phase_center(
            cat_name=cat_name,
            cat_type="sidereal",
            cat_lon=np.deg2rad(float(obs_data["OBSRA"][0])),
            cat_lat=np.deg2rad(float(obs_data["OBSDEC"][0])),
            cat_frame=astrometry["RADECSYS"][0].decode().lower(),
            cat_epoch=astrometry["EQUINOX"][0],
            info_source="file",
        )
        self.phase_center_id_array = np.zeros(self.Ntimes, dtype=int) + cat_id

        # get the stuff FHD read from the antenna table (in layout file)
        if layout_file is not None:
            obs_tile_names = [
                ant.decode("utf8") for ant in bl_info["TILE_NAMES"][0].tolist()
            ]
            if self.telescope.name.lower() == "mwa":
                # MWA tile names are zero-padded to three digits, e.g. "Tile011"
                obs_tile_names = [
                    "Tile" + "0" * (3 - len(ant.strip())) + ant.strip()
                    for ant in obs_tile_names
                ]
            layout_param_dict = fhd_utils.get_fhd_layout_info(
                layout_file=layout_file,
                telescope_name=self.telescope.name,
                latitude=latitude,
                longitude=longitude,
                altitude=altitude,
                obs_tile_names=obs_tile_names,
                run_check_acceptability=True,
            )

            # these keys are relevant to UVData but not to UVCal
            layout_params_to_ignore = [
                "gst0",
                "rdate",
                "earth_omega",
                "dut1",
                "timesys",
                "diameters",
            ]

            telescope_attrs = {
                "telescope_location": "location",
                "Nants_telescope": "Nants",
                "antenna_names": "antenna_names",
                "antenna_numbers": "antenna_numbers",
                "antenna_positions": "antenna_positions",
                "diameters": "antenna_diameters",
            }
            for key, value in layout_param_dict.items():
                if key in layout_params_to_ignore:
                    continue
                if key in telescope_attrs:
                    setattr(self.telescope, telescope_attrs[key], value)
                else:
                    setattr(self, key, value)
        else:
            warnings.warn(
                "No layout file, antenna_positions will not be defined "
                "and antenna_names might be incorrect."
            )

            self.telescope.location = EarthLocation.from_geodetic(
                lat=latitude * units.rad,
                lon=longitude * units.rad,
                height=altitude * units.m,
            )

            # FHD stores antenna numbers, not names, in the "TILE_NAMES" field
            self.telescope.antenna_names = [
                ant.decode("utf8") for ant in bl_info["TILE_NAMES"][0].tolist()
            ]
            self.telescope.antenna_numbers = np.array(
                [int(ant) for ant in self.telescope.antenna_names]
            )
            if self.telescope.name.lower() == "mwa":
                self.telescope.antenna_names = [
                    "Tile" + "0" * (3 - len(ant.strip())) + ant.strip()
                    for ant in self.telescope.antenna_names
                ]
            self.telescope.Nants = len(self.telescope.antenna_names)
            self.telescope.antenna_names = np.asarray(self.telescope.antenna_names)

        self.telescope.x_orientation = "east"

        self.set_telescope_params()

        # need to make sure telescope location is defined properly before this call
        proc = self.set_lsts_from_time_array(
            background=background_lsts, astrometry_library=astrometry_library
        )

        self._set_sky()
        self.gain_convention = "divide"
        self.gain_scale = "Jy"
        # NOTE(fix): was `self.pol_convetions` (typo), which silently set a
        # stray attribute instead of the UVCal `pol_convention` parameter.
        self.pol_convention = "sum"
        self._set_gain()

        # currently don't have branch info. may change in future.
        self.git_origin_cal = "https://github.com/EoRImaging/FHD"
        self.git_hash_cal = obs_data["code_version"][0].decode("utf8")

        if "DELAYS" in obs_data.dtype.names and obs_data["delays"][0] is not None:
            self.extra_keywords["delays"] = (
                "[" + ", ".join(str(int(d)) for d in obs_data["delays"][0]) + "]"
            )

        if settings_file is not None:
            self.history, self.observer = fhd_utils.get_fhd_history(
                settings_file, return_user=True
            )
        else:
            warnings.warn("No settings file, history will be incomplete")
            self.history = ""

        if extra_history is not None:
            if isinstance(extra_history, list | tuple):
                self.history += "\n" + "\n".join(extra_history)
            else:
                self.history += "\n" + extra_history

        if not utils.history._check_history_version(
            self.history, self.pyuvdata_version_str
        ):
            if self.history.endswith("\n"):
                self.history += self.pyuvdata_version_str
            else:
                self.history += "\n" + self.pyuvdata_version_str

        if not read_data:
            # metadata-only read: pull what we can from the obs structure and
            # the (required) settings file instead of the cal.sav file.
            n_pols = int(obs_data["N_POL"][0])
            # FHD only has the diagonal elements (jxx, jyy), so limit to 2
            self.Njones = int(np.min([n_pols, 2]))

            # for calibration FHD includes all antennas in the antenna table,
            # regardless of whether or not they have data
            self.Nants_data = len(self.telescope.antenna_names)

            # get details from settings file
            keywords = [
                "ref_antenna_name",
                "catalog_name",
                "n_sources",
                "min_cal_baseline",
                "max_cal_baseline",
                "galaxy_model",
                "diffuse_model",
                "auto_scale",
                "n_vis_cal",
                "time_avg",
                "conv_thresh",
            ]
            if not raw:
                keywords += [
                    "polyfit",
                    "bandpass",
                    "mode_fit",
                    "amp_degree",
                    "phase_degree",
                ]

            settings_lines = {}
            with open(settings_file) as read_obj:
                cal_start = False
                for line in read_obj:
                    if not cal_start:
                        if line.startswith("##CAL"):
                            cal_start = True
                    else:
                        if line.startswith("##"):
                            # end of the cal structure section
                            break
                        # in cal structure section
                        for kw in keywords:
                            if line.strip().startswith(kw.upper()):
                                settings_lines[kw] = line.split()[1:]

            self.ref_antenna_name = settings_lines["ref_antenna_name"][0]
            self.Nsources = int(settings_lines["n_sources"][0])
            self.sky_catalog = settings_lines["catalog_name"][0]
            self.baseline_range = np.asarray(
                [
                    float(settings_lines["min_cal_baseline"][0]),
                    float(settings_lines["max_cal_baseline"][0]),
                ]
            )
            galaxy_model = int(settings_lines["galaxy_model"][0])
            if len(settings_lines["diffuse_model"]) > 0:
                diffuse_model = settings_lines["diffuse_model"][0]
            else:
                diffuse_model = ""
            auto_scale = settings_lines["auto_scale"]
            n_vis_cal = np.int64(settings_lines["n_vis_cal"][0])
            time_avg = int(settings_lines["time_avg"][0])
            conv_thresh = float(settings_lines["conv_thresh"][0])

            if not raw:
                polyfit = int(settings_lines["polyfit"][0])
                bandpass = int(settings_lines["bandpass"][0])
                mode_fit = settings_lines["mode_fit"]
                # for some reason, it's a float if it's one value,
                # and integers otherwise
                if len(mode_fit) == 1:
                    mode_fit = float(mode_fit[0])
                else:
                    mode_fit = np.array(mode_fit, dtype=np.int64)
                amp_degree = int(settings_lines["amp_degree"][0])
                phase_degree = int(settings_lines["phase_degree"][0])
        else:
            this_dict = readsav(cal_file, python_dict=True)
            cal_data = this_dict["cal"]

            self.Njones = int(cal_data["n_pol"][0])
            self.Nants_data = int(cal_data["n_tile"][0])

            self.sky_catalog = cal_data["skymodel"][0]["catalog_name"][0].decode("utf8")
            self.ref_antenna_name = (
                cal_data["ref_antenna_name"][0].decode("utf8").strip()
            )
            self.Nsources = int(cal_data["skymodel"][0]["n_sources"][0])
            self.baseline_range = np.asarray(
                [
                    float(cal_data["min_cal_baseline"][0]),
                    float(cal_data["max_cal_baseline"][0]),
                ]
            )

            galaxy_model = cal_data["skymodel"][0]["galaxy_model"][0]
            diffuse_model = cal_data["skymodel"][0]["diffuse_model"][0]

            auto_scale = cal_data["auto_scale"][0]
            n_vis_cal = cal_data["n_vis_cal"][0]
            time_avg = cal_data["time_avg"][0]
            conv_thresh = cal_data["conv_thresh"][0]

            if not raw:
                polyfit = cal_data["polyfit"][0]
                bandpass = cal_data["bandpass"][0]
                mode_fit = cal_data["mode_fit"][0]
                amp_degree = cal_data["amp_degree"][0]
                phase_degree = cal_data["phase_degree"][0]

            # Now read data like arrays
            fit_gain_array_in = cal_data["gain"][0]
            fit_gain_array = np.zeros(
                self._gain_array.expected_shape(self), dtype=np.complex128
            )
            for jones_i, arr in enumerate(fit_gain_array_in):
                fit_gain_array[:, :, 0, jones_i] = arr
            if raw:
                # raw gains = fitted gains plus the fit residuals
                res_gain_array_in = cal_data["gain_residual"][0]
                res_gain_array = np.zeros(
                    self._gain_array.expected_shape(self), dtype=np.complex128
                )
                for jones_i, arr in enumerate(res_gain_array_in):
                    res_gain_array[:, :, 0, jones_i] = arr
                self.gain_array = fit_gain_array + res_gain_array
            else:
                self.gain_array = fit_gain_array

            # FHD doesn't really have a chi^2 measure. What it has is a convergence
            # measure. The solution converged well if this is less than the convergence
            # threshold ('conv_thresh' in extra_keywords).
            self.quality_array = np.zeros_like(self.gain_array, dtype=np.float64)
            convergence = cal_data["convergence"][0]
            for jones_i, arr in enumerate(convergence):
                self.quality_array[:, :, 0, jones_i] = arr

            # Currently this can't include the times because the flag array
            # dimensions has to match the gain array dimensions.
            # This is somewhat artificial...
            self.flag_array = np.zeros_like(self.gain_array, dtype=np.bool_)
            flagged_ants = np.where(ant_use == 0)[0]
            for ant in flagged_ants:
                self.flag_array[ant] = 1
            flagged_freqs = np.where(freq_use == 0)[0]
            for freq in flagged_freqs:
                self.flag_array[:, freq] = 1

        if self.telescope.name.lower() == "mwa":
            self.ref_antenna_name = (
                "Tile" + "0" * (3 - len(self.ref_antenna_name)) + self.ref_antenna_name
            )

        # In Python 3, we sometimes get Unicode, sometimes bytes
        # doesn't reliably show up in tests though, so excluding it from coverage
        if isinstance(galaxy_model, bytes):  # pragma: nocover
            galaxy_model = galaxy_model.decode("utf8")
        if galaxy_model == 0:
            galaxy_model = None
        else:
            galaxy_model = "gsm"
        if isinstance(diffuse_model, bytes):
            diffuse_model = diffuse_model.decode("utf8")
        if diffuse_model == "":
            diffuse_model = None
        else:
            diffuse_model = os.path.basename(diffuse_model)

        if galaxy_model is not None:
            if diffuse_model is not None:
                self.diffuse_model = galaxy_model + " + " + diffuse_model
            else:
                self.diffuse_model = galaxy_model
        elif diffuse_model is not None:
            self.diffuse_model = diffuse_model

        # FHD only has the diagonal elements (jxx, jyy) and if there's only one
        # present it must be jxx
        if self.Njones == 1:
            self.jones_array = np.array([-5])
        else:
            self.jones_array = np.array([-5, -6])

        # for calibration FHD creates gain array of shape (Nfreqs, Nants_telescope)
        # rather than (Nfreqs, Nants_data). This means the antenna array will
        # contain all antennas in the antenna table instead of only those
        # which had data in the original uvfits file
        self.ant_array = self.telescope.antenna_numbers

        # FITS-style 8-character keyword names for extra_keywords
        self.extra_keywords["autoscal".upper()] = (
            "[" + ", ".join(str(d) for d in auto_scale) + "]"
        )
        self.extra_keywords["nvis_cal".upper()] = n_vis_cal
        self.extra_keywords["time_avg".upper()] = time_avg
        self.extra_keywords["cvgthres".upper()] = conv_thresh
        if not raw:
            self.extra_keywords["polyfit".upper()] = polyfit
            self.extra_keywords["bandpass".upper()] = bandpass
            if isinstance(mode_fit, list | tuple | np.ndarray):
                self.extra_keywords["mode_fit".upper()] = (
                    "[" + ", ".join(str(m) for m in mode_fit) + "]"
                )
            else:
                self.extra_keywords["mode_fit".upper()] = mode_fit
            self.extra_keywords["amp_deg".upper()] = amp_degree
            self.extra_keywords["phse_deg".upper()] = phase_degree

        # wait for LSTs if set in background
        if proc is not None:
            proc.join()

        if run_check:
            self.check(
                check_extra=check_extra, run_check_acceptability=run_check_acceptability
            )
|
RadioAstronomySoftwareGroupREPO_NAMEpyuvdataPATH_START.@pyuvdata_extracted@pyuvdata-main@src@pyuvdata@uvcal@fhd_cal.py@.PATH_END.py
|
{
"filename": "_textangle.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/annotation/_textangle.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextangleValidator(_plotly_utils.basevalidators.AngleValidator):
    """Validator for the ``textangle`` property of ``layout.annotation``."""

    def __init__(
        self, plotly_name="textangle", parent_name="layout.annotation", **kwargs
    ):
        # Pull schema defaults out of kwargs first so callers may override them.
        edit_type = kwargs.pop("edit_type", "calc+arraydraw")
        role = kwargs.pop("role", "style")
        super(TextangleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@annotation@_textangle.py@.PATH_END.py
|
{
"filename": "_linepositionsrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/hoverlabel/font/_linepositionsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinepositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``linepositionsrc`` property of
    ``scattergeo.hoverlabel.font``."""

    def __init__(
        self,
        plotly_name="linepositionsrc",
        parent_name="scattergeo.hoverlabel.font",
        **kwargs,
    ):
        # Schema default; remains overridable through kwargs.
        edit_type = kwargs.pop("edit_type", "none")
        super(LinepositionsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@hoverlabel@font@_linepositionsrc.py@.PATH_END.py
|
{
"filename": "_showticklabels.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattergl/marker/colorbar/_showticklabels.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticklabelsValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the ``showticklabels`` property of
    ``scattergl.marker.colorbar``."""

    def __init__(
        self,
        plotly_name="showticklabels",
        parent_name="scattergl.marker.colorbar",
        **kwargs,
    ):
        # Schema default; remains overridable through kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        super(ShowticklabelsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattergl@marker@colorbar@_showticklabels.py@.PATH_END.py
|
{
"filename": "Dewaele_2012.py",
"repo_name": "geodynamics/burnman",
"repo_path": "burnman_extracted/burnman-main/burnman/calibrants/Dewaele_2012.py",
"type": "Python"
}
|
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for
# the Earth and Planetary Sciences
# Copyright (C) 2012 - 2024 by the BurnMan team, released under the GNU
# GPL v2 or later.
from burnman.eos.vinet import Vinet
from burnman.eos.mie_grueneisen_debye import MGDBase
from burnman.classes.calibrant import Calibrant
import numpy as np
"""
Dewaele_2012
^^^^^^^^^^^^
"""
class KBr_B2(Calibrant):
    """
    The B2 KBr pressure standard reported by
    Dewaele et al. (2012; https://doi.org/10.1103/PhysRevB.85.214105).
    """

    def __init__(self):
        # EoS parameters from Dewaele et al. (2012), SI units.
        _params_Dewaele_KBr = {
            "V_0": 3.8180e-5,
            "K_0": 14.9e9,
            "Kprime_0": 5.81,
            "aK(V,T)": 0.00222e9,
            "dK_dT_V": 0.0,
            "n": 2.0,
            "T_0": 300.0,
            "P_0": 0.0,
            "Z": 1.0,
        }

        def _pressure_Dewaele_KBr(volume, temperature, params):
            # Cold (isothermal) pressure from a Vinet EoS evaluated at T_0.
            P_cold = Vinet().pressure(params["T_0"], volume, params)
            # Thermal pressure, linear in (T - T_0) with a volume-dependent
            # thermal pressure coefficient.
            dT = temperature - params["T_0"]
            P_th = dT * (
                params["aK(V,T)"]
                + params["dK_dT_V"] * np.log(params["V_0"] / volume)
            )
            return P_cold + P_th

        Calibrant.__init__(self, _pressure_Dewaele_KBr, "pressure", _params_Dewaele_KBr)
class KCl_B2(Calibrant):
    """
    The B2 KCl pressure standard reported by
    Dewaele et al. (2012; https://doi.org/10.1103/PhysRevB.85.214105).
    """

    def __init__(self):
        # EoS parameters from Dewaele et al. (2012), SI units.
        _params_Dewaele_KCl = {
            "V_0": 3.28206e-5,
            "K_0": 17.2e9,
            "Kprime_0": 5.89,
            "aK(V,T)": 0.00224e9,
            "dK_dT_V": 0.0,
            "n": 2.0,
            "T_0": 300.0,
            "P_0": 0.0,
            "Z": 1.0,
        }

        def _pressure_Dewaele_KCl(volume, temperature, params):
            # Cold (isothermal) pressure from a Vinet EoS evaluated at T_0.
            P_cold = Vinet().pressure(params["T_0"], volume, params)
            # Thermal pressure, linear in (T - T_0) with a volume-dependent
            # thermal pressure coefficient.
            dT = temperature - params["T_0"]
            P_th = dT * (
                params["aK(V,T)"]
                + params["dK_dT_V"] * np.log(params["V_0"] / volume)
            )
            return P_cold + P_th

        Calibrant.__init__(self, _pressure_Dewaele_KCl, "pressure", _params_Dewaele_KCl)
|
geodynamicsREPO_NAMEburnmanPATH_START.@burnman_extracted@burnman-main@burnman@calibrants@Dewaele_2012.py@.PATH_END.py
|
{
"filename": "_sysconfigdata_arcadia.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/_sysconfigdata_arcadia.py",
"type": "Python"
}
|
# system configuration generated and used by the sysconfig module
# Hand-written stand-in for the platform-generated _sysconfigdata module:
# sysconfig.get_config_var() returns values from this dict.
build_time_vars = {
    'AR': 'x86_64-linux-gnu-gcc-ar',
    'ARFLAGS': 'rcs',
    'CC': 'x86_64-linux-gnu-gcc -pthread',
    'CCSHARED': '-fPIC',
    'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -g -fwrapv -O2 -Wall -g -fstack-protector-strong -Wformat -Werror=format-security',
    'CXX': 'x86_64-linux-gnu-g++ -pthread',
    # suffix used for compiled extension modules and shared libraries
    'EXT_SUFFIX': '.so',
    'LDSHARED': 'x86_64-linux-gnu-gcc -pthread -shared -Wl,-O1 -Wl,-Bsymbolic-functions -Wl,-Bsymbolic-functions -Wl,-z,relro -g -fwrapv -O2',
    'SHLIB_SUFFIX': '.so',
}
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@_sysconfigdata_arcadia.py@.PATH_END.py
|
{
"filename": "_dtick.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/histogram/marker/colorbar/_dtick.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DtickValidator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``dtick`` property of ``histogram.marker.colorbar``."""

    def __init__(
        self, plotly_name="dtick", parent_name="histogram.marker.colorbar", **kwargs
    ):
        # Schema defaults; each remains overridable through kwargs.
        edit_type = kwargs.pop("edit_type", "colorbars")
        implied_edits = kwargs.pop("implied_edits", {"tickmode": "linear"})
        role = kwargs.pop("role", "style")
        super(DtickValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            implied_edits=implied_edits,
            role=role,
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@histogram@marker@colorbar@_dtick.py@.PATH_END.py
|
{
"filename": "test_voevent_generator.py",
"repo_name": "loostrum/darc",
"repo_path": "darc_extracted/darc-master/test/test_voevent_generator.py",
"type": "Python"
}
|
#!/usr/bin/env python3
import os
import unittest
from time import sleep
import multiprocessing as mp
from darc import VOEventGenerator, VOEventQueueServer
class TestVOEventGenerator(unittest.TestCase):

    def test_generate_VOEvent(self):
        """
        Test that the VOEvent generator converts a trigger into a VOEvent
        """
        # start a VOEvent generator in its own process
        ctrl_queue = mp.Queue()
        generator = VOEventGenerator(ctrl_queue)
        # point it at a local server and enable actual event generation
        generator.server_host = 'localhost'
        generator.send_events = True
        generator.start()
        sleep(1)

        # two triggers differing in S/N; the generator should pick the
        # highest-S/N one, whose utc also appears in the VOEvent filename
        trigger_utc = '2019-01-02T18:00:00.0'
        common = {'dm': 56.791, 'dm_err': .2, 'width': 2.5, 'flux': 0.5,
                  'ra': 83.63322083333333, 'dec': 22.01446111111111, 'ymw16': 0,
                  'semiMaj': 15., 'semiMin': 15., 'name': 'B0531+21',
                  'importance': 0.1, 'test': True}
        triggers = [
            dict(common, snr=10, cb=0, utc='2019-01-01T18:00:00.0'),
            dict(common, snr=50, cb=17, utc=trigger_utc),
        ]

        # connect to the generator's input queue
        VOEventQueueServer.register('get_queue')
        queue_server = VOEventQueueServer(address=(generator.server_host, generator.server_port),
                                          authkey=generator.server_auth.encode())
        queue_server.connect()
        queue = queue_server.get_queue()

        # submit both triggers
        for trigger in triggers:
            queue.put(trigger)

        # give the generator time to process, then shut it down
        sleep(5)
        ctrl_queue.put('stop')
        generator.join()

        # the output file should be named after the highest-S/N trigger
        filename = os.path.join(generator.voevent_dir, "{}.xml".format(trigger_utc))
        self.assertTrue(os.path.isfile(filename))
        # clean up
        os.remove(filename)
# Allow running this test module directly with `python test_voevent_generator.py`.
if __name__ == '__main__':
    unittest.main()
|
loostrumREPO_NAMEdarcPATH_START.@darc_extracted@darc-master@test@test_voevent_generator.py@.PATH_END.py
|
{
"filename": "_familysrc.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/table/header/font/_familysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FamilysrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="familysrc", parent_name="table.header.font", **kwargs
):
super(FamilysrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@table@header@font@_familysrc.py@.PATH_END.py
|
{
"filename": "hpc++.py",
"repo_name": "duvall3/rat-pac",
"repo_path": "rat-pac_extracted/rat-pac-master/python/SCons/Tool/hpc++.py",
"type": "Python"
}
|
"""SCons.Tool.hpc++
Tool-specific initialization for c++ on HP/UX.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/hpc++.py 4043 2009/02/23 09:06:45 scons"
import os.path
import string
import SCons.Util
# The generic 'c++' tool module: its name is not a valid Python identifier,
# so it has to be loaded via __import__ rather than an import statement.
cplusplus = __import__('c++', globals(), locals(), [])

# Path to the HP aCC front end, or None if not found (checked by exists()).
acc = None

# search for the acc compiler and linker front end
try:
    dirs = os.listdir('/opt')
except (IOError, OSError):
    # Not being able to read the directory because it doesn't exist
    # (IOError) or isn't readable (OSError) is okay.
    dirs = []

for dir in dirs:
    cc = '/opt/' + dir + '/bin/aCC'
    if os.path.exists(cc):
        # use the first aCC installation found under /opt
        acc = cc
        break
def generate(env):
    """Add Builders and construction variables for HP aCC to an Environment.

    Delegates to the generic 'c++' tool first, then overrides the compiler
    settings when an aCC front end was found under /opt at import time.
    (The original docstring said "g++", a copy/paste error from another tool.)
    """
    cplusplus.generate(env)

    if acc:
        # acc is known to be truthy inside this branch, so the previous
        # `acc or 'aCC'` fallback was dead code.
        env['CXX'] = acc
        # +Z: HP-UX flag for position-independent code (shared libraries)
        env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
        # determine version of aCC
        line = os.popen(acc + ' -V 2>&1').readline().rstrip()
        if string.find(line, 'aCC: HP ANSI C++') == 0:
            env['CXXVERSION'] = string.split(line)[-1]

        if env['PLATFORM'] == 'cygwin':
            env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS')
        else:
            env['SHCXXFLAGS'] = SCons.Util.CLVar('$CXXFLAGS +Z')
def exists(env):
    # The tool is usable only when an aCC front end was located under /opt
    # at module import time (see the search loop above).
    return acc
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
duvall3REPO_NAMErat-pacPATH_START.@rat-pac_extracted@rat-pac-master@python@SCons@Tool@hpc++.py@.PATH_END.py
|
{
"filename": "DivuDilatation.py",
"repo_name": "mmicromegas/ransX",
"repo_path": "ransX_extracted/ransX-master/EQUATIONS/DivuDilatation.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
from mpl_toolkits.axes_grid1 import make_axes_locatable
import matplotlib.cm as cm
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class DivuDilatation(Calculus, SetAxisLimit, Tools, Errors, object):
    """Mean velocity-divergence (dilatation) diagnostics for ransX data.

    Loads Reynolds-averaged fields from *filename* and prepares the mean,
    Favre (density-weighted) and Reynolds dilatation profiles, together
    with the divu space-time data consumed by the plotting methods.

    Theoretical background: https://arxiv.org/abs/1401.5176
    (Mocak, Meakin, Viallet, Arnett 2014, Compressible Hydrodynamic
    Mean-Field Equations in Spherical Geometry and their Application to
    Turbulent Stellar Convection Data).
    """

    def __init__(self, filename, ig, fext, ieos, intc, data_prefix, bconv, tconv):
        # filename: averaged-data file; ig: geometry flag (1 cartesian,
        # 2 spherical); fext: output extension ('png'/'eps'); ieos: EOS flag
        # (1 = ideal gas); intc: central time index of the averaging window;
        # bconv/tconv: bottom/top convection-zone boundary radii.
        super(DivuDilatation, self).__init__(ig)

        # load data to structured array
        eht = self.customLoad(filename)

        # load grid
        xzn0 = self.getRAdata(eht, 'xzn0')
        nx = self.getRAdata(eht, 'nx')
        ny = self.getRAdata(eht, 'ny')
        nz = self.getRAdata(eht, 'nz')

        # pick equation-specific Reynolds-averaged mean fields according to:
        # https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
        dd = self.getRAdata(eht, 'dd')[intc]
        ux = self.getRAdata(eht, 'ux')[intc]
        uy = self.getRAdata(eht, 'uy')[intc]
        uz = self.getRAdata(eht, 'uz')[intc]
        ddgg = -self.getRAdata(eht, 'ddgg')[intc]
        gamma1 = self.getRAdata(eht, 'gamma1')[intc]

        # density-weighted first moments
        ddux = self.getRAdata(eht, 'ddux')[intc]
        dduy = self.getRAdata(eht, 'dduy')[intc]
        dduz = self.getRAdata(eht, 'dduz')[intc]

        # velocity second moments (Reynolds and density-weighted)
        uxux = self.getRAdata(eht, 'uxux')[intc]
        uxuy = self.getRAdata(eht, 'uxuy')[intc]
        uxuz = self.getRAdata(eht, 'uxuz')[intc]
        dduxux = self.getRAdata(eht, 'dduxux')[intc]
        dduxuy = self.getRAdata(eht, 'dduxuy')[intc]
        dduxuz = self.getRAdata(eht, 'dduxuz')[intc]

        # divergence correlations
        divu = self.getRAdata(eht, 'divu')[intc]
        dddivu = self.getRAdata(eht, 'dddivu')[intc]
        uxdivu = self.getRAdata(eht, 'uxdivu')[intc]
        uydivu = self.getRAdata(eht, 'uydivu')[intc]
        uzdivu = self.getRAdata(eht, 'uzdivu')[intc]

        uxdivux = self.getRAdata(eht, 'uxdivux')[intc]
        uydivux = self.getRAdata(eht, 'uydivux')[intc]
        uzdivux = self.getRAdata(eht, 'uzdivux')[intc]
        uxdivuy = self.getRAdata(eht, 'uxdivuy')[intc]
        uydivuy = self.getRAdata(eht, 'uydivuy')[intc]
        uzdivuy = self.getRAdata(eht, 'uzdivuy')[intc]
        uxdivuz = self.getRAdata(eht, 'uxdivuz')[intc]
        uydivuz = self.getRAdata(eht, 'uydivuz')[intc]
        uzdivuz = self.getRAdata(eht, 'uzdivuz')[intc]

        divux = self.getRAdata(eht, 'divux')[intc]
        divuy = self.getRAdata(eht, 'divuy')[intc]
        divuz = self.getRAdata(eht, 'divuz')[intc]

        dduxdivu = self.getRAdata(eht, 'dduxdivu')[intc]
        dduydivu = self.getRAdata(eht, 'dduydivu')[intc]
        dduzdivu = self.getRAdata(eht, 'dduzdivu')[intc]

        dduxdivux = self.getRAdata(eht, 'dduxdivux')[intc]
        dduydivux = self.getRAdata(eht, 'dduydivux')[intc]
        dduzdivux = self.getRAdata(eht, 'dduzdivux')[intc]
        dduxdivuy = self.getRAdata(eht, 'dduxdivuy')[intc]
        dduydivuy = self.getRAdata(eht, 'dduydivuy')[intc]
        dduzdivuy = self.getRAdata(eht, 'dduzdivuy')[intc]
        dduxdivuz = self.getRAdata(eht, 'dduxdivuz')[intc]
        dduydivuz = self.getRAdata(eht, 'dduydivuz')[intc]
        dduzdivuz = self.getRAdata(eht, 'dduzdivuz')[intc]

        dddivux = self.getRAdata(eht, 'dddivux')[intc]
        dddivuy = self.getRAdata(eht, 'dddivuy')[intc]
        dddivuz = self.getRAdata(eht, 'dddivuz')[intc]

        # velocity-gradient correlations
        dduxuxx = self.getRAdata(eht, 'dduxuxx')[intc]
        dduyuxx = self.getRAdata(eht, 'dduyuxx')[intc]
        dduzuxx = self.getRAdata(eht, 'dduzuxx')[intc]
        dduxuyy = self.getRAdata(eht, 'dduxuyy')[intc]
        dduyuyy = self.getRAdata(eht, 'dduyuyy')[intc]
        dduzuyy = self.getRAdata(eht, 'dduzuyy')[intc]
        dduxuzz = self.getRAdata(eht, 'dduxuzz')[intc]
        dduyuzz = self.getRAdata(eht, 'dduyuzz')[intc]
        dduzuzz = self.getRAdata(eht, 'dduzuzz')[intc]

        # dduxx = self.getRAdata(eht,'dduxx')[intc]
        # dduyy = self.getRAdata(eht,'dduyy')[intc]
        # dduzz = self.getRAdata(eht,'dduzz')[intc]

        uxuxx = self.getRAdata(eht, 'uxuxx')[intc]
        uyuxx = self.getRAdata(eht, 'uyuxx')[intc]
        uzuxx = self.getRAdata(eht, 'uzuxx')[intc]
        uxuyy = self.getRAdata(eht, 'uxuyy')[intc]
        uyuyy = self.getRAdata(eht, 'uyuyy')[intc]
        uzuyy = self.getRAdata(eht, 'uzuyy')[intc]
        uxuzz = self.getRAdata(eht, 'uxuzz')[intc]
        uyuzz = self.getRAdata(eht, 'uyuzz')[intc]
        uzuzz = self.getRAdata(eht, 'uzuzz')[intc]

        uxx = self.getRAdata(eht, 'uxx')[intc]
        uyy = self.getRAdata(eht, 'uyy')[intc]
        uzz = self.getRAdata(eht, 'uzz')[intc]

        # pressure-dilatation correlations
        pp = self.getRAdata(eht, 'pp')[intc]
        # NOTE(review): divu, dddivu, uxdivu, uydivu, uzdivu are re-loaded
        # here with the same values as above (harmless duplication).
        divu = self.getRAdata(eht, 'divu')[intc]
        dddivu = self.getRAdata(eht, 'dddivu')[intc]
        ppdivu = self.getRAdata(eht, 'ppdivu')[intc]
        ppux = self.getRAdata(eht, 'ppux')[intc]
        uxdivu = self.getRAdata(eht, 'uxdivu')[intc]
        uxppdivu = self.getRAdata(eht, 'uxppdivu')[intc]
        ppuy = self.getRAdata(eht, 'ppuy')[intc]
        uydivu = self.getRAdata(eht, 'uydivu')[intc]
        uyppdivu = self.getRAdata(eht, 'uyppdivu')[intc]
        ppuz = self.getRAdata(eht, 'ppuz')[intc]
        uzdivu = self.getRAdata(eht, 'uzdivu')[intc]
        uzppdivu = self.getRAdata(eht, 'uzppdivu')[intc]

        # override gamma for ideal gas eos (need to be fixed in PROMPI later)
        if ieos == 1:
            cp = self.getRAdata(eht, 'cp')[intc]
            cv = self.getRAdata(eht, 'cv')[intc]
            gamma1 = cp / cv  # gamma1,gamma2,gamma3 = gamma = cp/cv Cox & Giuli 2nd Ed. page 230, Eq.9.110

        # construct equation-specific mean fields
        fht_ux = ddux / dd
        fht_uy = dduy / dd
        fht_uz = dduz / dd
        # Favre velocity fluctuation of the mean field
        eht_uxff = ux - fht_ux

        ###########################################
        # FULL TURBULENCE VELOCITY FIELD HYPOTHESIS
        ###########################################
        # hard-coded toggle: Reynolds (True) vs Favre (False) stress estimate
        if (True):
            self.rxx = uxux - ux * ux
            self.rxy = uxuy - ux * uy
            self.rxz = uxuz - ux * uz
        # dead branch kept for reference (Favre/density-weighted stresses)
        if (False):
            self.rxx = dduxux / dd - ddux * ddux / (dd * dd)
            self.ryx = dduxuy / dd - ddux * dduy / (dd * dd)
            self.rzx = dduxuz / dd - ddux * dduz / (dd * dd)

        # Reynolds fluctuation correlations u'_i (div u)'
        self.eht_uxf_divuf = uxdivu - ux * divu
        self.eht_uyf_divuf = uydivu - uy * divu
        self.eht_uzf_divuf = uzdivu - uz * divu

        self.eht_uxf_divuxf = uxdivux - ux * divux
        self.eht_uxf_divuyf = uxdivuy - ux * divuy
        self.eht_uxf_divuzf = uxdivuz - ux * divuz
        self.eht_uyf_divuxf = uydivux - uy * divux
        self.eht_uyf_divuyf = uydivuy - uy * divuy
        self.eht_uyf_divuzf = uydivuz - uy * divuz
        self.eht_uzf_divuxf = uzdivux - uz * divux
        self.eht_uzf_divuyf = uzdivuy - uz * divuy
        self.eht_uzf_divuzf = uzdivuz - uz * divuz

        # Favre fluctuation correlations u''_i (div u)''
        self.eht_uxff_divuff = dduxdivu / dd - ddux * dddivu / (dd * dd)
        self.eht_uyff_divuff = dduydivu / dd - dduy * dddivu / (dd * dd)
        self.eht_uzff_divuff = dduzdivu / dd - dduz * dddivu / (dd * dd)

        self.eht_uxff_divuxff = dduxdivux / dd - ddux * dddivux / (dd * dd)
        self.eht_uxff_divuyff = dduxdivuy / dd - ddux * dddivuy / (dd * dd)
        self.eht_uxff_divuzff = dduxdivuz / dd - ddux * dddivuz / (dd * dd)
        self.eht_uyff_divuxff = dduydivux / dd - dduy * dddivux / (dd * dd)
        self.eht_uyff_divuyff = dduydivuy / dd - dduy * dddivuy / (dd * dd)
        self.eht_uyff_divuzff = dduydivuz / dd - dduy * dddivuz / (dd * dd)
        self.eht_uzff_divuxff = dduzdivux / dd - dduz * dddivux / (dd * dd)
        self.eht_uzff_divuyff = dduzdivuy / dd - dduz * dddivuy / (dd * dd)
        self.eht_uzff_divuzff = dduzdivuz / dd - dduz * dddivuz / (dd * dd)

        # velocity/gradient fluctuation correlations
        self.eht_uxf_uxxf = uxuxx - ux * uxx
        self.eht_uxf_uyyf = uxuyy - ux * uyy
        self.eht_uxf_uzzf = uxuzz - ux * uzz
        self.eht_uyf_uxxf = uyuxx - uy * uxx
        self.eht_uyf_uyyf = uyuyy - uy * uyy
        self.eht_uyf_uzzf = uyuzz - uy * uzz
        self.eht_uzf_uxxf = uzuxx - uz * uxx
        self.eht_uzf_uyyf = uzuyy - uz * uyy
        self.eht_uzf_uzzf = uzuzz - uz * uzz

        # NOTE(review): these three duplicate the eht_u*ff_divuff
        # assignments made above with identical right-hand sides.
        self.eht_uxff_divuff = dduxdivu / dd - ddux * dddivu / (dd * dd)
        self.eht_uyff_divuff = dduydivu / dd - dduy * dddivu / (dd * dd)
        self.eht_uzff_divuff = dduzdivu / dd - dduz * dddivu / (dd * dd)

        # self.eht_uxff_uxxff = dduxuxx/dd - ddux*dduxx/(dd*dd)
        # self.eht_uxff_uyyff = dduxuyy/dd - ddux*dduyy/(dd*dd)
        # self.eht_uxff_uzzff = dduxuzz/dd - ddux*dduzz/(dd*dd)
        # self.eht_uyff_uxxff = dduyuxx/dd - dduy*dduxx/(dd*dd)
        # self.eht_uyff_uyyff = dduyuyy/dd - dduy*dduyy/(dd*dd)
        # self.eht_uyff_uzzff = dduyuzz/dd - dduy*dduzz/(dd*dd)
        # self.eht_uzff_uxxff = dduzuxx/dd - dduz*dduxx/(dd*dd)
        # self.eht_uzff_uyyff = dduzuyy/dd - dduz*dduyy/(dd*dd)
        # self.eht_uzff_uzzff = dduzuzz/dd - dduz*dduzz/(dd*dd)
        ###############################################
        # END FULL TURBULENCE VELOCITY FIELD HYPOTHESIS
        ###############################################

        # pressure-dilatation flux decompositions
        self.eht_uxfppdivu = uxppdivu - ux * ppdivu
        self.ppfuxf_fht_divu = (ppux - pp * ux) * (dddivu / dd)
        self.pp_eht_uxf_divuff = pp * (uxdivu - ux * divu)
        self.eht_ppf_uxf_divuff = ppux * divu - ppux * (dddivu / dd) - pp * ux * divu + pp * ux * (dddivu / dd)

        self.eht_uyfppdivu = uyppdivu - uy * ppdivu
        self.ppfuyf_fht_divu = (ppuy - pp * uy) * (dddivu / dd)
        self.pp_eht_uyf_divuff = pp * (uydivu - uy * divu)
        self.eht_ppf_uyf_divuff = ppuy * divu - ppuy * (dddivu / dd) - pp * uy * divu + pp * uy * (dddivu / dd)

        self.eht_uzfppdivu = uzppdivu - uz * ppdivu
        self.ppfuzf_fht_divu = (ppuz - pp * uz) * (dddivu / dd)
        self.pp_eht_uzf_divuff = pp * (uzdivu - uz * divu)
        self.eht_ppf_uzf_divuff = ppuz * divu - ppuz * (dddivu / dd) - pp * uz * divu + pp * uz * (dddivu / dd)

        # two estimates of the mean dilatation: direct vs sum of components
        self.eht_divu1 = divu
        self.eht_divu2 = divux + divuy + divuz

        #print(divux)
        #print('************')
        #print(divuy)
        #print('************')
        #print(divuz)
        #print('************')

        # Favre dilatation and dilatation of the Favre fluctuation
        self.fht_divu = dddivu/dd
        self.eht_divuff = self.Div(eht_uxff,xzn0)

        # Reynolds vs Favre mean radial-velocity divergence
        eht_ux = ux
        fht_ux = ddux/dd
        self.favrian_d = self.Div(fht_ux, xzn0)
        self.reynolds_d = self.Div(eht_ux, xzn0)

        # for space-time diagrams
        t_divu = self.getRAdata(eht, 'divu')
        t_timec = self.getRAdata(eht, 'timec')

        if self.ig == 1:
            self.t_divu = t_divu
        elif self.ig == 2:
            # resample onto an equidistant radial grid for imshow
            dx = (xzn0[-1]-xzn0[0])/nx
            dumx = xzn0[0]+np.arange(1,nx,1)*dx
            t_divu2 = []
            # interpolation due to non-equidistant radial grid
            for i in range(int(t_divu.shape[0])):
                t_divu2.append(np.interp(dumx,xzn0,t_divu[i,:]))
            t_divu_forspacetimediagram = np.asarray(t_divu2)
            self.t_divu = t_divu_forspacetimediagram  # for the space-time diagrams

        # assign global data to be shared across whole class
        self.data_prefix = data_prefix
        self.xzn0 = xzn0
        self.dd = dd
        self.nx = nx
        self.ny = ny
        self.nz = nz
        self.ig = ig
        self.pp = pp
        self.ddgg = ddgg
        self.gamma1 = gamma1
        self.fext = fext
        self.bconv = bconv
        self.tconv = tconv
        self.t_timec = t_timec

    def plot_divu(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
        """Plot divu in the model"""
        # LAXIS + xbl/xbr/ybu/ybd control the axis limits via set_plt_axis;
        # ilg is the matplotlib legend location code.
        # check supported geometries
        if self.ig != 1 and self.ig != 2:
            print("ERROR(DivuDilatation.py):" + self.errorGeometry(self.ig))
            sys.exit()

        # load x GRID
        grd1 = self.xzn0

        # load DATA to plot
        plt1 = self.eht_divu1
        plt2 = self.eht_divu2
        plt3 = self.favrian_d
        plt4 = self.reynolds_d
        plt5 = self.fht_divu
        plt6 = self.eht_divuff

        # create FIGURE
        plt.figure(figsize=(7, 6))

        # format AXIS, make sure it is exponential
        plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))

        # set plot boundaries
        to_plot = [plt1, plt2, plt6]
        self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, to_plot)

        # plot DATA
        if self.ig == 1:
            plt.title('divu (cartesian)')
            #plt.plot(grd1, plt2, color='g', label=r"$divu2$")
            plt.plot(grd1, plt1, marker='o', color='r',markersize=6,markevery=20, label=r"+$\overline{\nabla \cdot {\bf u}}$")
            plt.plot(grd1, plt4, color='b', label=r"+$\nabla_x \overline{u}_x$")
            plt.plot(grd1, plt3, color='g', label=r"+$\nabla_x \widetilde{u}_x$")
            # plt.plot(grd1, plt5, color='m', linestyle='dotted',label=r"+$\overline{\rho \nabla \cdot {\bf u}}/\overline{\rho}$")
            plt.plot(grd1, plt6, color='c', label=r"+$\nabla_x \overline{u''}_x$")
        elif self.ig == 2:
            plt.title('divu (spherical)')
            plt.plot(grd1, plt1, color='r', label=r"$divu1$")
            plt.plot(grd1, plt2, color='g', label=r"$divu2$")

        # zero line for reference
        plt.axhline(y=0., linestyle='--',color='k')

        # define and show x/y LABELS
        if self.ig == 1:
            setxlabel = r'x (cm)'
            setylabel = r"cm s$^{-2}$"
            plt.ylabel(setylabel)
            plt.xlabel(setxlabel)
        elif self.ig == 2:
            setxlabel = r'r (cm)'
            setylabel = r"cm s$^{-2}$"
            plt.ylabel(setylabel)
            plt.xlabel(setxlabel)

        # convective boundary markers
        plt.axvline(self.bconv, linestyle='-', linewidth=0.7, color='k')
        plt.axvline(self.tconv, linestyle='-', linewidth=0.7, color='k')

        # show LEGEND
        plt.legend(loc=ilg, prop={'size': 14})

        # display PLOT
        plt.show(block=False)

        # check supported file output extension
        if self.fext != "png" and self.fext != "eps":
            print("ERROR(DivuDilatation.py):" + self.errorOutputFileExtension(self.fext))
            sys.exit()

        # save PLOT
        if self.fext == "png":
            plt.savefig('RESULTS/' + self.data_prefix + 'divu.png')
        if self.fext == "eps":
            plt.savefig('RESULTS/' + self.data_prefix + 'divu.eps')

    def plot_divu_space_time(self, LAXIS, bconv, tconv, xbu, ybu, ybd, ilg):
        """Plot Frho space time diagram"""
        # NOTE(review): the bconv/tconv arguments shadow the attributes of
        # the same name and are unused here — confirm intended signature.
        if self.ig != 1 and self.ig != 2:
            print("ERROR(ContinuityEquationWithMassFlux.py):" + self.errorGeometry(self.ig))
            sys.exit()

        t_timec = self.t_timec

        # load x GRID
        nx = self.nx
        grd1 = self.xzn0

        # load DATA to plot (transpose: radius on rows, time on columns)
        plt1 = self.t_divu.T
        #plt1 = self.t_divu.T

        indRES = np.where((grd1 < 9.e8) & (grd1 > 4.e8))[0]

        # hard-coded color-scale limits instead of data min/max
        #pltMax = np.max(plt1[indRES])
        #pltMin = np.min(plt1[indRES])
        pltMax = 5.e-5
        pltMin = -5.e-5

        # create FIGURE
        # plt.figure(figsize=(7, 6))
        #print(t_timec[0], t_timec[-1], grd1[0], grd1[-1])
        fig, ax = plt.subplots(figsize=(14, 7))
        # fig.suptitle("log(X) (" + self.setNucNoUp(str(element))+ ")")
        fig.suptitle(r"$\nabla \cdot {\bf u}$ " + str(self.nx) + ' x ' + str(self.ny) + ' x ' + str(self.nz))
        im = ax.imshow(plt1, interpolation='bilinear', cmap=cm.jet.reversed(),
                       origin='lower', extent = [t_timec[0], t_timec[-1], grd1[0], grd1[-1]], aspect='auto',
                       vmax=pltMax, vmin=pltMin)
        #extent = [t_timec[0], t_timec[-1], grd1[0], grd1[-1]]

        # colorbar in its own axis so the image is not squeezed
        divider = make_axes_locatable(ax)
        cax = divider.append_axes('right', size='5%', pad=0.05)
        fig.colorbar(im, cax=cax, orientation='vertical')

        # define and show x/y LABELS
        if self.ig == 1:
            setxlabel = r'time (s)'
            setylabel = r"r ($10^8$ cm)"
            ax.set_xlabel(setxlabel)
            ax.set_ylabel(setylabel)
        elif self.ig == 2:
            setxlabel = r'time (s)'
            setylabel = r"r ($10^8$ cm)"
            ax.set_xlabel(setxlabel)
            ax.set_ylabel(setylabel)

        # display PLOT
        plt.show(block=False)

        # save PLOT
        if self.fext == "png":
            plt.savefig('RESULTS/' + self.data_prefix + 'mean_divu_space_time' +'.png')
        if self.fext == "eps":
            plt.savefig('RESULTS/' + self.data_prefix + 'mean_divu_space_time' + '.eps')
|
mmicromegasREPO_NAMEransXPATH_START.@ransX_extracted@ransX-master@EQUATIONS@DivuDilatation.py@.PATH_END.py
|
{
"filename": "camera.py",
"repo_name": "mtalapinto/moes",
"repo_path": "feros/optics/camera.py",
"type": "Python"
}
|
from . import trace
from . import spheric_surface
import numpy as np
from . import transform
from . import refraction_index
import copy
import numpy.linalg as la
#import compare_zemax
#import matplotlib.pyplot as plt
from . import cte
def init():
    """Return the default camera prescription as a flat list.

    The list holds (radius of curvature, thickness) pairs for the ten
    camera surfaces, flattened into twenty consecutive floats.
    """
    surface_pairs = (
        (-299.99, -26.079),
        (1e10, -55.821),
        (-558.957, -11.65),
        (-147.498, -0.995),
        (-147.491, -38.060),
        (-2146.267, -19.695),
        (495.253, -15.06),
        (999.741, -322.22),
        (-262.531, -25.98),
        (1097.82, -160.486),
    )
    return [value for pair in surface_pairs for value in pair]
def set_data(camdat):
    """Pack a flat parameter list into per-surface rows.

    Each row is [surface index (1-based), radius, thickness, material],
    where consecutive pairs of *camdat* supply radius and thickness.
    """
    materials = ['SFPL51-CAM', 'Air', 'SBAM4', 'Air', 'SFPL53',
                 'Air', 'SBSL7', 'Air', 'SBAM4', 'Air']
    return [[idx + 1, camdat[2 * idx], camdat[2 * idx + 1], material]
            for idx, material in enumerate(materials)]
def load_data():
    """Read the camera prescription from optics/cam_data.dat.

    The file holds one float per line; returns them as a list in file
    order.  Uses a context manager so the file handle is always closed
    (the original implementation leaked it).
    """
    basedir = 'optics/'
    with open(basedir + 'cam_data.dat', 'r') as file_cam:
        return [float(line) for line in file_cam]
def tracing(H, DC, Tin, l0, t, p):
    """Trace rays through the FEROS camera optics.

    Parameters
    ----------
    H, DC : (nrays, 3) arrays of ray positions and direction cosines
        (copied; the inputs are not modified).
    Tin : unused, kept for interface compatibility with the other tracers.
    l0 : wavelength array; t, p : temperature and pressure passed to the
        refractive-index model.

    Returns
    -------
    (H_out, DC_out) : propagated positions and direction cosines at the
    detector plane.

    The original body repeated the identical vector-Snell refraction block
    for every surface; it is factored into `_refract`/`_advance` helpers
    driven by a surface table.  The numeric sequence of operations is
    unchanged.
    """
    H_out = H.copy()
    DC_out = DC.copy()
    nrays = len(DC)

    def _index(material):
        # None encodes air/vacuum with unit refractive index, matching the
        # original np.full(len(DC), 1) arrays.
        if material is None:
            return np.full(nrays, 1)
        return refraction_index.n(l0, t, p, material)

    def _refract(radius, mat_in, mat_out):
        # Intersect the spherical surface of given radius, then apply the
        # vector form of Snell's law in place on DC_out.
        nonlocal H_out
        H_out, nvec = spheric_surface.dZ(H_out, DC_out, radius)
        k = _index(mat_out) / _index(mat_in)
        cosi = (DC_out[:, 0] * nvec[:, 0]
                + DC_out[:, 1] * nvec[:, 1]
                + DC_out[:, 2] * nvec[:, 2])
        sini = np.sqrt(1 - cosi ** 2)
        cosr = np.sqrt(1 - (sini / k) ** 2)
        for ax in range(3):
            DC_out[:, ax] = DC_out[:, ax] / k + (cosr - cosi / k) * nvec[:, ax]

    def _advance(dz):
        # Propagate to the next surface plane and reset the local z sag.
        nonlocal H_out
        H_out = trace.to_next_surface(H_out, DC_out, dz)
        H_out[:, 2] = 0.

    # Lenses 1-4: (radius, entry medium, exit medium, distance to next surface)
    lens_train = [
        (-230.664, None, 'CAF2', -24.7),      # lens 1 entry
        (633.82, 'CAF2', None, -1),           # lens 1 exit
        (-1766.220, None, 'PSK3', -8.1),      # lens 2 entry
        (-130.030, 'PSK3', 'FK54', -30.070),  # lens 2 cemented interface
        (1613.65, 'FK54', None, -10),         # lens 2 exit
        (319.400, None, 'BK7', -15.030),      # lens 3 entry
        (803.7, 'BK7', None, -314.07),        # lens 3 exit
        (-197.97, None, 'FK5', -25.030),      # lens 4 entry
        (1000.54, 'FK5', None, -132.5),       # lens 4 exit
    ]
    for radius, mat_in, mat_out, dz in lens_train:
        _refract(radius, mat_in, mat_out)
        _advance(dz)

    # CCD rotation (angles are currently zero; kept as a hook for tilt).
    T_ccd = np.array([0. * np.pi / 180, 0. * np.pi / 180, 0. * np.pi / 180])
    H_out = transform.transform2(H_out, T_ccd)
    DC_out = transform.transform2(DC_out, T_ccd)

    # Field lens / dewar window group in front of the detector.
    field_train = [
        (135.01, None, 'LAK16A', -2.060),     # field lens entry
        (1e20, 'LAK16A', 'SILICA', -10.110),  # flat cemented interface
        (1e20, 'SILICA', 'VACUUM', -4.7),     # flat exit into the cryostat
    ]
    for radius, mat_in, mat_out, dz in field_train:
        _refract(radius, mat_in, mat_out)
        _advance(dz)

    return H_out, DC_out
|
mtalapintoREPO_NAMEmoesPATH_START.@feros@optics@camera.py@.PATH_END.py
|
{
"filename": "libraries.py",
"repo_name": "AndresdPM/BANNJOS",
"repo_path": "BANNJOS_extracted/BANNJOS-main/libraries.py",
"type": "Python"
}
|
import sys, os
from multiprocessing import Pool, cpu_count
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import regularizers
from tensorflow.keras import constraints
from tensorflow.keras.layers.experimental import preprocessing
from sklearn.model_selection import train_test_split, StratifiedKFold, RepeatedStratifiedKFold, RepeatedKFold, KFold
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.neighbors import KNeighborsRegressor
from sklearn.utils import shuffle
import numpy as np
import pandas as pd
import polars as pl
import urllib.request
from math import log10, floor
import time
import datetime as dt
import uncertainties.unumpy as unp
import matplotlib.pyplot as plt
plt.rc('font', family='serif')
plt.rc('text', usetex=True)
def RFE_selector(X, y, params, scoring = 'accuracy', min_features_to_select = 5, n_splits=5, step = 1, output_name = 'Recursive_feature_selection'):
    """Recursive feature elimination with cross-validation (RFECV).

    Fits an RFECV wrapper around a RandomForestRegressor on (X, y), saves
    a score-vs-number-of-features plot and three CSV files (selected
    features, CV results, feature ranking) under *output_name*, and
    returns (best_features, ranking array, cv_results_ dict).

    NOTE(review): *params* is accepted but never used — confirm intent.
    """
    from sklearn.ensemble import RandomForestRegressor
    from sklearn.feature_selection import RFECV

    forest = RandomForestRegressor(n_estimators=32, n_jobs = -1, verbose = 0)
    rfecv = RFECV(
        estimator=forest,
        step=step,
        cv=n_splits,
        scoring=scoring,
        min_features_to_select=min_features_to_select,
        verbose = 1)
    rfecv.fit(X, y)
    print("Optimal number of features : %d" % rfecv.n_features_)

    # Plot number of features VS. cross-validation scores
    plt.close('all')
    plt.figure()
    plt.xlabel("Number of features selected")
    plt.ylabel("Cross validation score (mae)")
    plt.plot(range(min_features_to_select, (len(rfecv.cv_results_['mean_test_score']) * step) + min_features_to_select, step),
             rfecv.cv_results_['mean_test_score'], zorder = 1
             )
    # Shade +/- one standard deviation around the mean CV score.
    plt.fill_between(range(min_features_to_select, (len(rfecv.cv_results_['mean_test_score']) * step) + min_features_to_select, step), rfecv.cv_results_['mean_test_score'] - rfecv.cv_results_['std_test_score'], rfecv.cv_results_['mean_test_score'] + rfecv.cv_results_['std_test_score'], alpha = 0.3, zorder = 0)
    plt.savefig(output_name+'.png')

    # Persist the selection: boolean support mask -> column names.
    rfe_values = rfecv.get_support()
    best_features = X.loc[:, rfe_values].columns.to_list()
    pd.Series(name = '#best_features', data=best_features).to_csv(output_name+'_bf.csv', index = False)
    pd.DataFrame(rfecv.cv_results_).to_csv(output_name+'_cv.csv', index = False)
    pd.DataFrame(columns = X.columns, data = np.expand_dims(rfecv.ranking_, axis=0)).to_csv(output_name+'_ranking.csv', index = False)
    return best_features, rfecv.ranking_ , rfecv.cv_results_
def create_dir(path):
    """
    This routine creates directories.

    Creates *path* including all intermediate directories (like
    ``mkdir -p``) when it does not already exist, printing a success or
    failure message.  Does nothing silently when the directory exists.

    The original walked the path component by component with a bare
    ``except: pass`` that swallowed every failure; ``os.makedirs`` with
    ``exist_ok=True`` does the same job and surfaces real errors.
    """
    if not os.path.isdir(path):
        try:
            os.makedirs(path, exist_ok=True)
        except OSError:
            print ("Creation of the directory %s failed" % path)
        else:
            print ("Successfully created the directory %s " % path)
def dict_merge(dct, merge_dct):
    """
    Recursively merge *merge_dct* into *dct* in place.

    When both sides hold a mapping for the same key, the two are merged
    key by key; any other value in *merge_dct* overwrites the entry in
    *dct*.  Returns None (mutates *dct*).
    """
    from collections.abc import Mapping
    for key, incoming in merge_dct.items():
        current = dct.get(key)
        if isinstance(current, dict) and isinstance(incoming, Mapping):
            dict_merge(current, incoming)
        else:
            dct[key] = incoming
def NLL(y_true, y_pred):
    """Negative log-likelihood loss for distribution-output models.

    *y_pred* must expose ``log_prob`` (a tfp distribution layer output).
    """
    return -y_pred.log_prob(y_true)
def normal_sp(params):
    """Build a Normal distribution from concatenated [loc, scale] outputs.

    The first half of axis 1 is the mean; the second half is mapped to a
    positive scale via a scaled softplus with a 1e-4 floor.
    """
    shape = params.shape[1]
    return tfp.distributions.Normal(loc=params[:, 0:int(shape/2)], scale=1e-4 + tf.math.softplus(0.05 * params[:,int(shape/2):int(shape)]))  # both parameters are learnable
def normal_exp(params):
    """Build a Normal distribution from concatenated [loc, log-scale] outputs.

    Like normal_sp but uses exp instead of softplus for the scale half.
    """
    shape = params.shape[1]
    return tfp.distributions.Normal(loc=params[:,0:int(shape/2)], scale=tf.math.exp(params[:, int(shape/2):int(shape)]))  # both parameters are learnable
def random_forest(n_inputs, n_outputs, n_instances, params):
    """Build a scikit-learn random-forest regressor.

    n_inputs/n_outputs/n_instances are unused; they are kept so every
    model builder in this module shares the same signature.
    """
    model = RandomForestRegressor(n_estimators=params['n_estimators'], n_jobs = -1, verbose = params['verbose'])
    return model
def k_neighbors(n_inputs, n_outputs, n_instances, params):
    """Build a scikit-learn k-nearest-neighbors regressor.

    n_inputs/n_outputs/n_instances are unused; kept for the uniform
    builder signature shared by all models in this module.
    """
    model = KNeighborsRegressor(n_neighbors=params['n_neighbors'], weights=params['weights'], p=params['power_minkowski'], n_jobs = 1)
    return model
def gaussian_posterior(n_inputs, n_outputs, n_instances, params):
    """Dense network with a Gaussian DistributionLambda output head.

    Hidden layer sizes come from params['hidden_neurons_<i>'] for
    i = 1..params['hidden_layers'].  The head emits 2*n_outputs values,
    split into mean and softplus scale by normal_sp.  n_instances is
    unused (uniform builder signature).
    """
    learning_rate = float(params['initial_learning_rate'])
    inputs = Input(shape=(n_inputs,), name = 'input')
    for layer in range(params['hidden_layers']):
        # chain each Dense layer onto the previous one
        if layer == 0:
            previous = inputs
        else:
            previous = hidden
        hidden = tf.keras.layers.Dense(params['hidden_neurons_%i'%(layer+1)], activation="relu", name = 'hidden_%i'%layer)(previous)
    params_mc = Dense(n_outputs * 2)(hidden)
    dist = tfp.layers.DistributionLambda(normal_sp)(params_mc)
    model_nobay = Model(inputs=inputs, outputs=dist, name='Gauss')
    model_nobay.compile(Adam(learning_rate=learning_rate), loss=params['loss'])
    return model_nobay
def deterministic(n_inputs, n_outputs, n_instances, params):
    """Plain dense regression network (point predictions, no distribution).

    Same layer layout as gaussian_posterior but with a linear n_outputs
    head.  n_instances is unused (uniform builder signature).
    """
    learning_rate = float(params['initial_learning_rate'])
    inputs = Input(shape=(n_inputs,), name = 'input')
    for layer in range(params['hidden_layers']):
        # chain each Dense layer onto the previous one
        if layer == 0:
            previous = inputs
        else:
            previous = hidden
        hidden = Dense(params['hidden_neurons_%i'%(layer+1)], activation="relu", name = 'hidden_%i'%layer)(previous)
    output = Dense(n_outputs)(hidden)
    model = Model(inputs=inputs, outputs=output, name='Deterministic')
    model.compile(Adam(learning_rate=learning_rate), loss=params['loss'])
    return model
def variational_inference(n_inputs, n_outputs, n_instances, params):
    """Bayesian network using DenseFlipout variational layers.

    KL divergence terms are scaled by 1/n_instances so the prior term is
    weighted per-sample in the ELBO.  Output head emits 2*n_outputs
    values converted to a Normal by normal_sp.
    """
    learning_rate = float(params['initial_learning_rate'])
    kernel_divergence_fn=lambda q, p, _: tfp.distributions.kl_divergence(q, p) / (n_instances * 1.0)
    bias_divergence_fn=lambda q, p, _: tfp.distributions.kl_divergence(q, p) / (n_instances * 1.0)
    inputs = Input(shape=(n_inputs,), name = 'input')
    for layer in range(params['hidden_layers']):
        # chain each variational layer onto the previous one
        if layer == 0:
            previous = inputs
        else:
            previous = hidden
        hidden = tfp.layers.DenseFlipout(params['hidden_neurons_%i'%(layer+1)],
                                         bias_posterior_fn=tfp.layers.util.default_mean_field_normal_fn(),
                                         bias_prior_fn=tfp.layers.default_multivariate_normal_fn,
                                         kernel_divergence_fn=kernel_divergence_fn,
                                         bias_divergence_fn=bias_divergence_fn,
                                         activation="relu", name = 'hidden_%i'%layer)(previous)
    params_mc = tfp.layers.DenseFlipout(n_outputs * 2,
                                        bias_posterior_fn=tfp.layers.util.default_mean_field_normal_fn(),
                                        bias_prior_fn=tfp.layers.default_multivariate_normal_fn,
                                        kernel_divergence_fn=kernel_divergence_fn,
                                        bias_divergence_fn=bias_divergence_fn)(hidden)
    dist = tfp.layers.DistributionLambda(normal_sp)(params_mc)
    model_vi = Model(inputs=inputs, outputs=dist, name='Variational_Inference')
    model_vi.compile(Adam(learning_rate=learning_rate), loss=params['loss'])
    return model_vi
def dropout(n_inputs, n_outputs, n_instances, params):
    """Monte-Carlo-dropout network with a Gaussian output head.

    Dropout layers are called with training=True so they stay active at
    prediction time (MC dropout).  NOTE(review): here
    params['hidden_layers'] and params['hidden_dropout'] are iterated as
    lists, unlike the other builders where 'hidden_layers' is an int —
    confirm the expected params schema.  n_instances is unused.
    """
    learning_rate = float(params['initial_learning_rate'])
    inputs = Input(shape=(n_inputs,))
    if params['input_dropout'] > 0:
        previous = Dropout(params['input_dropout'])(inputs, training=True)
    else:
        previous = inputs
    for layer, (hidden_neurons, hidden_dropout) in enumerate(zip(params['hidden_layers'], params['hidden_dropout'])):
        previous = Dense(hidden_neurons, activation="relu")(previous)
        previous = Dropout(hidden_dropout)(previous, training=True)
    params_mc = Dense(n_outputs * 2)(previous)
    dist_mc = tfp.layers.DistributionLambda(normal_sp, name='normal_sp')(params_mc)
    model_mc = Model(inputs=inputs, outputs=dist_mc, name='Dropout')
    model_mc.compile(Adam(learning_rate=learning_rate), loss=params['loss'])
    return model_mc
def run_experiment_cv(params, X, y, X_error= None, y_error = None, n_splits=10, n_repeats=10, stratify = None, n_chunks = 1, pool = None):
    """Train and validate the selected model with repeated K-fold CV.

    Parameters
    ----------
    params : dict of model/training hyper-parameters; params['model']
        names one of the builder functions in this module.
    X, y : pandas DataFrames of features and targets.
    X_error, y_error : optional per-row uncertainties forwarded to
        predict_validate (may be None).
    n_splits, n_repeats : repeated (stratified) K-fold configuration.
    stratify : optional labels; enables RepeatedStratifiedKFold.
    n_chunks, pool : forwarded to predict_validate for chunked prediction.

    Returns
    -------
    (valid_indices, results, losses) : flattened validation indices, the
    concatenated per-fold predictions, and per-fold loss histories.
    """
    def exp_decay(epoch):
        # Exponential learning-rate schedule (not currently registered in
        # `callbacks`; kept available).
        initial_lrate = 0.1
        k = 0.1
        # BUGFIX: the decay previously used an undefined name `t`
        # (np.exp(-k*t)), raising NameError if ever called; use the epoch.
        lrate = initial_lrate * np.exp(-k*epoch)
        sys.stdout.write("\rEpoch {0}, using learning rate of {1}%".format(epoch, lrate))
        sys.stdout.flush()
        return lrate

    def step_decay(epoch, lr):
        # Halve the LR every `step_decay_learning_rate` epochs, rounded to
        # two significant digits, never dropping below final_learning_rate.
        if ((epoch+1) % params['step_decay_learning_rate'] == 0):
            lr = round(lr * 0.5, -int(floor(log10(abs(lr * 0.5)))) + 1)
            print('LR =', max(params['final_learning_rate'], lr))
        return max(params['final_learning_rate'], lr)

    def calcProcessTime(starttime, cur_iter, max_iter):
        # Elapsed / remaining minutes and wall-clock ETA for a loop.
        telapsed = time.time() - starttime
        testimated = (telapsed/cur_iter)*(max_iter)
        finishtime = starttime + testimated
        finishtime = dt.datetime.fromtimestamp(finishtime).strftime("%H:%M:%S")  # in time
        lefttime = testimated-telapsed  # in seconds
        return (int(telapsed)/60., int(lefttime)/60., finishtime)

    features_names, targets_names = X.columns.to_list(), y.columns.to_list()
    n_inputs, n_outputs = X.shape[1], y.shape[1]

    if 'train' in params['mode']:
        epochs = int(params['epochs'])
        batch_size = int(params['batch_size'])
        lrs = tf.keras.callbacks.LearningRateScheduler(step_decay)
        es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta = params['delta_early_stop_patience'], patience=params['early_stop_patience'], restore_best_weights=True)
        # register the step decay only when it will actually fire
        if ((params['step_decay_learning_rate'] > 0) & (params['step_decay_learning_rate'] < epochs)):
            callbacks = [es, lrs]
        else:
            callbacks = [es]

    # NOTE(review): `epochs`, `batch_size` and `callbacks` are only defined
    # when 'train' is in params['mode']; the loop below assumes that mode.
    model = globals()[params['model']]

    if stratify is not None:
        cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats)
        y_cv = stratify
    else:
        cv = RepeatedKFold(n_splits=n_splits, n_repeats=n_repeats)
        y_cv = y

    losses = []
    val_losses = []
    results = []
    valid_indices = []
    for ii, (train_ind, valid_ind) in enumerate(cv.split(X, y_cv)):
        print('\nCross validation', ii)
        tf.keras.backend.clear_session()
        X_train, X_valid = X.iloc[train_ind,:], X.iloc[valid_ind,:]
        y_train, y_valid = y.iloc[train_ind,:], y.iloc[valid_ind,:]
        try:
            X_error_train, X_error_valid = X_error.iloc[train_ind,:], X_error.iloc[valid_ind,:]
            y_error_train, y_error_valid = y_error.iloc[train_ind,:], y_error.iloc[valid_ind,:]
        except:
            # X_error / y_error may be None; validation then gets no errors.
            X_error_valid = None
            y_error_valid = None
        # We save the indices
        valid_indices.append(valid_ind)
        n_instances = X_train.shape[0]
        built_model = model(n_inputs, n_outputs, n_instances, params)
        if ii == 0:
            print(built_model.summary())
            print(callbacks)
        history = built_model.fit(X_train.values, y_train.values, epochs=epochs, batch_size=batch_size, verbose=params['verbose'], callbacks=callbacks, validation_split=0.3)
        losses.append(pd.DataFrame(data={'loss_%i'%ii:history.history['loss'], 'val_loss_%i'%ii:history.history['val_loss']}))
        results.append(predict_validate(params, X_valid, y_valid, X_valid_error = X_error_valid, y_valid_error = y_error_valid, built_model = built_model, idxs_pred = None, n_chunks = n_chunks, pool = pool)[0])

    # Take the average probabilty on 5 folds
    results = pd.concat(results)
    losses = pd.concat(losses, axis = 1)
    valid_indices = [item for sublist in valid_indices for item in sublist]
    tf.keras.backend.clear_session()
    del [built_model]
    return valid_indices, results, losses
def get_statistics(preds, targets_names, full_cov, pool = None, index = None):
    """
    Compress Monte-Carlo predictions into per-object summary statistics.

    Parameters
    ----------
    preds : ndarray, shape (n_iterations, n_objects, n_features)
        Stacked predictions, one slice per Monte-Carlo iteration.
    targets_names : list of str
        Names of the predicted features (last axis of ``preds``).
    full_cov : bool
        If True, additionally compress each object's (rotated) prediction
        cloud with a Gaussian Mixture Model and append its parameters.
    pool : multiprocessing.Pool, optional
        Worker pool used to parallelize the per-object GMM fits
        (required when ``full_cov`` is True).
    index : array-like, optional
        Row index for the returned DataFrame (one entry per object).

    Returns
    -------
    pandas.DataFrame
        Sigma-clipped mean / robust std, pairwise correlations and
        percentiles per object (plus the GMM compression when requested).
    """
    from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std
    from sklearn.mixture import GaussianMixture
    # The format of preds is (n_iterations, n_objects, n_features)
    clipped_pred = np.ma.filled(sigma_clip(preds, axis = 0, sigma = 6, stdfunc='mad_std', maxiters=10), np.nan)
    # Standard statistics: ~2-sigma, 1-sigma, median, +1-sigma, +2-sigma percentiles
    pc = np.nanpercentile(clipped_pred, [2.275, 15.865, 50.0, 84.135, 97.725], axis=0)
    std = mad_std(clipped_pred, axis = 0, ignore_nan=True)
    mean = np.nanmean(clipped_pred, axis = 0)
    # The correlations have to be obtained in a loop
    triu_indices = np.triu_indices(len(targets_names), k = 1)
    correlation_names = [targets_names[ii]+'_'+targets_names[jj]+'_corr' for (ii, jj) in zip(triu_indices[0], triu_indices[1])]
    correlation_tf = tfp.stats.correlation(preds, sample_axis=0, event_axis=-1, keepdims=False, name=None).numpy()
    correlation = np.ones((preds.shape[1], len(correlation_names)))*-99
    for ii, corr in enumerate(correlation_tf):
        cli_progress_test(ii+1, preds.shape[1])
        correlation[ii, :] = corr[triu_indices]
    del [correlation_tf]
    results = pd.DataFrame(columns=[target+'_mean' for target in targets_names]+[target+'_std' for target in targets_names]+correlation_names+[target+'_pc02' for target in targets_names]+[target+'_pc16' for target in targets_names]+[target+'_pc50' for target in targets_names]+[target+'_pc84' for target in targets_names]+[target+'_pc98' for target in targets_names], data = np.hstack([mean, std, correlation, pc[0], pc[1], pc[2], pc[3], pc[4]]), index = index)
    if full_cov:
        print('')
        print('Fitting the GMM...')
        # Number of components for the GMM
        gmm_n_comp = len(targets_names)
        # Rotate the prediction cloud before fitting.
        # NOTE(review): this rotation matrix is hard-coded 3x3, so it
        # assumes exactly 3 output features — confirm before using with a
        # different number of targets.
        mat = np.array([[(np.sqrt(3)+3)/6, -np.sqrt((2-np.sqrt(3))/6), -1/np.sqrt(3)],
                        [-np.sqrt((2-np.sqrt(3))/6), (np.sqrt(3)+3)/6, -1/np.sqrt(3)],
                        [1/np.sqrt(3), 1/np.sqrt(3), 1/np.sqrt(3)]])
        rot_preds = np.dot(preds, mat.T)
        gmm = GaussianMixture(n_components=gmm_n_comp, init_params = 'k-means++')
        # Parallelize GMM: one chunk of objects per pool worker.
        # (Dead pre-allocation of rot_covariances/rot_means/rot_weights
        # arrays removed — the parallel path below supersedes them.)
        args_gmm = []
        for pred_chunk, index_chunk in zip(np.array_split(rot_preds.swapaxes(0,1), pool._processes), np.array_split(index, pool._processes)):
            args_gmm.append((gmm, pred_chunk, index_chunk))
        results = results.join(pd.concat(pool.map(launch_gmm_predictions, args_gmm)))
    return results
def launch_gmm_predictions(args):
    """
    Adapter for ``Pool.map``, which hands each worker a single argument:
    unpacks the (gmm, data, index) tuple and forwards it to
    ``gmm_prediction``.
    """
    return gmm_prediction(*args)
def gmm_prediction(gmm, data, index):
    """
    This routine fits a multi gaussian mixture model into data and returns the parameters of such fit

    Parameters
    ----------
    gmm : sklearn.mixture.GaussianMixture
        Pre-configured mixture model, re-fitted for every object.
    data : ndarray, shape (n_objects, n_iterations, n_features)
        Monte-Carlo prediction cloud, one slab per object.
    index : array-like
        Row index for the returned DataFrame (one entry per object).

    Returns
    -------
    pandas.DataFrame
        Per-object flattened GMM covariances, means and weights.
    """
    gmm_n_comp = gmm.n_components
    # Upper-triangular indices (diagonal included) used to flatten the
    # symmetric per-component covariance matrices.
    triu_indices = np.triu_indices(data.shape[2]-1, k = 0)
    covariances_names = ['comp%i_cov_%i%i'%(comp+1, ii+1, jj+1) for comp in range(gmm_n_comp) for (ii, jj) in zip(triu_indices[0], triu_indices[1])]
    means_names = ['comp%i_mean_%i'%(comp+1, ii+1) for comp in range(gmm_n_comp) for ii in range(data.shape[2]-1)]
    weights_names = ['comp%i_weight'%(comp+1) for comp in range(gmm_n_comp)]
    # -99 is the sentinel value for rows that were never filled.
    covariances = np.ones((data.shape[0], len(covariances_names)))*-99
    means = np.ones((data.shape[0], len(means_names)))*-99
    weights = np.ones((data.shape[0], len(weights_names)))*-99
    for ii, obj in enumerate(data):
        cli_progress_test(ii+1, data.shape[0])
        # NOTE(review): only the first two feature columns are fitted, while
        # the output arrays are sized for data.shape[2]-1 features; these
        # agree only when data.shape[2] == 3 — confirm for other setups.
        gmm.fit(obj[:,0:2])
        covariances[ii,:] = np.concatenate([cov[triu_indices] for cov in gmm.covariances_])
        means[ii,:] = gmm.means_.flatten()
        weights[ii,:] = gmm.weights_.flatten()
    results = pd.DataFrame(columns=covariances_names+means_names+weights_names, data = np.hstack([covariances, means, weights]), index = index)
    return results
def cli_progress_test(current, end_val, bar_length=50):
    """
    Render a one-line, in-place text progress bar on stdout.

    Parameters
    ----------
    current : int
        Number of items processed so far.
    end_val : int
        Total number of items expected.
    bar_length : int, optional
        Width of the bar in characters (default 50).
    """
    fraction = float(current) / end_val
    filled = int(round(fraction * bar_length))
    bar = '#' * filled + ' ' * (bar_length - filled)
    # \r rewinds to the line start so successive calls overwrite the bar.
    sys.stdout.write("\rProcessing: [{0}] {1}%".format(bar, int(round(fraction * 100))))
    sys.stdout.flush()
def build_model(params, X_train = None, y_train = None, weights_file = None):
    """
    Build (and optionally train or load) a Keras model according to
    ``params['mode']``.

    Parameters
    ----------
    params : dict
        Configuration; ``params['model']`` names a model-factory function
        resolved through ``globals()``; ``params['mode']`` may contain
        'train', 'predict' and/or 'just_fit'.
    X_train, y_train : pandas.DataFrame, optional
        Training inputs / targets (required when 'train' is in the mode).
    weights_file : str, optional
        Currently unused; weight paths are read from ``params`` instead.

    Returns
    -------
    (model, pandas.DataFrame)
        The built model and the loss history (empty when no training ran).
    """
    def exp_decay(epoch):
        # Exponential LR schedule. Currently unused: `lrs` below is built
        # from `step_decay`.
        initial_lrate = 0.1
        k = 0.1
        # Bug fix: previously used an undefined name `t` instead of `epoch`,
        # which would raise NameError if this scheduler were ever wired in.
        lrate = initial_lrate * np.exp(-k*epoch)
        sys.stdout.write("\rEpoch {0}, using learning rate of {1}%".format(epoch, lrate))
        sys.stdout.flush()
        return lrate
    def step_decay(epoch, lr):
        # Halve the LR every `step_decay_learning_rate` epochs, rounded to
        # 2 significant figures, floored at `final_learning_rate`.
        if ((epoch+1) % params['step_decay_learning_rate'] == 0):
            lr = round(lr * 0.5, -int(floor(log10(abs(lr * 0.5)))) + 1)
            print('LR =', max(params['final_learning_rate'], lr))
        return max(params['final_learning_rate'], lr)
    def variable_step_decay(epoch, lr):
        # Like step_decay, but doubles the decay period after each halving.
        if ((epoch+1) % params['step_decay_learning_rate'] == 0):
            lr = round(lr * 0.5, -int(floor(log10(abs(lr * 0.5)))) + 1)
            params['step_decay_learning_rate'] *= 2 # This line can be removed if it fails.
            print('LR =', max(params['final_learning_rate'], lr))
        return max(params['final_learning_rate'], lr)
    print('Running with params:', params)
    model = globals()[params['model']]
    # Defaults so the return never hits a NameError when `mode` matches
    # none of the branches below.
    built_model, losses = None, pd.DataFrame()
    if 'train' in params['mode']:
        tf.keras.backend.clear_session()
        n_inputs, n_instances, n_outputs = X_train.shape[1], X_train.shape[0], y_train.shape[1]
        built_model = model(n_inputs, n_outputs, n_instances, params)
        epochs = int(params['epochs'])
        batch_size = int(params['batch_size'])
        print(built_model.summary())
        print('delta_early_stop_patience = %s'%params['delta_early_stop_patience'])
        lrs = tf.keras.callbacks.LearningRateScheduler(step_decay)
        es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta = params['delta_early_stop_patience'], patience=params['early_stop_patience'], restore_best_weights=True)
        if params['step_decay_learning_rate']:
            callbacks = [es, lrs]
        else:
            callbacks = [es]
        history = built_model.fit(X_train.values, y_train, epochs=epochs, batch_size=batch_size, verbose=params['verbose'], callbacks=callbacks, validation_split=0.3, use_multiprocessing = True, workers = 16)
        losses = pd.DataFrame(data={'loss':history.history['loss'], 'val_loss':history.history['val_loss']})
    if 'predict' in params['mode']:
        # Rebuild with the pre-trained dimensions and load stored weights.
        built_model = model(params['pre_trained_n_inputs'], params['pre_trained_n_outputs'], params['pre_trained_n_instances'], params)
        print(built_model.summary())
        built_model.load_weights(params['pre_trained_weights_file'])
        losses = pd.DataFrame()
    if 'just_fit' in params['mode']:
        # sklearn-style fit; assumes a model was built by a branch above.
        built_model = built_model.fit(X_train.values, y_train)
        losses = pd.DataFrame()
    return built_model, losses
def predict_validate(params, X_valid, y_valid, X_valid_error = None, y_valid_error = None, built_model = None, idxs_pred = None, n_chunks = 1, pool = None):
    """
    Run ``predict`` on a validation set and attach the ground truth.

    Missing error tables default to all-zero frames whose columns mirror
    the corresponding value columns with an ``_error`` suffix.

    Returns
    -------
    (pandas.DataFrame, ndarray)
        Validation targets and their errors joined with the prediction
        statistics, plus the raw predictions selected by ``idxs_pred``.
    """
    if X_valid_error is None:
        zero_ex = np.zeros_like(X_valid)
        X_valid_error = pd.DataFrame(data=zero_ex, columns=['%s_error'%col for col in X_valid.columns.to_list()], index = X_valid.index)
    if y_valid_error is None:
        zero_ey = np.zeros_like(y_valid)
        y_valid_error = pd.DataFrame(data=zero_ey, columns=['%s_error'%col for col in y_valid.columns.to_list()], index = y_valid.index)
    stats, y_pred_sel = predict(params, X_valid, X_error = X_valid_error, built_model = built_model, idxs_pred = idxs_pred, n_chunks = n_chunks, pool = pool)
    joined = y_valid.join(y_valid_error).join(stats)
    return joined, y_pred_sel
def predict(params, X, X_error = None, built_model = None, idxs_pred = None, n_chunks = 1, pool = None):
    """
    Produce Monte-Carlo prediction statistics for X, optionally chunked
    and/or parallelized.

    Parameters
    ----------
    params : dict
        Configuration (``targets_names``, ``full_cov``, model settings, ...).
    X : pandas.DataFrame
        Input features.
    X_error : pandas.DataFrame, optional
        1-sigma uncertainties on X; zeros when omitted.
    built_model : optional
        Pre-built model forwarded to ``make_predictions``.
    idxs_pred : sequence of int, optional
        Row positions whose raw prediction samples should also be returned.
    n_chunks : int
        Number of chunks X is split into.
    pool : multiprocessing.Pool, optional
        When given (and ``n_chunks > 1``), chunks are predicted in parallel.

    Returns
    -------
    (pandas.DataFrame, ndarray or None)
        Summary statistics per object and, when ``idxs_pred`` was given,
        the selected raw prediction samples (None otherwise).
    """
    if X_error is None:
        X_error = pd.DataFrame(data=np.zeros_like(X), columns=['%s_error'%col for col in X.columns.to_list()], index = X.index)
    # Bug fix: y_pred_sel could previously be referenced before assignment
    # (multiprocessing branch) or be built from a None idxs_pred (single
    # processor branch); default it explicitly.
    y_pred_sel = None
    if n_chunks > 1:
        if pool is not None:
            print('Running prediction in multiprocessing mode.')
            args_prediction = []
            for X_i, X_error_i in zip(np.array_split(X, n_chunks), np.array_split(X_error, n_chunks)):
                args_prediction.append((params, X_i, X_error_i, built_model))
            y_pred = np.concatenate(pool.map(launch_make_predictions, args_prediction), axis = 1)
            results = get_statistics(y_pred, params['targets_names'], params['full_cov'], pool = pool, index = X.index)
            if idxs_pred is not None:
                y_pred_sel = y_pred[:, [idx for idx in idxs_pred if idx < np.shape(y_pred)[1]], :]
            del [y_pred]
        else:
            # Chunks are predicted sequentially and only their summary
            # statistics are kept, bounding peak memory.
            print('Running prediction in memory saving mode.')
            y_pred_sel = []
            results = []
            for ii, (indices_total, X_i, X_error_i) in enumerate(zip(np.array_split(range(X.shape[0]), n_chunks), np.array_split(X, n_chunks), np.array_split(X_error, n_chunks))):
                print('\n')
                print('Predicting %i of %i'%(ii+1, n_chunks))
                y_pred_i = make_predictions(params, X_i, X_error_i, built_model)
                results.append(get_statistics(y_pred_i, params['targets_names'], params['full_cov'], pool = Pool(int(cpu_count()/2)), index = X_i.index))
                if idxs_pred is not None:
                    indices = np.where(np.in1d(indices_total, [idx for idx in idxs_pred if idx < np.shape(X)[0]]))[0]
                    y_pred_sel.append(np.take(y_pred_i, indices, axis=1))
                del [y_pred_i]
            results = pd.concat(results)
            if idxs_pred is not None:
                y_pred_sel = np.concatenate(y_pred_sel, axis =1)
            else:
                y_pred_sel = None
    else:
        print('Running prediction single processor mode.')
        y_pred = make_predictions(params, X, X_error, built_model)
        results = get_statistics(y_pred, params['targets_names'], params['full_cov'], pool = Pool(int(cpu_count()/2)), index = X.index)
        # Bug fix: guard added — iterating a None idxs_pred raised TypeError.
        if idxs_pred is not None:
            y_pred_sel = y_pred[:, [idx for idx in idxs_pred if idx < np.shape(y_pred)[1]], :]
        del [y_pred]
    return results, y_pred_sel
def launch_make_predictions(args):
    """
    Adapter for ``Pool.map``, which hands each worker a single argument:
    unpacks the (params, X, eX, built_model) tuple and forwards it to
    ``make_predictions``.
    """
    return make_predictions(*args)
def make_predictions(params, X, eX = None, built_model = None, process = None):
    """
    Run ``alleatoric_n_iter`` stochastic forward passes of the model on X.

    When ``params['alleatoric_montecarlo']`` is truthy, the inputs are
    perturbed each iteration with Gaussian noise of width ``eX``
    (Monte-Carlo propagation of input uncertainties); otherwise the same X
    is predicted repeatedly (sampling only the model's stochasticity).

    Parameters
    ----------
    params : dict
        Configuration with pre-trained model dimensions and weight path.
    X : pandas.DataFrame or ndarray
        Input features.
    eX : pandas.DataFrame or ndarray, optional
        1-sigma input uncertainties (used only in Monte-Carlo mode).
    built_model : optional
        Pre-built model; rebuilt from ``params`` when None.
    process : int, optional
        Worker id; also seeds the RNG so parallel chunks draw
        independent noise.

    Returns
    -------
    ndarray, shape (alleatoric_n_iter, n_objects, pre_trained_n_outputs)
    """
    from numpy.random import default_rng
    tf.keras.backend.clear_session()
    if built_model is None:
        model = globals()[params['model']]
        built_model = model(params['pre_trained_n_inputs'], params['pre_trained_n_outputs'], params['pre_trained_n_instances'], params)
    # Weights are (re)loaded unconditionally so pool workers that received
    # a pickled/bare model still end up with the trained weights.
    # (Dead local `model_n` removed — it was computed but never used.)
    built_model.load_weights(params['pre_trained_weights_file'])
    rng_val = default_rng(process)
    iterate = int(params['alleatoric_n_iter'])
    preds = np.zeros((iterate, X.shape[0], params['pre_trained_n_outputs']))
    if params['alleatoric_montecarlo']:
        for kk in range(0, iterate):
            cli_progress_test(kk+1, iterate)
            preds[kk, :, :] = built_model.predict(rng_val.normal(X, eX), verbose=0)
    else:
        for kk in range(0, iterate):
            cli_progress_test(kk+1, iterate)
            preds[kk, :, :] = built_model.predict(X, verbose=0)
    return preds
def get_data(params):
    """
    Load, clean, feature-engineer and normalize the training (and optional
    test) catalogs described in ``params``.

    Steps: read the training catalog (polars), shuffle, optionally filter
    by class and by target NaNs, build combined proper-motion features,
    optionally add Galactic latitude, attach photometry and its errors,
    min-max normalize X to [0, 1] on the 0.5-99.5 percentile range,
    one-hot/label-encode categorical targets, and apply the same
    normalization to the optional test catalog.

    Returns
    -------
    tuple
        (training_X, training_eX, training_y, training_ey, training_y_cat,
         training_output, quality_data, X_vars, y_vars, y_vars_err,
         predict_X, predict_eX, predict_output, minimum, dynamic_range,
         fluxes_zpt)
    """
    def rel2abs(var, var_relerr):
        # Convert relative errors to absolute ones: err_abs = err_rel * value.
        return pd.DataFrame(data = var_relerr.values * var.values, columns = ['%s_error'%col.replace('relerr_', '').replace('_relerr', '').replace('relerr', '') for col in var_relerr.columns], index = var_relerr.index)
    def get_errors(data, used_cols = None, force_cols = None):
        # Collect, for every used column, its matching error column under
        # the various naming conventions found in the catalogs; a NaN
        # column is created when no convention matches.
        if force_cols is not None:
            errors = data.loc[:, force_cols]
        else:
            errors = pd.DataFrame(index = data.index)
            all_cols = data.columns
            if not used_cols:
                # Bug fix: previously referenced an undefined name `cols`
                # (NameError when called without used_cols).
                used_cols = [x for x in all_cols if (not '_error' in x) & (not 'err' in x) ]
            for used_col in used_cols:
                if '%s_error'%used_col in all_cols:
                    errors['%s_error'%used_col] = data['%s_error'%used_col]
                elif len(used_col.split('mag_')) > 1:
                    if 'mag_err_%s'%used_col.split('mag_')[1] in all_cols:
                        errors['mag_err_%s'%used_col.split('mag_')[1]] = data['mag_err_%s'%used_col.split('mag_')[1]]
                    else:
                        errors['%s_error'%used_col] = np.nan
                elif '%s_err'%used_col in all_cols:
                    errors['%s_err'%used_col] = data['%s_err'%used_col]
                elif '%s_ERR'%used_col in all_cols:
                    errors['%s_ERR'%used_col] = data['%s_ERR'%used_col]
                elif '%s_RMS'%used_col in all_cols:
                    errors['%s_RMS'%used_col] = data['%s_RMS'%used_col]
                elif ('%s_ERRZPT'%used_col.replace('_ZPT', '') in all_cols):
                    errors['%s_ERRZPT'%used_col.replace('_ZPT', '')] = data['%s_ERRZPT'%used_col.replace('_ZPT', '')]
                elif 'e_%s'%used_col in all_cols:
                    errors['e_%s'%used_col] = data['e_%s'%used_col]
                else:
                    errors['%s_error'%used_col] = np.nan
        return errors, errors.columns.to_list()
    def get_photometry(data, used_photo):
        # Select the photometry (value + relative-error) column sets that
        # correspond to the requested aperture/PSF flavour.
        if used_photo=='flux_psfcor':
            photo = [mag for mag in data.columns if 'flux_psfcor' in mag]
            photo_err = [flux for flux in data.columns if 'flux_relerr_psfcor' in flux]
        if used_photo=='corr_photo_3':
            photo = [mag for mag in data.columns if 'corr_mag_aper_3_0' in mag]
            photo_err = [flux for flux in data.columns if 'flux_relerr_aper_3' in flux]
        if used_photo=='photo_3':
            photo = [mag for mag in data.columns if (('mag_aper_3_0' in mag) & ('corr_' not in mag))]
            photo_err = [flux for flux in data.columns if 'flux_relerr_aper_3' in flux]
        if used_photo=='flux_3_worstpsf':
            photo = [mag for mag in data.columns if 'flux_aper3_worstpsf_' in mag]
            photo_err = [flux for flux in data.columns if 'flux_relerr_aper3_worstpsf' in flux]
        if used_photo=='mag_3_worstpsf':
            photo = [mag for mag in data.columns if 'mag_aper3_worstpsf_' in mag]
            photo_err = [flux for flux in data.columns if 'flux_relerr_aper3_worstpsf' in flux]
        if used_photo=='flux_aper_3_0':
            photo = [mag for mag in data.columns if 'flux_aper_3_0_' in mag]
            photo_err = [flux for flux in data.columns if 'flux_relerr_aper_3_0_' in flux]
        if used_photo=='fnu_flux_aper':
            photo = [mag for mag in data.columns if 'fnu_flux_aper_' in mag]
            photo_err = [flux for flux in data.columns if 'fnu_flux_relerr_aper_' in flux]
        if used_photo=='flux_aper':
            photo = [mag for mag in data.columns if (('flux_aper_' in mag) and ('_flux_aper_' not in mag)) ]
            photo_err = [flux for flux in data.columns if (('flux_relerr_aper_' in flux) and ('_flux_relerr_aper_' not in flux))]
        if used_photo=='mag_aper':
            photo = [mag for mag in data.columns if 'mag_aper_' in mag]
            photo_err = [flux for flux in data.columns if 'mag_err_aper_' in flux]
        return photo, photo_err
    def transform_errors(data, photo, photo_err):
        # We transform relative to absolute errors
        abs_errors = rel2abs(data.loc[:, photo], data.loc[:, photo_err])
        data = data.join(abs_errors)
        photo_err = abs_errors.columns.to_list()
        return data, photo_err
    X_vars, y_vars, y_vars_err, validation_vars, quality_vars, prediction_vars = params['X_vars'], params['y_vars'], params['y_vars_err'], params['validation_vars'], params['quality_vars'], params['prediction_vars']
    # Read data
    if params['nrows'] is not None:
        #training_data = pd.read_csv(params['training_catalog'], nrows=params['nrows'])
        training_data = pl.read_csv(params['training_catalog'], nrows=params['nrows']).to_pandas()
    else:
        # NOTE(review): polars' `skip_rows` expects an integer; the lambda
        # branches below look like a leftover from pandas' `skiprows`
        # callable API — confirm they still work with pl.read_csv.
        if (params['skip_nrows'] is not None) and (params['each_nrows'] > 1):
            skip_function = lambda i: (i+params['skip_nrows']) % params['each_nrows'] != 0
        elif (params['each_nrows'] > 1):
            skip_function = lambda i: i % params['each_nrows'] != 0
        elif (params['skip_nrows'] is not None):
            skip_function = params['skip_nrows']
        else:
            skip_function = 0
        #training_data = pd.read_csv(params['training_catalog'], skiprows=skip_function)
        training_data = pl.read_csv(params['training_catalog'], skip_rows=skip_function).to_pandas()
    print('We have read %i lines.'%len(training_data))
    #Shuffle data
    training_data = shuffle(training_data, random_state = params['random_generator_seed'])
    try:
        print('For the moment, the distribution of classes is as follow:\n', training_data.groupby(['CLASS'])['CLASS'].count())
    except:
        pass
    # We may want to select a specific class
    if params['select_class'] is not None:
        if isinstance(params['select_class'], list):
            training_data = training_data.loc[training_data.CLASS.isin(params['select_class'])]
        elif isinstance(params['select_class'], str):
            training_data = training_data.loc[training_data.CLASS == params['select_class']]
    if params['y_drop_nans']:
        # We drop NaN values, if present.
        y_float_vars = training_data.loc[:, y_vars+y_vars_err].select_dtypes(exclude=['object']).columns
        training_data.loc[:, y_float_vars] = training_data.loc[:, y_float_vars].apply(lambda x: np.where(x < -999.0, np.nan, x)).values
        training_data = training_data.loc[training_data.loc[:, y_vars+y_vars_err].notnull().all(axis = 1), :]
    else:
        # We simply assign a label -999.0 to the missing data
        training_data.loc[:, y_vars+y_vars_err] = training_data.loc[:, y_vars+y_vars_err].fillna(-999.0)
    print('After selection based on y_bars we continue with %i lines.'%len(training_data))
    photo, photo_err = get_photometry(training_data, params['used_photo'])
    # Combine the Gaia proper-motion components into a single modulus
    # (with propagated uncertainty) and swap it into X_vars.
    pmra_g = unp.uarray(training_data.pmra_g, training_data.e_pmra_g)
    pmdec_g = unp.uarray(training_data.pmde_g, training_data.e_pmde_g)
    pm_g = unp.sqrt(pmra_g**2 + pmdec_g**2)
    training_data['pm_g_error'] = unp.std_devs(pm_g)
    training_data['pm_g'] = unp.nominal_values(pm_g)
    X_vars = X_vars + ['pm_g_error', 'pm_g']
    X_vars.remove('e_pmra_g')
    X_vars.remove('e_pmde_g')
    X_vars.remove('pmra_g')
    X_vars.remove('pmde_g')
    del [pmra_g, pmdec_g, pm_g]
    # Same for CatWISE proper motions (converted to mas/yr via *1000).
    pmra_cw = unp.uarray(training_data.pmra_cw*1000, np.abs(training_data.e_pmra_cw)*1000)
    pmdec_cw = unp.uarray(training_data.pmde_cw*1000, np.abs(training_data.e_pmde_cw)*1000)
    pm_cw = unp.sqrt(pmra_cw**2 + pmdec_cw**2)
    training_data['e_pm_cw'] = unp.std_devs(pm_cw)
    training_data['pm_cw'] = unp.nominal_values(pm_cw)
    X_vars = X_vars + ['e_pm_cw', 'pm_cw']
    X_vars.remove('e_pmra_cw')
    X_vars.remove('e_pmde_cw')
    X_vars.remove('pmra_cw')
    X_vars.remove('pmde_cw')
    del [pmra_cw, pmdec_cw, pm_cw]
    #Use Galactic latitude?
    if params['use_gal_lat']:
        from astropy import units as u
        from astropy.coordinates import SkyCoord
        c = SkyCoord(ra=training_data.alpha_j2000.values*u.degree, dec=training_data.delta_j2000.values*u.degree, frame='fk5')
        training_data['l'] = c.galactic.l.value
        training_data['b'] = c.galactic.b.value
        X_vars = X_vars + ['b']
    X_vars = X_vars+photo
    try:
        print('After cleaning, the distribution of classes is as follow:\n', training_data.groupby(['CLASS'])['CLASS'].count())
    except:
        pass
    if params['use_photo_error']:
        X_vars = X_vars + photo_err
    if (params['photo_log']) & ('flux' in params['used_photo']):
        # We transform the fluxes to logarithm
        fluxes_zpt = training_data.loc[:, photo].min().quantile(0.5)
        training_data.loc[:, photo_err] = (training_data.loc[:, photo_err].values)/((training_data.loc[:, photo].values)*np.log(10))
        training_data.loc[:, photo] = np.log10(training_data.loc[:, photo] - fluxes_zpt)
        try:
            training_data.loc[:, [ 'e_fg','e_fbp','e_frp']] = (training_data.loc[:, [ 'e_fg','e_fbp','e_frp']].values)/((training_data.loc[:, [ 'fg','fbp','frp']].values)*np.log(10))
            training_data.loc[:, [ 'fg','fbp','frp']] = np.log10(training_data.loc[:, [ 'fg','fbp','frp']] - fluxes_zpt)
        except:
            pass
    else:
        fluxes_zpt = 0
    # Color-related columns used for QSO detection; here we only sanitize
    # their 99.0 "missing" sentinel to NaN.
    # (Removed a dead `import itertools` that was never used.)
    photo_colors = [mag for mag in training_data.columns if 'magab_mag_aper_4' in mag]+['w1mpropm_cw', 'w2mpropm_cw']
    # We transform the missing magnitudes to nans
    training_data.loc[:, photo_colors] = training_data.loc[:, photo_colors].apply(lambda x: np.where(x == 99.0, np.nan, x)).values
    # We try to get the errors for X_vars
    training_eX, eX_vars = get_errors(training_data, used_cols = X_vars)
    # We select the data
    training_output = training_data.loc[:, validation_vars]
    quality_data = training_data.loc[:, quality_vars]
    training_data = training_data.loc[:, X_vars+y_vars+y_vars_err]
    # We make sure that -999 are treated as nans in the X vector
    training_data.loc[:, X_vars] = training_data.loc[:, X_vars].apply(lambda x: np.where(x <= -999, np.nan, x)).values
    # We standarize the data and the errors:
    minimum = training_data.loc[:, X_vars].quantile(0.005, axis=0).values
    maximum = training_data.loc[:, X_vars].quantile(0.995, axis=0).values
    dynamic_range = maximum - minimum
    # Columns with zero dynamic range are left untouched to avoid 0/0.
    non_zero_dynamic_range = dynamic_range > 0
    vars_non_zero_dynamic_range = [i for (i, v) in zip(X_vars, non_zero_dynamic_range) if v]
    e_vars_non_zero_dynamic_range = [i for (i, v) in zip(eX_vars, non_zero_dynamic_range) if v]
    training_data.loc[:, vars_non_zero_dynamic_range] = (training_data.loc[:, vars_non_zero_dynamic_range].values - minimum[non_zero_dynamic_range]) / dynamic_range[non_zero_dynamic_range]
    training_eX.loc[:, e_vars_non_zero_dynamic_range] = training_eX.loc[:, e_vars_non_zero_dynamic_range].values / dynamic_range[non_zero_dynamic_range]
    training_data.loc[:, X_vars] = training_data.loc[:, X_vars].clip(lower = 0, upper = 1)
    training_eX.loc[:, eX_vars] = training_eX.loc[:, eX_vars].clip(lower = 0, upper = 1)
    # We obtain all the classes probabilities
    try:
        from sklearn.preprocessing import LabelEncoder
        y_class = training_data.loc[:, y_vars+y_vars_err].select_dtypes(include='object')
        y_class_dummies = pd.get_dummies(y_class)
        y_var_class = y_class_dummies.columns.to_list()
        label_encoder = LabelEncoder()
        for col in y_class.columns:
            y_class_dummies.loc[:, '%s_num'%col] = label_encoder.fit_transform(y_class[col])
        training_data = training_data.join(y_class_dummies)
        y_test_categorical = y_class.columns.to_list() + y_class.add_suffix('_num').columns.to_list()
        y_vars = list(set(y_vars).difference(y_class.columns.to_list())) + y_var_class
    except:
        y_test_categorical = []
    # We convert the nan to a figure just outside the std of the distribution.
    training_data = training_data.fillna(params['fill_na'])
    # If there's no error, then zero
    training_ey, ey_vars = get_errors(training_data, used_cols = y_vars, force_cols = y_vars_err)
    training_eX = training_eX.fillna(0).clip(lower=0)
    training_ey = training_ey.fillna(0).clip(lower=0)
    training_X = training_data.loc[:, X_vars]
    training_y = training_data.loc[:, y_vars]
    training_y_cat = training_data.loc[:, y_test_categorical]
    print('The training independent vector is', training_X.shape)
    print('The training dependent vector is', training_y.shape)
    if params['test_catalog'] is not None:
        predict_data = pd.read_csv(params['test_catalog'])
        predict_output = predict_data.loc[:, prediction_vars]
        # We make sure that 99 the photometric magnitudes are treated as Nans:
        predict_data.loc[:, photo+photo_err] = predict_data.loc[:, photo+photo_err].apply(lambda x: np.where(x == 99.0, np.nan, x)).values
        if (len(photo_err) > 0) & ('flux' in params['used_photo']):
            predict_data = transform_errors(predict_data, photo, photo_err)[0]
        # We select the data
        predict_X = predict_data.loc[:, X_vars]
        predict_eX = get_errors(predict_data, used_cols = X_vars)[0]
        # We make sure that -999 are treated as nans, and that there are not nans in the y
        # NOTE(review): uses `<` here but `<=` on the training side — confirm
        # whether -999 itself should also become NaN for the test catalog.
        predict_X = predict_X.apply(lambda x: np.where(x < -999, np.nan, x))
        # We apply the same normalization
        predict_X = (predict_X - minimum) / dynamic_range
        # We convert the nan to a figure just outside the std of the distribution.
        predict_X = predict_X.fillna(params['fill_na'])
        predict_eX = predict_eX.fillna(0).clip(lower=0)
    else:
        predict_X, predict_eX, predict_output = None, None, None
    return training_X, training_eX, training_y, training_ey, training_y_cat, training_output, quality_data, X_vars, y_vars, y_vars_err, predict_X, predict_eX, predict_output, minimum, dynamic_range, fluxes_zpt
def get_train_val(X, eX, y, ey, y_cat, output, params):
    """
    Split the data (values, errors, categorical targets and auxiliary
    output) into train/validation subsets with one shared random state so
    every table is partitioned identically.

    When ``params['stratify_var']`` is set, rows whose stratification
    class appears only once are dropped (scikit-learn requires at least
    two members per class) and degenerate 0/1 target columns are removed.

    Returns
    -------
    tuple
        (X_train, X_train_error, y_train, y_train_error, y_cat_train,
         target_names, X_valid, X_valid_error, y_valid, y_valid_error,
         y_cat_valid, valid_output)
    """
    if params['random_generator_seed'] is None:
        import random
        # Bug fix: random.randrange(1e3) passed a float, which is
        # deprecated and a TypeError on Python 3.12+; use an int.
        random_state = random.randrange(1000)
    else:
        random_state = params['random_generator_seed']
    if params['stratify_var'] is not None:
        stratify = y_cat.loc[:, params['stratify_var']]
    else:
        stratify = None
    if stratify is not None:
        # We remove the classes with less 2 elements so we can stratify
        repeated_cols = (y.sum(axis = 0) > 1) & (y.isin([0,1])).all(axis=0) | (~y.isin([0,1])).any(axis=0)
        repeated_index = stratify.duplicated(keep=False)
        stratify = stratify[repeated_index]
        X = X[repeated_index]
        y = y.loc[repeated_index, repeated_cols.values]
        eX = eX[repeated_index]
        ey = ey.loc[repeated_index, repeated_cols.values]
        y_cat = y_cat[repeated_index]
        output = output[repeated_index]
    # Same random_state + stratify everywhere => consistent row splits.
    X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size = params['validation_sample_size'], random_state=random_state, stratify = stratify)
    X_train_error, X_valid_error, y_train_error, y_valid_error = train_test_split(eX, ey, test_size = params['validation_sample_size'], random_state=random_state, stratify = stratify)
    valid_output = train_test_split(output, test_size = params['validation_sample_size'], random_state=random_state, stratify = stratify)[1]
    y_cat_train, y_cat_valid = train_test_split(y_cat, test_size = params['validation_sample_size'], random_state=random_state, stratify = stratify)
    X_valid = X_valid.reset_index(drop=True)
    X_valid_error = X_valid_error.reset_index(drop=True)
    y_valid = y_valid.reset_index(drop=True)
    y_valid_error = y_valid_error.reset_index(drop=True)
    valid_output = valid_output.reset_index(drop=True)
    target_names = y_train.columns.to_list()
    return X_train, X_train_error, y_train, y_train_error, y_cat_train, target_names, X_valid, X_valid_error, y_valid, y_valid_error, y_cat_valid, valid_output
def add_inner_title(ax, title, loc, size=None, color=None, rotation=None, **kwargs):
    """
    Place a small text label inside an axes, stroked in white for
    readability over dense plots.

    Parameters
    ----------
    ax : matplotlib axes
        Target axes.
    title : str
        Text to display.
    loc : int or str
        AnchoredText location code.
    size : dict, optional
        Font-properties dict; defaults to the legend font size.
    color, rotation : optional
        Extra font properties merged into ``size``.

    Returns
    -------
    matplotlib.offsetbox.AnchoredText
        The artist added to the axes.
    """
    from matplotlib.offsetbox import AnchoredText
    from matplotlib.patheffects import withStroke
    if size is None:
        prop = dict(size=plt.rcParams['legend.fontsize'])
    else:
        # Bug fix: copy instead of aliasing, so the caller's dict is not
        # mutated by the color/rotation updates below.
        prop = dict(size)
    if color is not None:
        prop['color'] = color
    if rotation is not None:
        prop['rotation'] = rotation
    at = AnchoredText(title, loc=loc, prop=prop,
                      pad=0., borderpad=0.5,
                      frameon=False, **kwargs)
    ax.add_artist(at)
    at.txt._text.set_path_effects([withStroke(foreground="w", linewidth=3)])
    return at
def plot_predictions(predictions, y_test, names, used_photo):
    """
    Plot predicted-vs-true panels (with rotated residual insets and density
    contours) plus residual histograms, one column per target, and save the
    figure as ``<used_photo>_BNN.png``.

    Parameters
    ----------
    predictions, y_test : pandas.DataFrame
        Predicted and true values; both must contain all ``names`` columns.
    names : list of str
        Target columns to plot.
    used_photo : str
        Tag used in the output filename.
    """
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    plt.close('all')
    fig, axs = plt.subplots(2, len(predictions), gridspec_kw={'height_ratios': [2, 1]}, figsize=(10, 6))
    for (ax, name) in zip(axs[0].flatten(), names):
        y_pred = predictions.loc[:, name]
        y_true = y_test.loc[:, name]
        # Residuals in the frame rotated 45 degrees (pred vs true diagonal).
        x_res = (y_true+y_pred)/np.sqrt(2)
        y_res = (y_pred-y_true)/np.sqrt(2)
        divider = make_axes_locatable(ax)
        # below height and pad are in inches
        ax_ress = divider.append_axes("bottom", 0.75, pad=0.5)
        #ax.xaxis.set_tick_params(labelbottom=False)
        # Bug fix: the original passed undefined names `xlim`/`ylim` and the
        # wrong keyword `z=` (the parameter is `zdata`); use the data range,
        # matching the hexbin extent below.
        locx, locy, hh, hh_filt, hh_d, hh_d_filt, xcenters, ycenters, lwdx, lwdy, lwd_d = plot_density(y_true, y_pred, zdata = None, xyrange = [[y_true.min(), y_true.max()], [y_true.min(), y_true.max()]], thresh = 5, bins = [100, 100])
        contour_levels = np.linspace(np.nanmin(np.log10(hh_filt)), np.nanmax(np.log10(hh_filt))*0.9, 5)
        cs = ax.contour(xcenters, ycenters, np.log10(hh_filt.T), colors = 'w', linestyles ='-', linewidths = 0.85, levels = contour_levels, zorder = 3)
        hb = ax.hexbin(y_true, y_pred, gridsize=50, cmap='inferno_r', bins = 'log', extent= [y_true.min(), y_true.max(), y_true.min(), y_true.max()])
        hb_res = ax_ress.hexbin(x_res, y_res, gridsize=50, cmap='inferno_r', bins = 'log', extent= [x_res.min(), x_res.max(), y_res.min(), y_res.max()])
        ax.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()], '-')
        ax.set_xlim(y_true.min(), y_true.max())
        ax.set_ylim(y_true.min(), y_true.max())
        ax_ress.axhline(y=0)
        ax_ress.set_xlim(x_res.min(), x_res.max())
        ax_ress.set_ylim(y_res.min(), y_res.max())
        ax_ress.set_xlabel('(%s$_{true}$+%s$_{pred})/\sqrt{2}$'%(name, name))
        ax_ress.set_ylabel('(%s$_{pred}$-%s$_{true})/\sqrt{2}$'%(name, name))
        ax.set_ylabel('%s$_{pred}$'%name)
        ax.set_xlabel('%s$_{true}$'%name)
    for (ax, name) in zip(axs[1].flatten(), names):
        y_pred = predictions.loc[:, name]
        y_true = y_test.loc[:, name]
        lim = (y_true.max() - y_true.min()) * 0.15
        y_res = (y_pred-y_true)/np.sqrt(2)
        #ax.hist(y_true-y_pred, 50, range = [-lim, lim])
        ax.hist(y_res, 50, range = [-lim, lim])
        ax.axvline(x=0, color = 'r')
        ax.set_xlabel('%s$_{true}$ - %s$_{pred}$'%(name, name))
        #add_inner_title(ax, '$\sigma=%.2f$'%(np.std(y_true-y_pred)), 1, size=None, color=None, rotation=None)
        add_inner_title(ax, '$\sigma=%.2f$'%(np.std(y_res)), 2, size=None, color=None, rotation=None)
    plt.tight_layout()
    plt.savefig('%s_BNN.png'%used_photo)
def plot_density(xdata, ydata, zdata = None, xyrange = None, thresh = 10, bins = [100, 100], kernel_density = 2):
    """
    Build a (Gaussian-smoothed) 2D histogram of the data and identify
    low-density points.

    Parameters
    ----------
    xdata, ydata : array-like
        Point coordinates.
    zdata : array-like, optional
        Values to bin as a per-cell median alongside the counts.
    xyrange : [[xmin, xmax], [ymin, ymax]], optional
        Histogram range; derived from the data when None.
    thresh : int
        Cells with fewer counts than this mark their points "low density".
    bins : [nx, ny]
        Histogram binning.
    kernel_density : float
        Gaussian smoothing kernel width (in cells).

    Returns
    -------
    tuple
        (locx, locy, hh, hh_filt, weighted, weighted_filt, xcenters,
         ycenters, xdata_ld, ydata_ld, zdata_ld); the weighted outputs and
         zdata_ld are None when ``zdata`` is None.
    """
    from scipy import stats
    from astropy.convolution import convolve
    from astropy.convolution import Gaussian2DKernel
    #histogram definition
    if xyrange is None:
        # Bug fix: the y-range upper bound previously used xdata.max().
        xyrange = [[xdata.min(), xdata.max()], [ydata.min(), ydata.max()]] # data range
    # histogram the data
    # (np.histogram2d replaces scipy.histogram2d, removed from modern SciPy)
    hh, locx, locy = np.histogram2d(xdata, ydata, range=xyrange, bins=bins)
    kernel = Gaussian2DKernel(kernel_density)
    hh_filt = convolve(hh, kernel)
    xcenters = (locx[:-1] + locx[1:]) / 2
    ycenters = (locy[:-1] + locy[1:]) / 2
    posx = np.digitize(xdata, locx)
    posy = np.digitize(ydata, locy)
    #select points within the histogram
    ind = (posx > 0) & (posx <= bins[0]) & (posy > 0) & (posy <= bins[1])
    hhsub = hh[posx[ind] - 1, posy[ind] - 1] # values of the histogram where the points are
    xdata_ld = xdata[ind][hhsub < thresh] # low density points
    ydata_ld = ydata[ind][hhsub < thresh]
    #hh_filt[hh < thresh] = np.nan # fill the areas with low density by NaNs
    # Bug fix: these were undefined when zdata is None, raising NameError
    # at the return statement.
    weighted, weighted_filt, zdata_ld = None, None, None
    if zdata is not None:
        weighted = stats.binned_statistic_2d(xdata, ydata, zdata, range=xyrange, bins=bins, statistic='median')[0]
        weighted_filt = convolve(weighted, kernel)
        zdata_ld = zdata[ind][hhsub < thresh]
        weighted_filt[hh < thresh] = np.nan
        weighted[hh < thresh] = np.nan
    return locx, locy, hh, hh_filt, weighted, weighted_filt, xcenters, ycenters, xdata_ld, ydata_ld, zdata_ld
def get_all_roc_coordinates(y_real, y_proba):
    '''
    Calculates all the ROC Curve coordinates (tpr and fpr) by using every
    predicted probability as a candidate decision threshold.
    Args:
        y_real: The list or series with the real (binary 0/1) classes.
        y_proba: The array with the probabilities for each class, obtained by using the `.predict_proba()` method.
    Returns:
        tpr_list: The list of TPRs representing each threshold.
        fpr_list: The list of FPRs representing each threshold.
    '''
    from scipy.sparse import coo_matrix
    n_thresholds = len(y_proba)
    tpr_list = [0] * (n_thresholds + 1)
    fpr_list = [0] * (n_thresholds + 1)
    ones = np.ones(y_real.shape[0], dtype=np.int64)
    for i, threshold in enumerate(y_proba):
        predicted = y_proba >= threshold
        # Build the 2x2 contingency table in one shot: rows index the true
        # class, columns the predicted class; duplicate entries sum.
        TN, FP, FN, TP = coo_matrix((ones, (y_real, predicted)), shape=(2, 2)).toarray().ravel()
        tpr_list[i + 1] = TP / (TP + FN)        # sensitivity - true positive rate
        fpr_list[i + 1] = 1 - TN / (TN + FP)    # 1-specificity - false positive rate
    return tpr_list, fpr_list
def plot_predictions_nominal(predictions_nominal, losses_nominal, pdfs, params, losses_variance = None, xlims = [[0,1], [3000, 8000], [0, 5], [-3, 1]]):
    """Generate and save the full suite of diagnostic plots for the nominal model.

    Saves PNGs under ``<output_path>/<experiment_name>``: per-class-pair ROC
    curves (overall and per magnitude bin), training/validation loss curves,
    density maps of residuals and predicted uncertainties vs magnitude,
    confusion matrices (per magnitude and per magnitude bin), relative-error
    bar charts, pull distributions of the residuals against the predicted
    uncertainty, saved per-object PDFs, and predicted-vs-observed density
    plots for each regression target.

    Parameters
    ----------
    predictions_nominal : pandas.DataFrame
        Nominal-model predictions joined with observed targets and validation
        variables. Mutated in place: residual, 'mag_bin', 'missed_class' and
        rotated-residual columns are added, NaNs are filled with 99, and
        magnitude columns are clipped.
    losses_nominal : mapping
        Training history of the nominal model with 'loss' and 'val_loss' keys.
    pdfs : iterable
        Per-object posterior samples, one table per saved PDF index.
    params : dict
        Experiment configuration (paths, target/validation names, flags).
    losses_variance : mapping, optional
        Training history of the variance model; plotted when provided.
    xlims : list, optional
        NOTE(review): mutable default; it is also shadowed by local ``xlims``
        assignments further down — confirm the default is ever actually used.

    Notes
    -----
    Most plotting sections are wrapped in bare ``try/except: pass`` blocks, so
    a failing plot is silently skipped. Relies on the module-level helpers
    ``plot_density``, ``add_inner_title``, ``get_all_roc_coordinates`` and on
    ``pd``/``np`` imported at module scope.
    """
    # Lets plot the variance divided by the error
    from astropy.stats import sigma_clip, sigma_clipped_stats, mad_std
    from scipy.stats import norm
    import matplotlib.pyplot as plt
    from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay, classification_report, roc_curve, RocCurveDisplay, roc_auc_score
    from mpl_toolkits.axes_grid1 import make_axes_locatable
    import seaborn as sns
    import matplotlib.ticker as ticker
    # Suffixes used to build the prediction column names (point estimate / spread).
    stats='pc50'
    stats_var ='std'
    output_directory = params['variables_n_preproc']['output_path']+params['experiment_name']
    # Split target columns into regression / CLASS / SUBCLASS groups by name.
    y_obs = params['variables_n_preproc']['targets_names']
    y_obs_err = params['variables_n_preproc']['targets_error_names']
    y_pred_nom = ['%s_%s'%(var, stats) for var in params['model_nominal']['targets_names']]
    y_pred_nom_pdf = ['%s'%var for var in params['model_nominal']['targets_names']]
    y_pred_nom_pdf_regres = [col for col in y_pred_nom_pdf if (('CLASS' not in col) and ('class' not in col))]
    y_pred_nom_pdf_class = [col for col in y_pred_nom_pdf if ( (('CLASS' in col) or ('class' in col)) and (('SUBCLASS' not in col) and ('subclass' not in col)) )]
    y_obs_class = [col for col in y_obs if ( (('CLASS' in col) or ('class' in col)) and (('SUBCLASS' not in col) and ('subclass' not in col)) )]
    y_pred_nom_class = [col for col in y_pred_nom if ( (('CLASS' in col) or ('class' in col)) and (('SUBCLASS' not in col) and ('subclass' not in col)) )]
    y_obs_regres = [col for col in y_obs if (('CLASS' not in col) and ('class' not in col))]
    y_obs_err_regres = [col for col in y_obs_err if (('CLASS' not in col) and ('class' not in col))]
    y_pred_nom_regres = [col for col in y_pred_nom if (('CLASS' not in col) and ('class' not in col))]
    y_pred_nom_pdf_subclass = [col for col in y_pred_nom_pdf if (('SUBCLASS' in col) or ('subclass' in col))]
    y_obs_subclass = [col for col in y_obs if (('SUBCLASS' in col) or ('subclass' in col))]
    y_pred_nom_subclass = [col for col in y_pred_nom if (('SUBCLASS' in col) or ('subclass' in col))]
    # Uncertainty columns come either from a dedicated variance model or from
    # the spread ('std') of the nominal model itself.
    if params['experiment']['fit_uncertainty_cv']:
        y_pred_var = ['%s_%s'%(var, stats) for var in params['model_variance']['targets_names']]
        y_pred_var_pdf = ['%s'%var for var in params['model_variance']['targets_names']]
        y_pred_var_regres = [col for col in y_pred_var if (('CLASS' not in col) and ('class' not in col))]
        y_pred_var_class = [col for col in y_pred_var if ( (('CLASS' in col) or ('class' in col)) and (('SUBCLASS' not in col) and ('subclass' not in col)) )]
        y_pred_var_subclass = [col for col in y_pred_var if (('SUBCLASS' in col) or ('subclass' in col))]
    else:
        y_pred_var = ['%s_%s'%(var, stats_var) for var in params['model_nominal']['targets_names']]
        y_pred_var_regres = [col for col in y_pred_var if (('CLASS' not in col) and ('class' not in col))]
        y_pred_var_class = [col for col in y_pred_var if ( (('CLASS' in col) or ('class' in col)) and (('SUBCLASS' not in col) and ('subclass' not in col)) )]
        y_pred_var_subclass = [col for col in y_pred_var if (('SUBCLASS' in col) or ('subclass' in col))]
    # Magnitude / magnitude-error validation columns, identified by name.
    mag_list = [col for col in params['variables_n_preproc']['validation_vars'] if (('mag' in col) and ('_err' not in col) and ('e_' not in col) and ('_magerr' not in col))]
    mag_err_list = [col for col in params['variables_n_preproc']['validation_vars'] if (('mag' in col) and (('_err' in col) or ('e_' in col) or ('_magerr' in col)))]
    residual_list = ['%s_residual'%col for col in y_pred_nom]
    residual_abs_list = ['%s_residual_abs'%col for col in y_pred_nom]
    residual_reg_list = ['%s_residual'%col for col in y_pred_nom_regres]
    residual_reg_abs_rel_list = ['%s_residual_rel'%col for col in y_pred_nom_regres]
    predictions_nominal[residual_list] = predictions_nominal.loc[:, y_pred_nom] - predictions_nominal.loc[:, y_obs].values
    predictions_nominal[residual_abs_list] = predictions_nominal.loc[:, residual_list].abs()
    predictions_nominal[residual_reg_abs_rel_list] = predictions_nominal[residual_reg_list].abs()/(1+predictions_nominal[y_obs_regres].values)
    # We fill the Nans
    predictions_nominal = predictions_nominal.fillna(99)
    # We clip the results
    predictions_nominal.loc[:, mag_list] = predictions_nominal.loc[:, mag_list].clip(0, 25)
    predictions_nominal.loc[:, mag_err_list] = predictions_nominal.loc[:, mag_err_list].clip(0, 0.75)
    # We form the lists for the x and y variables.
    # NOTE(review): xx_list has 6 entries while yy_list/zz_list have 4 — the
    # zip below truncates to 4 panels; confirm this is intended.
    xx_list = [mag_list, mag_err_list, mag_list, mag_err_list, mag_list, mag_err_list]
    yy_list = [residual_list, residual_list, y_pred_var, y_pred_var]
    zz_list = [y_pred_var, y_pred_var, y_pred_var, y_pred_var]
    #try:
    # Build every ordered pair of classes for one-vs-one ROC analysis.
    classes_combinations = []
    class_list = y_obs_class
    for i in range(len(class_list)):
        for j in range(i+1, len(class_list)):
            classes_combinations.append([class_list[i], class_list[j]])
            classes_combinations.append([class_list[j], class_list[i]])
    mag = 'magab_mag_auto_rSDSSB'
    mag_limit = 21.5
    # NOTE(review): named 'above_limit' but selects objects BRIGHTER than the
    # magnitude limit (mag < mag_limit) — confirm the intent.
    above_limit = predictions_nominal[mag] < mag_limit
    true_class = np.argmax(predictions_nominal.loc[above_limit, y_obs_class].values, axis = 1)
    # Plots the Probability Distributions and the ROC Curves One vs One
    plt.close('all')
    plt.figure(figsize = (14, 5))
    #plt.suptitle(r'%s $\leq$ %.1f. Execution time %.1f'%(mag, mag_limit, params['experiment']['execution_time']))
    bins = [i/20 for i in range(20)] + [1]
    roc_auc_ovo = {}
    for i in range(len(classes_combinations)):
        # Gets the class
        comb = classes_combinations[i]
        c1 = comb[0]
        c2 = comb[1]
        c1_index = class_list.index(c1)
        title = c1 + " vs " +c2
        # Prepares an auxiliar dataframe to help with the plots
        df_aux = pd.DataFrame()
        df_aux['class'] = [y_obs_class[ii] for ii in true_class]
        df_aux['prob'] = predictions_nominal.loc[above_limit, y_pred_nom_class].values[:, c1_index]
        # Slices only the subset with both classes
        df_aux = df_aux[(df_aux['class'] == c1) | (df_aux['class'] == c2)]
        df_aux['class'] = [1 if y == c1 else 0 for y in df_aux['class']]
        df_aux = df_aux.reset_index(drop = True)
        # Plots the probability distribution for the class and the rest
        ax = plt.subplot(2, 6, i+1)
        sns.histplot(x = "prob", data = df_aux, hue = 'class', color = 'b', ax = ax, bins = bins, stat = "probability")
        if i == 0:
            ax.set_ylabel('Normalized count')
        else:
            ax.set_ylabel('')
            ax.set_yticklabels([])
        ax.set_ylim(0, 1.0)
        ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
        ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
        ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
        ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
        ax.set_title(title, fontdict={'fontsize':'8'})
        ax.legend([f"{c1}", f"{c2}"], prop={'size':'9'})
        ax.set_xlabel(f"P(x = {c1})")
        ax.grid(which='both')
        # Calculates the ROC Coordinates and plots the ROC Curves
        ax_bottom = plt.subplot(2, 6, i+7)
        tpr, fpr = get_all_roc_coordinates(df_aux['class'], df_aux['prob'])
        sns.lineplot(x = fpr, y = tpr, ax = ax_bottom, color = 'C0')
        ax_bottom.plot([0, 1], [0, 1], 'r--')
        ax_bottom.set_xlim(-0.01, 1.01)
        ax_bottom.set_ylim(-0.01, 1.01)
        if i == 0:
            ax_bottom.set_ylabel("True Positive Rate")
        else:
            ax_bottom.set_yticklabels([])
        ax_bottom.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
        ax_bottom.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
        ax_bottom.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
        ax_bottom.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
        ax_bottom.set_xlabel("False Positive Rate")
        ax_bottom.grid(which='both')
        # Calculates the ROC AUC OvO
        roc_auc_ovo[title] = roc_auc_score(df_aux['class'], df_aux['prob'])
        add_inner_title(ax_bottom, r'ROC AUC = %.3f'%roc_auc_ovo[title], 4, size={'size':'9'}, color=None, rotation=None)
    plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.1, hspace = 0.35, bottom=0.1)
    plt.savefig('%s/%s_ROC_curves.png'%(output_directory, params['experiment_name']), bbox_inches='tight')
    #except:
    #pass
    # Same one-vs-one ROC analysis, repeated per magnitude bin.
    try:
        # Lets make histograms with the residuals versus the magnitudes
        bins = np.arange(17, 26, 1.0)
        binscenters = (bins[:-1] + bins[1:]) / 2
        mag = 'magab_mag_auto_rSDSSB'
        predictions_nominal['mag_bin'] = pd.cut(x = predictions_nominal[mag], bins = bins)
        for ii, (group_name, group) in enumerate(predictions_nominal.groupby(['mag_bin'])):
            if len(group) > 50:
                # Lets make histograms with the residuals versus the magnitudes
                true_class = np.argmax(group.loc[:, y_obs_class].values, axis = 1)
                # NOTE(review): this class_list is immediately overwritten below.
                class_list = [[clase for clase in y_obs_class][i] for i in list(set(true_class))]
                classes_combinations = []
                class_list = y_obs_class
                for i in range(len(class_list)):
                    for j in range(i+1, len(class_list)):
                        classes_combinations.append([class_list[i], class_list[j]])
                        classes_combinations.append([class_list[j], class_list[i]])
                # Plots the Probability Distributions and the ROC Curves One vs One
                plt.close('all')
                fig = plt.figure(figsize = (14, 5))
                #plt.suptitle(r'%s < %s $\leq$ %s. Execution time %.1f sec.'%(group_name.left, mag, group_name.right, params['experiment']['execution_time']))
                bins = [i/20 for i in range(20)] + [1]
                roc_auc_ovo = {}
                for i in range(len(classes_combinations)):
                    # Gets the class
                    comb = classes_combinations[i]
                    c1 = comb[0]
                    c2 = comb[1]
                    c1_index = class_list.index(c1)
                    title = c1 + " vs " +c2
                    # Prepares an auxiliar dataframe to help with the plots
                    df_aux = pd.DataFrame()
                    df_aux['class'] = [y_obs_class[ii] for ii in true_class]
                    df_aux['prob'] = group.loc[:, y_pred_nom_class].values[:, c1_index]
                    # Slices only the subset with both classes
                    df_aux = df_aux[(df_aux['class'] == c1) | (df_aux['class'] == c2)]
                    df_aux['class'] = [1 if y == c1 else 0 for y in df_aux['class']]
                    df_aux = df_aux.reset_index(drop = True)
                    # Plots the probability distribution for the class and the rest
                    ax = plt.subplot(2, 6, i+1)
                    sns.histplot(x = "prob", data = df_aux, hue = 'class', color = 'b', ax = ax, bins = bins, stat = "probability")
                    if i == 0:
                        ax.set_ylabel('Normalized count')
                    else:
                        ax.set_ylabel('')
                        ax.set_yticklabels([])
                    ax.set_ylim(0, 1.0)
                    ax.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
                    ax.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
                    ax.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
                    ax.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
                    ax.set_title(title, fontdict={'fontsize':'8'})
                    ax.legend([f"{c1}", f"{c2}"], prop={'size':'9'})
                    ax.set_xlabel(f"P(x = {c1})")
                    ax.grid(which='both')
                    # Calculates the ROC Coordinates and plots the ROC Curves
                    ax_bottom = plt.subplot(2, 6, i+7)
                    tpr, fpr = get_all_roc_coordinates(df_aux['class'], df_aux['prob'])
                    sns.lineplot(x = fpr, y = tpr, ax = ax_bottom, color = 'C0')
                    ax_bottom.plot([0, 1], [0, 1], 'r--')
                    ax_bottom.set_xlim(-0.01, 1.01)
                    ax_bottom.set_ylim(-0.01, 1.01)
                    if i == 0:
                        ax_bottom.set_ylabel("True Positive Rate")
                    else:
                        ax_bottom.set_yticklabels([])
                    ax_bottom.xaxis.set_major_locator(ticker.MultipleLocator(0.5))
                    ax_bottom.yaxis.set_major_locator(ticker.MultipleLocator(0.5))
                    ax_bottom.xaxis.set_minor_locator(ticker.MultipleLocator(0.25))
                    ax_bottom.yaxis.set_minor_locator(ticker.MultipleLocator(0.25))
                    ax_bottom.set_xlabel("False Positive Rate")
                    ax_bottom.grid(which='both')
                    # Calculates the ROC AUC OvO
                    roc_auc_ovo[title] = roc_auc_score(df_aux['class'], df_aux['prob'])
                    add_inner_title(ax_bottom, r'ROC AUC = %.3f'%roc_auc_ovo[title], 4, size={'size':'9'}, color=None, rotation=None)
                #add_inner_title(ax1, r'%s < %s $\leq$ %s'%(group_name.left, mag, group_name.right), 2, size={'size':'9'}, color=None, rotation=None)
                plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.1, hspace = 0.35, bottom=0.1)
                plt.savefig('%s/%s_ROC_curves_%s_%s.png'%(output_directory, params['experiment_name'], mag, str(group_name).strip('(]').replace(', ', '_')), bbox_inches='tight')
    except:
        pass
    # Training/validation loss curves for the nominal model.
    try:
        plt.close('all')
        loss, val_loss = losses_nominal['loss'], losses_nominal['val_loss']
        plt.plot(loss, '--')
        plt.plot(val_loss, '-')
        plt.legend(['loss', 'val loss'])
        plt.ylabel('loss')
        plt.xlabel('Epochs')
        plt.savefig('%s/%s_loss_nominal.png'%(output_directory, params['experiment_name']), bbox_inches='tight')
    except:
        pass
    # Training/validation loss curves for the variance model (if provided).
    try:
        plt.close('all')
        loss, val_loss = losses_variance['loss'], losses_variance['val_loss']
        plt.plot(loss, '--')
        plt.plot(val_loss, '-')
        plt.legend(['loss', 'val loss'])
        plt.ylabel('loss')
        plt.xlabel('Epochs')
        plt.savefig('%s/%s_loss_variance.png'%(output_directory, params['experiment_name']), bbox_inches='tight')
    except:
        pass
    # Density maps of residuals / predicted uncertainties vs magnitude
    # (one 5x4 grid of panels per y-variable, one panel per band).
    for xx_names, yy_names, zz_names, in zip(xx_list, yy_list, zz_list):
        for yy_name, zz_name in zip(yy_names, zz_names):
            #Quantiles:
            quantiles_x = predictions_nominal[xx_names].quantile([0.1, 1])
            quantiles_y = predictions_nominal[yy_name].quantile([0.01, 0.99])
            quantiles_z = predictions_nominal[zz_name].quantile([0.01, 0.99])
            xlim = [np.nanmin(quantiles_x), np.nanmax(quantiles_x)+1e-3]
            zlim = [np.nanmin(quantiles_z), np.nanmax(quantiles_z)]
            if 'CLASS' in yy_name:
                ylim = [-1.02, 1.02]
            else:
                ylim = [np.nanmin(quantiles_y), np.nanmax(quantiles_y)]
            plt.close('all')
            fig, axs = plt.subplots(5, 4, dpi=200, figsize = [10, 10], sharey = True, sharex = True)
            for ii, (ax, xx_name) in enumerate(zip(axs.flatten(), xx_names)):
                locx, locy, hh, hh_filt, hh_d, hh_d_filt, xcenters, ycenters, lwdx, lwdy, lwd_d = plot_density(predictions_nominal[xx_name], predictions_nominal[yy_name], zdata = predictions_nominal[zz_name], xyrange = [xlim, ylim], thresh = 1, bins = [100, 100])
                X, Y = np.meshgrid(locx, locy)
                ax.pcolormesh(X, Y, hh_d.T)
                contour_levels = np.linspace(np.nanmin(np.log10(hh_filt)), np.nanmax(np.log10(hh_filt))*0.9, 5)
                cs = ax.contour(xcenters, ycenters, np.log10(hh_filt.T), colors = 'w', linestyles ='-', linewidths = 0.85, levels = contour_levels, zorder = 3)
                cb = ax.scatter(lwdx, lwdy, c = lwd_d, s = 1, alpha = 0.75, vmin = zlim[0], vmax = zlim[1])
                ax.set_xlabel(xx_name)
                ax.axhline(0, color = 'r', linewidth = 0.8)
                ax.set_ylim(ylim)
                ax.set_xlim(xlim)
                ax.grid()
            plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.25, hspace = 0.5, bottom=0.15)
            # Invisible full-figure axes used only for the shared y-label.
            fig.add_subplot(111, frameon=False)
            # hide tick and tick label of the big axis
            plt.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
            plt.ylabel(yy_name.replace('pred_nom_',''))
            # Colorbar
            cbar = fig.colorbar(cb, ax=axs.ravel().tolist(), aspect = 40, pad = 0.02)
            cbar.set_label(zz_name.replace('pred_nom_',''), labelpad = -0.05)
            if any('mag_err' in xx_name for xx_name in xx_names):
                plt.savefig('%s/%s_%s_vs_mag_err.png'%(output_directory, params['experiment_name'], yy_name.replace('pred_nom_','')), bbox_inches='tight')
            else:
                plt.savefig('%s/%s_%s_vs_mag.png'%(output_directory, params['experiment_name'], yy_name.replace('pred_nom_','')), bbox_inches='tight')
    # Confusion matrix + misclassification rate per magnitude, one figure per band.
    try:
        # Lets make histograms with the residuals versus the magnitudes
        predicted_class = np.argmax(predictions_nominal.loc[:, y_pred_nom_class].values, axis = 1)
        true_class = np.argmax(predictions_nominal.loc[:, y_obs_class].values, axis = 1)
        predictions_nominal['missed_class'] = predicted_class != true_class
        bins = np.arange(17, 26, 1.0)
        binscenters = (bins[:-1] + bins[1:]) / 2
        for mag in mag_list:
            predictions_nominal['mag_bin'] = pd.cut(x = predictions_nominal[mag], bins = bins)
            class_fails = predictions_nominal.groupby(['mag_bin'])['missed_class'].sum()
            counts = predictions_nominal.groupby(['mag_bin'])['missed_class'].count()
            failure_ratio = 100*class_fails/counts
            relative_failure_ratio = failure_ratio/failure_ratio.sum()
            cumulative = 100*class_fails.cumsum()/counts.sum()
            plt.close('all')
            fig, (ax1, ax2) = plt.subplots(1,2, figsize = (10, 4))
            cm = confusion_matrix(true_class, predicted_class, normalize='true')
            disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=[clase.replace('CLASS_', '') for clase in y_obs_class])
            disp.plot(ax = ax1)
            ax2.bar(binscenters, failure_ratio, width = 1, edgecolor = 'k')
            ax2.step(bins[1::], cumulative, linestyle = '--', color = 'C1', label = 'Cumulative')
            ax2.set_xlabel(r'%s [mag]'%mag)
            ax2.set_ylabel(r'Classification relative error [%]')
            ax2.legend()
            ax2.grid()
            ax2.set_ylim([0, 25])
            plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.25, hspace = 0.5, bottom=0.15)
            plt.savefig('%s/%s_class_ANN_%s.png'%(output_directory, params['experiment_name'], mag), bbox_inches='tight')
    except:
        pass
    # Per-magnitude-bin confusion matrix, each bin highlighted against the
    # global failure-ratio histogram. One figure per populated bin.
    try:
        # Lets make histograms with the residuals versus the magnitudes
        ylim = 25
        predicted_class = np.argmax(predictions_nominal.loc[:, y_pred_nom_class].values, axis = 1)
        true_class = np.argmax(predictions_nominal.loc[:, y_obs_class].values, axis = 1)
        predictions_nominal['missed_class'] = predicted_class != true_class
        bins = np.arange(17, 26, 1.0)
        binscenters = (bins[:-1] + bins[1:]) / 2
        mag = 'magab_mag_auto_rSDSSB'
        predictions_nominal['mag_bin'] = pd.cut(x = predictions_nominal[mag], bins = bins)
        class_fails_all = predictions_nominal.groupby(['mag_bin'])['missed_class'].sum()
        counts_all = predictions_nominal.groupby(['mag_bin'])['missed_class'].count()
        failure_ratio_all = 100*class_fails_all/counts_all
        cumulative_all = 100*class_fails_all.cumsum()/counts_all.sum()
        plt.close('all')
        for group_name, group in predictions_nominal.groupby(['mag_bin']):
            if len(group) > 2:
                # Lets make histograms with the residuals versus the magnitudes
                predicted_class = np.argmax(group.loc[:, y_pred_nom_class].values, axis = 1)
                true_class = np.argmax(group.loc[:, y_obs_class].values, axis = 1)
                group['missed_class'] = predicted_class != true_class
                class_fails = group.groupby(['mag_bin'])['missed_class'].sum()
                counts = group.groupby(['mag_bin'])['missed_class'].count()
                failure_ratio = 100*class_fails/counts
                relative_failure_ratio = failure_ratio/failure_ratio.sum()
                cumulative = 100*class_fails.cumsum()/counts.sum()
                labels = [[clase.replace('CLASS_', '') for clase in y_obs_class][i] for i in list(set(true_class))]
                plt.close('all')
                fig, (ax1, ax2) = plt.subplots(1,2, figsize = (10, 4))
                cm = confusion_matrix(true_class, predicted_class, normalize='true')
                disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels = labels)
                disp.plot(ax = ax1)
                add_inner_title(ax1, r'%s < %s $\leq$ %s'%(group_name.left, mag, group_name.right), 2, size={'size':'9'}, color=None, rotation=None)
                add_inner_title(ax1, '%s objects'%len(group), 3, size={'size':'9'}, color=None, rotation=None)
                err = ax2.bar(binscenters, failure_ratio_all, width = 1, edgecolor = 'k', label = 'Mag bin error [%]')
                cum = ax2.step(bins[1::], cumulative_all, linestyle = '--', color = 'C3', linewidth = 2, label = 'Total cumulative error [%]')
                ax3 = ax2.twinx()
                no = ax3.step(bins[1::], counts_all, linestyle = ':', linewidth = 2, color = 'k', label = 'N. objects')
                ax3.set_ylabel('N. objects')
                # Overplot the current bin's failure ratio in red on top of the
                # global (black-edged) histogram.
                ax2.bar(binscenters, failure_ratio, width = 1, edgecolor = 'r')
                ax2.set_xlabel(r'%s [mag]'%mag)
                ax2.set_ylabel(r'Classification error [%]')
                ax2.legend(loc = 2)
                ax2.grid()
                ax2.set_ylim([0, ylim])
                #add_inner_title(ax2, '%s objects'%len(group), 2, size=None, color=None, rotation=None)
                plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.25, hspace = 0.5, bottom=0.15)
                plt.savefig('%s/%s_class_ANN_%s_%s.png'%(output_directory, params['experiment_name'], mag, str(group_name).strip('(]').replace(', ', '_')), bbox_inches='tight')
    except:
        pass
    # The collapsed version
    try:
        # Lets make histograms with the residuals versus the magnitudes
        bins = np.arange(17, 24, 1.0)
        binscenters = (bins[:-1] + bins[1:]) / 2
        mag = 'magab_mag_auto_rSDSSB'
        predictions_nominal['mag_bin'] = pd.cut(x = predictions_nominal[mag], bins = bins)
        plt.close('all')
        fig, axes = plt.subplots(2, 3, figsize = (10, 6), sharex = True, sharey = True)
        plt.suptitle(r'Execution time %.1f'%(params['experiment']['execution_time']))
        for ii, ((group_name, group), ax) in enumerate(zip(predictions_nominal.groupby(['mag_bin']), axes.flatten())):
            if len(group) > 0:
                # Lets make histograms with the residuals versus the magnitudes
                predicted_class = np.argmax(group.loc[:, y_pred_nom_class].values, axis = 1)
                true_class = np.argmax(group.loc[:, y_obs_class].values, axis = 1)
                group['missed_class'] = predicted_class != true_class
                labels = [[clase.replace('CLASS_', '') for clase in y_obs_class][i] for i in list(set(true_class))]
                cm = confusion_matrix(true_class, predicted_class, normalize='true')
                disp = ConfusionMatrixDisplay(confusion_matrix=cm, display_labels = labels)
                disp.plot(ax = ax, colorbar = False, im_kw = {'vmin':0, 'vmax':1})
                add_inner_title(ax, r'%s < %s $\leq$ %s'%(group_name.left, mag, group_name.right), 2, size={'size':'9'}, color=None, rotation=None)
                #add_inner_title(ax, '%s objects'%len(group), 3, size={'size':'9'}, color=None, rotation=None)
                if (ii)%3:
                    ax.set_ylabel('')
                if ii <= 7:
                    ax.set_xlabel('')
        plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.1, hspace = 0.1, bottom=0.1)
        plt.savefig('%s/%s_class_ANN_permagbin.png'%(output_directory, params['experiment_name']), bbox_inches='tight')
    except:
        pass
    # Median relative regression error per magnitude bin, one grid per target.
    try:
        bins = np.arange(17, 26, 1.0)
        binscenters = (bins[:-1] + bins[1:]) / 2
        for residual_rel in residual_reg_abs_rel_list:
            plt.close('all')
            fig, axs = plt.subplots(5, 4, dpi=200, figsize = [10, 10], sharey = True, sharex = True)
            for ii, (ax, mag) in enumerate(zip(axs.flatten(), mag_list)):
                predictions_nominal['mag_bin'] = pd.cut(x = predictions_nominal[mag], bins = bins)
                agregated_median = predictions_nominal.groupby(['mag_bin'])[residual_rel].median()
                agregated_mean = predictions_nominal.groupby(['mag_bin'])[residual_rel].mean()
                agregated_std = predictions_nominal.groupby(['mag_bin'])[residual_rel].std()
                cumulative = (agregated_median.cumsum()/agregated_median.sum())
                ax.bar(binscenters, agregated_median, width = 1, edgecolor = 'k')
                ax.axvline(x = 25, color = 'r')
                ax2 = ax.twinx()
                if (ii+1)%4:
                    ax2.set_yticklabels([])
                ax2.step(bins[1::], cumulative, linestyle = '--', color = 'C1', label = 'Cumulative')
                ax2.tick_params(axis='y', labelcolor='C1')
                ax.set_xlabel(r'%s [mag]'%mag)
                ax.set_xlim([binscenters[0], binscenters[-1]])
                ax.set_ylim([0, 0.25])
                ax2.set_ylim([0,1])
                ax.grid()
            plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.25, hspace = 0.5, bottom=0.15)
            # Invisible full-figure axes for the shared left/right labels.
            ax_o = fig.add_subplot(111, frameon=False)
            # hide tick and tick label of the big axis
            ax_o.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
            ax_o.set_ylabel(r'$\Delta(z)/(1+z)$')
            ax3 = ax_o.twinx()
            ax3.spines["left"].set_visible(False)
            ax3.spines["bottom"].set_visible(False)
            ax3.spines["right"].set_visible(False)
            ax3.spines["top"].set_visible(False)
            ax3.tick_params(labelcolor='none', which='both', top=False, bottom=False, left=False, right=False)
            ax3.set_ylabel('cumulative', color='C1') # we already handled the x-label with ax1
            plt.savefig('%s/%s_regression_relerr_%s.png'%(output_directory, params['experiment_name'], residual_rel), bbox_inches='tight')
    except:
        pass
    # Pull distributions: residual divided by predicted uncertainty, compared
    # against a Gaussian with sigma-clipped median and MAD std.
    for obs, pred_nom, pred_var in zip(y_obs, y_pred_nom_regres, y_pred_var_regres):
        plt.close('all')
        variance = (predictions_nominal[obs] - predictions_nominal[pred_nom].values) / predictions_nominal[pred_var].values
        clipped_variance = np.ma.filled(sigma_clip(variance, sigma=6, maxiters=None))
        mu = np.nanmean(clipped_variance)
        m = np.nanmedian(clipped_variance)
        # NOTE(review): this clipped std is recomputed from the UNclipped
        # values a few lines below and overwritten — confirm which is intended.
        std = mad_std(clipped_variance)
        fig, ax = plt.subplots(figsize=(5.5, 5.5), dpi=300)
        ax.hist(variance, 50, density=True, range = [-6, 6])
        xmin, xmax = ax.get_xlim()
        x = np.linspace(xmin, xmax, 200)
        std = mad_std(variance)
        print(m, std)
        p = norm.pdf(x, m, std)
        ax.plot(x, p, 'k', linewidth=1.5)
        add_inner_title(ax, '$\mu=%.2f, \sigma=%.2f$'%(m, std), 2, size=None, color=None, rotation=None)
        plt.savefig('%s/%s_%s_distro_errors.png'%(output_directory, params['experiment_name'], obs), bbox_inches='tight')
    # Saved per-object PDFs: marginal histograms per regression target plus
    # the class-probability histograms in the last panel.
    for ii, (pdf, idx) in enumerate(zip(pdfs, params['variables_n_preproc']['saved_pdfs_indexes'])):
        try:
            plt.close('all')
            fig, axs = plt.subplots(1, len(y_obs_regres) + 1, dpi=300, figsize = [14, 3])
            pred_data = predictions_nominal.iloc[idx, :]
            for (ax, obs, pred_nom, pred_var, pred_nom_pdf) in zip(axs[0:-1], y_obs_regres, y_pred_nom_regres, y_pred_var_regres, y_pred_nom_pdf, ):
                ax.hist(pdf[pred_nom_pdf], 50, histtype = 'step')
                ax.axvline(pred_data[pred_nom], color = 'C0', linestyle = '-', linewidth = 0.8)
                ax.axvspan(pred_data[pred_nom] - pred_data[pred_var], pred_data[pred_nom] + pred_data[pred_var], color = 'C0', alpha = 0.2)
                ax.axvline(pred_data[obs], color = 'r', linestyle = '--', linewidth = 0.8, zorder = 3)
                ax.set_xlabel(obs)
                ax.set_yticklabels([])
            axs[-1].hist(pdf.loc[:, y_pred_nom_pdf_class], 50, range = [-0.1, 1.1], histtype = 'step', label = y_obs_class)
            axs[-1].legend()
            plt.savefig('%s/%s_PDF_%i.png'%(output_directory, params['experiment_name'], idx), bbox_inches='tight')
        except:
            pass
    # The photo Z per mag bin
    try:
        # Lets make histograms with the residuals versus the magnitudes
        bins = np.arange(13, 26, 1.0)
        binscenters = (bins[:-1] + bins[1:]) / 2
        mag = 'magab_mag_auto_rSDSSB'
        predictions_nominal['mag_bin'] = pd.cut(x = predictions_nominal[mag], bins = bins)
        for obs_regres, obs_err_regres, pred_nom_regres, pred_var_regres in zip(y_obs_regres, y_obs_err_regres, y_pred_nom_regres, y_pred_var_regres):
            # NOTE(review): shadows the function's `xlims` default argument.
            xlims = predictions_nominal[obs_regres].quantile([0.01, 0.99]).values
            ylims = xlims
            zlims = predictions_nominal[pred_var_regres].quantile([0.01, 0.99]).values
            limsy = [-0.3, 0.3]
            thresh = 2
            plt.close('all')
            fig, axes = plt.subplots(3, 4, figsize = (12, 10), sharex = True, sharey = True)
            for ii, ((group_name, group), ax) in enumerate(zip(predictions_nominal.groupby(['mag_bin']), axes.flatten())):
                if len(group) > 0:
                    # Lets make histograms with the residuals versus the magnitudes
                    locx, locy, hh, hh_filt, hh_d, hh_d_filt, xcenters, ycenters, lwdx, lwdy, lwd_d = plot_density(group[obs_regres], group[pred_nom_regres], zdata = group[pred_var_regres], xyrange = [xlims, ylims], thresh = thresh, bins = [100, 100])
                    X, Y = np.meshgrid(locx, locy)
                    ax.pcolormesh(X, Y, hh_d.T)
                    contour_levels = np.linspace(np.log10( max( [np.nanmin(hh_filt), thresh] )), max( [np.nanmax(np.log10(hh_filt))*0.99, thresh + 1]), 10)
                    cs = ax.contour(xcenters, ycenters, np.log10(hh_filt.T), colors = 'w', linestyles ='-', linewidths = 0.85, levels = contour_levels, zorder = 3)
                    cb = ax.scatter(lwdx, lwdy, c = lwd_d, s = 1, alpha = 0.75, vmin = zlims[0], vmax = zlims[1])
                    ax.plot(xlims, xlims, color = 'r', linewidth = 0.8)
                    ax.set_ylim(ylims)
                    ax.set_xlim(xlims)
                    ax.grid()
                    ax.set_xlabel(obs_regres)
                    ax.set_ylabel(pred_nom_regres.replace('_nom', '').replace('_median', ''))
                    add_inner_title(ax, r'%s < %s $\leq$ %s'%(group_name.left, mag, group_name.right), 2, size={'size':'9'}, color=None, rotation=None)
                    add_inner_title(ax, '%s objects'%len(group), 3, size={'size':'9'}, color=None, rotation=None)
                    if (ii)%4:
                        ax.set_ylabel('')
                    if ii <= 7:
                        ax.set_xlabel('')
            plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.1, hspace = 0.1, bottom=0.1)
            plt.savefig('%s/%s_photo_%s_permagbin.png'%(output_directory, params['experiment_name'], obs_regres), bbox_inches='tight')
    except:
        pass
    # Predicted-vs-observed density + rotated residuals for each regression
    # target, followed by an error-band scatter of the rotated residuals.
    for obs_regres, obs_err_regres, pred_nom_regres, pred_var_regres in zip(y_obs_regres, y_obs_err_regres, y_pred_nom_regres, y_pred_var_regres):
        # NOTE(review): both branches are identical — confirm whether the 'Z'
        # (redshift) case was meant to use different limits.
        if 'Z' in obs_regres:
            xlims = predictions_nominal[obs_regres].quantile([0.01, 0.99]).values
            ylims = xlims
        else:
            xlims = predictions_nominal[obs_regres].quantile([0.01, 0.99]).values
            ylims = xlims
        zlims = predictions_nominal[pred_var_regres].quantile([0.01, 0.99]).values
        limsy = [-0.3, 0.3]
        thresh = 5
        locx, locy, hh, hh_filt, hh_d, hh_d_filt, xcenters, ycenters, lwdx, lwdy, lwd_d = plot_density(predictions_nominal[obs_regres], predictions_nominal[pred_nom_regres], zdata = predictions_nominal[pred_var_regres], xyrange = [xlims, ylims], thresh = thresh, bins = [100, 100])
        plt.close('all')
        fig, (ax1, ax2) = plt.subplots(2,1, gridspec_kw={'height_ratios': [2, 1]}, sharex = True, figsize = (5, 8))
        X, Y = np.meshgrid(locx, locy)
        ax1.pcolormesh(X, Y, hh_d.T)
        contour_levels = np.linspace(np.log10( max( [np.nanmin(hh_filt), thresh] )), max( [np.nanmax(np.log10(hh_filt))*0.99, thresh + 1]), 10)
        cs = ax1.contour(xcenters, ycenters, np.log10(hh_filt.T), colors = 'w', linestyles ='-', linewidths = 0.85, levels = contour_levels, zorder = 3)
        cb = ax1.scatter(lwdx, lwdy, c = lwd_d, s = 1, alpha = 0.75, vmin = zlims[0], vmax = zlims[1])
        ax1.plot(xlims, xlims, color = 'r', linewidth = 0.8)
        ax1.set_ylim(ylims)
        ax1.set_xlim(xlims)
        ax1.grid()
        ax1.set_ylabel(obs_regres)
        # Colorbar
        cbar = fig.colorbar(cb, ax=ax1, aspect = 15, pad = 0.03)
        cbar.set_label(pred_var_regres, labelpad = +0.5)
        # Redshift targets use the standard Delta(z)/(1+z) residual; other
        # targets use residuals rotated 45 degrees around the identity line.
        if 'Z' in obs_regres:
            residuals_y = (predictions_nominal[pred_nom_regres].values-predictions_nominal[obs_regres].values)/(1+predictions_nominal[obs_regres].values)
            residuals_x = predictions_nominal[obs_regres]
        else:
            residuals_x = (predictions_nominal[obs_regres]+predictions_nominal[pred_nom_regres]) / np.sqrt(2)
            residuals_y = (predictions_nominal[pred_nom_regres]-predictions_nominal[obs_regres]) / np.sqrt(2)
        # NOTE(review): overwrites the string `stats` ('pc50') defined at the
        # top with a (mean, median, std) tuple — only safe because the column
        # names were already built above.
        stats = sigma_clipped_stats(residuals_y, sigma=6.0, maxiters=None)
        locx, locy, hh, hh_filt, hh_d, hh_d_filt, xcenters, ycenters, lwdx, lwdy, lwd_d = plot_density(residuals_x, residuals_y, zdata = predictions_nominal[pred_var_regres], xyrange = [xlims, limsy], thresh = thresh, bins = [100, 100])
        X, Y = np.meshgrid(locx, locy)
        ax2.pcolormesh(X, Y, hh_d.T)
        cs = ax2.contour(xcenters, ycenters, np.log10(hh_filt.T), colors = 'w', linestyles ='-', linewidths = 0.85, levels = contour_levels, zorder = 3)
        cb = ax2.scatter(lwdx, lwdy, c = lwd_d, s = 1, alpha = 0.75, vmin = zlims[0], vmax = zlims[1])
        ax2.set_xlabel(obs_regres)
        ax2.set_xlim(xlims)
        ax2.axhline(y=0, color = 'r', linewidth = 0.8)
        ax2.grid()
        if 'Z' in obs_regres:
            ax2.set_ylabel(r'$\Delta(z)/(1+z)$')
        else:
            ax2.set_ylabel(r'$\Delta$(%s)'%obs_regres)
        divider = make_axes_locatable(ax2)
        # below height and pad are in inches
        ax_histy = divider.append_axes("right", 0.9, pad=0.2, sharey=ax2)
        ax_histy.yaxis.set_tick_params(labelleft=False)
        ax_histy.hist(residuals_y, bins=100, orientation='horizontal', range = limsy)
        ax_histy.grid()
        add_inner_title(ax_histy, '$\mu=%.2f$\n$m=%.2f$\n$\sigma=%.2f$'%(stats), 2, size=None, color=None, rotation=None)
        plt.subplots_adjust(left = 0.1, right=0.9, wspace = 0.15, hspace = 0.15, bottom=0.15)
        plt.savefig('%s/%s_photo_%s.png'%(output_directory, params['experiment_name'], obs_regres), bbox_inches='tight')
        # We rotate the predictions_nominal to check the dispersion
        predictions_nominal['resx_%s'%obs_regres] = (predictions_nominal[obs_regres]+predictions_nominal[pred_nom_regres]) / np.sqrt(2)
        predictions_nominal['resy_%s'%obs_regres] = (predictions_nominal[pred_nom_regres]-predictions_nominal[obs_regres]) / np.sqrt(2)
        predictions_nominal['res_%s_erru'%obs_regres] = np.sqrt(predictions_nominal[obs_err_regres]**2+predictions_nominal[pred_var_regres]**2)
        predictions_nominal['res_%s_errd'%obs_regres] = np.sqrt(predictions_nominal[obs_err_regres]**2+predictions_nominal[pred_var_regres]**2)
        limsx = np.nanpercentile(predictions_nominal['resx_%s'%obs_regres], [0.1, 99.9], axis=0)
        plt.close('all')
        # Shrink positive residuals down / grow negative residuals up by the
        # combined (observed + predicted) error to test consistency with zero.
        above = predictions_nominal['resy_%s'%obs_regres] > 0
        yabove = predictions_nominal.loc[above, 'resy_%s'%obs_regres] - predictions_nominal.loc[above, 'res_%s_errd'%obs_regres]
        ybelow = predictions_nominal.loc[~above, 'resy_%s'%obs_regres] + predictions_nominal.loc[~above, 'res_%s_erru'%obs_regres]
        stats_above = sigma_clipped_stats(yabove, sigma=6.0, maxiters=None)
        stats_below = sigma_clipped_stats(ybelow, sigma=6.0, maxiters=None)
        fig, ax = plt.subplots(figsize=(5.5, 5.5), dpi=300)
        ax.plot(predictions_nominal.loc[above, 'resx_%s'%obs_regres], yabove, '.', ms = 1, alpha = 0.5, label = 'above')
        ax.plot(predictions_nominal.loc[~above, 'resx_%s'%obs_regres], ybelow, '.', ms = 1, alpha = 0.5, label = 'below')
        ax.axhline(0, color='C3')
        ax.set_xlim(limsx)
        ax.set_ylim(limsy)
        ax.legend()
        add_inner_title(ax, '$\mu_{above}=%.2f, m_{above}=%.2f, \sigma_{above}=%.2f$\n$\mu_{below}=%.2f, m_{below}=%.2f, \sigma_{below}=%.2f$'%(stats_above[0], stats_above[1], stats_above[2], stats_below[0], stats_below[1], stats_below[2]), 2, size=None, color=None, rotation=None)
        plt.savefig('%s/%s_photo_%s_errors.png'%(output_directory, params['experiment_name'], obs_regres))
|
AndresdPMREPO_NAMEBANNJOSPATH_START.@BANNJOS_extracted@BANNJOS-main@libraries.py@.PATH_END.py
|
{
"filename": "quantization.md",
"repo_name": "huggingface/peft",
"repo_path": "peft_extracted/peft-main/docs/source/developer_guides/quantization.md",
"type": "Markdown"
}
|
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Quantization
Quantization represents data with fewer bits, making it a useful technique for reducing memory usage and accelerating inference, especially when it comes to large language models (LLMs). There are several ways to quantize a model, including:
* optimizing which model weights are quantized with the [AWQ](https://hf.co/papers/2306.00978) algorithm
* independently quantizing each row of a weight matrix with the [GPTQ](https://hf.co/papers/2210.17323) algorithm
* quantizing to 8-bit and 4-bit precision with the [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) library
* quantizing to as low as 2-bit precision with the [AQLM](https://arxiv.org/abs/2401.06118) algorithm
However, after a model is quantized it isn't typically further trained for downstream tasks because training can be unstable due to the lower precision of the weights and activations. But since PEFT methods only add *extra* trainable parameters, this allows you to train a quantized model with a PEFT adapter on top! Combining quantization with PEFT can be a good strategy for training even the largest models on a single GPU. For example, [QLoRA](https://hf.co/papers/2305.14314) is a method that quantizes a model to 4-bits and then trains it with LoRA. This method allows you to finetune a 65B parameter model on a single 48GB GPU!
In this guide, you'll see how to quantize a model to 4-bits and train it with LoRA.
## Quantize a model
[bitsandbytes](https://github.com/TimDettmers/bitsandbytes) is a quantization library with a Transformers integration. With this integration, you can quantize a model to 8 or 4-bits and enable many other options by configuring the [`~transformers.BitsAndBytesConfig`] class. For example, you can:
* set `load_in_4bit=True` to quantize the model to 4-bits when you load it
* set `bnb_4bit_quant_type="nf4"` to use a special 4-bit data type for weights initialized from a normal distribution
* set `bnb_4bit_use_double_quant=True` to use a nested quantization scheme to quantize the already quantized weights
* set `bnb_4bit_compute_dtype=torch.bfloat16` to use bfloat16 for faster computation
```py
import torch
from transformers import BitsAndBytesConfig
config = BitsAndBytesConfig(
load_in_4bit=True,
bnb_4bit_quant_type="nf4",
bnb_4bit_use_double_quant=True,
bnb_4bit_compute_dtype=torch.bfloat16,
)
```
Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config)
```
Next, you should call the [`~peft.utils.prepare_model_for_kbit_training`] function to preprocess the quantized model for training.
```py
from peft import prepare_model_for_kbit_training
model = prepare_model_for_kbit_training(model)
```
Now that the quantized model is ready, let's set up a configuration.
## LoraConfig
Create a [`LoraConfig`] with the following parameters (or choose your own):
```py
from peft import LoraConfig
config = LoraConfig(
r=16,
lora_alpha=8,
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM"
)
```
Then use the [`get_peft_model`] function to create a [`PeftModel`] from the quantized model and configuration.
```py
from peft import get_peft_model
model = get_peft_model(model, config)
```
You're all set for training with whichever training method you prefer!
### LoftQ initialization
[LoftQ](https://hf.co/papers/2310.08659) initializes LoRA weights such that the quantization error is minimized, and it can improve performance when training quantized models. To get started, follow [these instructions](https://github.com/huggingface/peft/tree/main/examples/loftq_finetuning).
In general, for LoftQ to work best, it is recommended to target as many layers with LoRA as possible, since those not targeted cannot have LoftQ applied. This means that passing `LoraConfig(..., target_modules="all-linear")` will most likely give the best results. Also, you should use `nf4` as quant type in your quantization config when using 4bit quantization, i.e. `BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4")`.
### QLoRA-style training
QLoRA adds trainable weights to all the linear layers in the transformer architecture. Since the attribute names for these linear layers can vary across architectures, set `target_modules` to `"all-linear"` to add LoRA to all the linear layers:
```py
config = LoraConfig(target_modules="all-linear", ...)
```
## AQLM quantization
Additive Quantization of Language Models ([AQLM](https://arxiv.org/abs/2401.06118)) is a Large Language Models compression method. It quantizes multiple weights together and takes advantage of interdependencies between them. AQLM represents groups of 8-16 weights as a sum of multiple vector codes. This allows it to compress models down to as low as 2-bit with considerably low accuracy losses.
Since the AQLM quantization process is computationally expensive, using prequantized models is recommended. A partial list of available models can be found in the official aqlm [repository](https://github.com/Vahe1994/AQLM).
These models support LoRA adapter tuning. To tune a quantized model, you'll need to install the `aqlm` inference library: `pip install aqlm>=1.0.2`. Finetuned LoRA adapters must be saved separately, as merging them with AQLM quantized weights is not possible.
```py
quantized_model = AutoModelForCausalLM.from_pretrained(
"BlackSamorez/Mixtral-8x7b-AQLM-2Bit-1x16-hf-test-dispatch",
torch_dtype="auto", device_map="auto", low_cpu_mem_usage=True,
)
peft_config = LoraConfig(...)
quantized_model = get_peft_model(quantized_model, peft_config)
```
You can refer to the [Google Colab](https://colab.research.google.com/drive/12GTp1FCj5_0SnnNQH18h_2XFh9vS_guX?usp=sharing) example for an overview of AQLM+LoRA finetuning.
## EETQ quantization
You can also perform LoRA fine-tuning on EETQ quantized models. The [EETQ](https://github.com/NetEase-FuXi/EETQ) package offers a simple and efficient way to perform 8-bit quantization, which is claimed to be faster than the `LLM.int8()` algorithm. First, make sure that you have a transformers version that is compatible with EETQ (e.g. by installing it from the latest PyPI release or from source).
```py
import torch
from transformers import EetqConfig
config = EetqConfig("int8")
```
Pass the `config` to the [`~transformers.AutoModelForCausalLM.from_pretrained`] method.
```py
from transformers import AutoModelForCausalLM
model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1", quantization_config=config)
```
and create a `LoraConfig` and pass it to `get_peft_model`:
```py
from peft import LoraConfig, get_peft_model
config = LoraConfig(
r=16,
lora_alpha=8,
target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
lora_dropout=0.05,
bias="none",
task_type="CAUSAL_LM"
)
model = get_peft_model(model, config)
```
## HQQ quantization
Models quantized using Half-Quadratic Quantization of Large Machine Learning Models ([HQQ](https://mobiusml.github.io/hqq_blog/)) support LoRA adapter tuning. To tune a quantized model, you'll need to install the `hqq` library with: `pip install hqq`.
```python
from hqq.engine.hf import HQQModelForCausalLM
quantized_model = HQQModelForCausalLM.from_quantized(save_dir_or_hfhub, device='cuda')
peft_config = LoraConfig(...)
quantized_model = get_peft_model(quantized_model, peft_config)
```
Alternatively, use a transformers version that is compatible with HQQ (e.g. by installing it from the latest PyPI release or from source).
```python
from transformers import HqqConfig, AutoModelForCausalLM
quant_config = HqqConfig(nbits=4, group_size=64)
quantized_model = AutoModelForCausalLM.from_pretrained(save_dir_or_hfhub, device_map=device_map, quantization_config=quant_config)
peft_config = LoraConfig(...)
quantized_model = get_peft_model(quantized_model, peft_config)
```
## torchao (PyTorch Architecture Optimization)
PEFT supports models quantized with [torchao](https://github.com/pytorch/ao) ("ao") for int8 quantization.
```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM, TorchAoConfig
model_id = ...
quantization_config = TorchAoConfig(quant_type="int8_weight_only")
base_model = AutoModelForCausalLM.from_pretrained(model_id, quantization_config=quantization_config)
peft_config = LoraConfig(...)
model = get_peft_model(base_model, peft_config)
```
### Caveats:
- Use the most recent versions of torchao (>= v0.4.0) and transformers (> 4.42).
- Only linear layers are currently supported.
- `quant_type = "int4_weight_only"` is currently not supported.
- `NF4` is not implemented in transformers as of yet and is thus also not supported.
- DoRA only works with `quant_type = "int8_weight_only"` at the moment.
- There is explicit support for torchao when used with LoRA. However, when torchao quantizes a layer, its class does not change, only the type of the underlying tensor. For this reason, PEFT methods other than LoRA will generally also work with torchao, even if not explicitly supported. Be aware, however, that **merging only works correctly with LoRA and with `quant_type = "int8_weight_only"`**. If you use a different PEFT method or dtype, merging will likely result in an error, and even if it doesn't, the results will still be incorrect.
## Other Supported PEFT Methods
Besides LoRA, the following PEFT methods also support quantization:
- **VeRA** (supports bitsandbytes quantization)
- **AdaLoRA** (supports both bitsandbytes and GPTQ quantization)
- **(IA)³** (supports bitsandbytes quantization)
## Next steps
If you're interested in learning more about quantization, the following may be helpful:
* Learn more details about QLoRA and check out some benchmarks on its impact in the [Making LLMs even more accessible with bitsandbytes, 4-bit quantization and QLoRA](https://huggingface.co/blog/4bit-transformers-bitsandbytes) blog post.
* Read more about different quantization schemes in the Transformers [Quantization](https://hf.co/docs/transformers/main/quantization) guide.
|
huggingfaceREPO_NAMEpeftPATH_START.@peft_extracted@peft-main@docs@source@developer_guides@quantization.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/funnelarea/hoverlabel/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated validator registry for plotly's funnelarea.hoverlabel
# namespace.  On Python < 3.7 (which lacks PEP 562 module-level
# __getattr__) or while a static type checker is running, import every
# validator class eagerly; otherwise install lazy relative imports so
# each validator module is only loaded on first attribute access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._namelengthsrc import NamelengthsrcValidator
    from ._namelength import NamelengthValidator
    from ._font import FontValidator
    from ._bordercolorsrc import BordercolorsrcValidator
    from ._bordercolor import BordercolorValidator
    from ._bgcolorsrc import BgcolorsrcValidator
    from ._bgcolor import BgcolorValidator
    from ._alignsrc import AlignsrcValidator
    from ._align import AlignValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns the module's __all__, __getattr__, and
    # __dir__ hooks; the empty list is the set of eagerly-imported
    # submodules (none here).
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._namelengthsrc.NamelengthsrcValidator",
            "._namelength.NamelengthValidator",
            "._font.FontValidator",
            "._bordercolorsrc.BordercolorsrcValidator",
            "._bordercolor.BordercolorValidator",
            "._bgcolorsrc.BgcolorsrcValidator",
            "._bgcolor.BgcolorValidator",
            "._alignsrc.AlignsrcValidator",
            "._align.AlignValidator",
        ],
    )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@funnelarea@hoverlabel@__init__.py@.PATH_END.py
|
{
"filename": "proc_characterize.py",
"repo_name": "simonsobs/sotodlib",
"repo_path": "sotodlib_extracted/sotodlib-master/sotodlib/toast/workflows/proc_characterize.py",
"type": "Python"
}
|
# Copyright (c) 2023-2023 Simons Observatory.
# Full license can be found in the top level "LICENSE" file.
"""Timestream processing filters.
"""
import numpy as np
from astropy import units as u
import toast
import toast.ops
from .. import ops as so_ops
from .job import workflow_timer
def setup_raw_statistics(operators):
    """Register the raw timestream statistics operator (disabled by default).

    Args:
        operators (list): The list of operators to extend.

    Returns:
        None

    """
    stats = toast.ops.Statistics(name="raw_statistics", enabled=False)
    operators.append(stats)
@workflow_timer
def raw_statistics(job, otherargs, runargs, data):
    """Compute timestream statistics on the raw data.

    Args:
        job (namespace): The configured operators and templates for this job.
        otherargs (namespace): Other commandline arguments.
        runargs (namespace): Job related runtime parameters.
        data (Data): The data container.

    Returns:
        None

    """
    stats = job.operators.raw_statistics
    if not stats.enabled:
        return
    stats.output_dir = otherargs.out_dir
    stats.apply(data)
def setup_filtered_statistics(operators):
    """Register the filtered timestream statistics operator (disabled by default).

    Args:
        operators (list): The list of operators to extend.

    Returns:
        None

    """
    stats = toast.ops.Statistics(name="filtered_statistics", enabled=False)
    operators.append(stats)
@workflow_timer
def filtered_statistics(job, otherargs, runargs, data):
    """Compute timestream statistics on the filtered data.

    Args:
        job (namespace): The configured operators and templates for this job.
        otherargs (namespace): Other commandline arguments.
        runargs (namespace): Job related runtime parameters.
        data (Data): The data container.

    Returns:
        None

    """
    stats = job.operators.filtered_statistics
    if not stats.enabled:
        return
    stats.output_dir = otherargs.out_dir
    stats.apply(data)
def setup_hn_map(operators):
    """Register the H_n map operator (disabled by default).

    Args:
        operators (list): The list of operators to extend.

    Returns:
        None

    """
    hn = so_ops.Hn(name="h_n", enabled=False)
    operators.append(hn)
@workflow_timer
def hn_map(job, otherargs, runargs, data):
    """Compute the H_n map.

    Args:
        job (namespace): The configured operators and templates for this job.
        otherargs (namespace): Other commandline arguments.
        runargs (namespace): Job related runtime parameters.
        data (Data): The data container.

    Returns:
        None

    """
    ops = job.operators
    hn = ops.h_n
    if not hn.enabled:
        return
    hn.pixel_pointing = job.pixels_final
    hn.pixel_dist = ops.binner_final.pixel_dist
    hn.output_dir = otherargs.out_dir
    hn.save_pointing = otherargs.full_pointing
    hn.apply(data)
def setup_cadence_map(operators):
    """Register the cadence map operator (disabled by default).

    Args:
        operators (list): The list of operators to extend.

    Returns:
        None

    """
    cadence = toast.ops.CadenceMap(name="cadence_map", enabled=False)
    operators.append(cadence)
@workflow_timer
def cadence_map(job, otherargs, runargs, data):
    """Compute the cadence map.

    Args:
        job (namespace): The configured operators and templates for this job.
        otherargs (namespace): Other commandline arguments.
        runargs (namespace): Job related runtime parameters.
        data (Data): The data container.

    Returns:
        None

    """
    ops = job.operators
    cadence = ops.cadence_map
    if not cadence.enabled:
        return
    cadence.pixel_pointing = job.pixels_final
    cadence.pixel_dist = ops.binner_final.pixel_dist
    cadence.output_dir = otherargs.out_dir
    cadence.save_pointing = otherargs.full_pointing
    cadence.apply(data)
def setup_crosslinking_map(operators):
    """Register the crosslinking map operator (disabled by default).

    Args:
        operators (list): The list of operators to extend.

    Returns:
        None

    """
    xlink = toast.ops.CrossLinking(name="crosslinking", enabled=False)
    operators.append(xlink)
@workflow_timer
def crosslinking_map(job, otherargs, runargs, data):
    """Compute the crosslinking map.

    Args:
        job (namespace): The configured operators and templates for this job.
        otherargs (namespace): Other commandline arguments.
        runargs (namespace): Job related runtime parameters.
        data (Data): The data container.

    Returns:
        None

    """
    ops = job.operators
    xlink = ops.crosslinking
    if not xlink.enabled:
        return
    xlink.pixel_pointing = job.pixels_final
    xlink.pixel_dist = ops.binner_final.pixel_dist
    xlink.output_dir = otherargs.out_dir
    xlink.save_pointing = otherargs.full_pointing
    xlink.apply(data)
|
simonsobsREPO_NAMEsotodlibPATH_START.@sotodlib_extracted@sotodlib-master@sotodlib@toast@workflows@proc_characterize.py@.PATH_END.py
|
{
"filename": "_xpad.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/volume/colorbar/_xpad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
    # Auto-generated validator for the `xpad` property of volume.colorbar:
    # the padding along the x direction, a non-negative number.
    def __init__(self, plotly_name="xpad", parent_name="volume.colorbar", **kwargs):
        """Create the validator for volume.colorbar.xpad.

        Defaults (each overridable via kwargs): edit_type="calc" (a change
        triggers a full recalculation), min=0 (padding cannot be negative),
        role="style".
        """
        super(XpadValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "style"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@volume@colorbar@_xpad.py@.PATH_END.py
|
{
"filename": "_shadow.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sankey/link/hoverlabel/font/_shadow.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShadowValidator(_plotly_utils.basevalidators.StringValidator):
    # Auto-generated validator for the `shadow` property of
    # sankey.link.hoverlabel.font (a text-shadow style string).
    def __init__(
        self, plotly_name="shadow", parent_name="sankey.link.hoverlabel.font", **kwargs
    ):
        """Create the validator for sankey.link.hoverlabel.font.shadow.

        Defaults (each overridable via kwargs): array_ok=True (a per-point
        array of values is accepted), edit_type="calc" (a change triggers a
        full recalculation).
        """
        super(ShadowValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            array_ok=kwargs.pop("array_ok", True),
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sankey@link@hoverlabel@font@_shadow.py@.PATH_END.py
|
{
"filename": "mcsed.py",
"repo_name": "wpb-astro/MCSED",
"repo_path": "MCSED_extracted/MCSED-master/mcsed.py",
"type": "Python"
}
|
""" SED fitting class using emcee for parameter estimation
.. moduleauthor:: Greg Zeimann <gregz@astro.as.utexas.edu>
"""
import logging
import sfh
import dust_abs
import dust_emission
import metallicity
import cosmology
import emcee
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import corner
import time
from scipy.integrate import simps
from scipy.interpolate import interp1d
from astropy.constants import c as clight
import numpy as np
from astropy.table import Table, vstack
plt.ioff()  # non-interactive mode: figures are only saved, never displayed
import seaborn as sns
# Global plot styling: large-font "talk" context with inward-facing major
# and minor ticks on all four sides of each axis.
sns.set_context("talk") # options include: talk, poster, paper
sns.set_style("ticks")
sns.set_style({"xtick.direction": "in","ytick.direction": "in",
               "xtick.top":True, "ytick.right":True,
               "xtick.major.size":16, "xtick.minor.size":4,
               "ytick.major.size":16, "ytick.minor.size":4,
               })
class Mcsed:
def __init__(self, filter_matrix, wave, ssp_ages, ssp_met,
ssp_starspectra, ssp_nebspectra, emlinewave, ssp_emlineflux,
sfh_class, dust_abs_class, dust_em_class, met_class=None,
nfreeparams=None, t_birth=None,
starSSP=None, nebSSP=None, emlinefluxSSP=None,
data_fnu=None, data_fnu_e=None,
data_emline=None, data_emline_e=None, emline_dict=None,
use_emline_flux=None, linefluxCSPdict=None,
data_absindx=None, data_absindx_e=None, absindx_dict=None,
use_absorption_indx=None, absindxCSPdict=None,
fluxwv=None, fluxfn=None, medianspec=None, spectrum=None,
medianstarspec=None, starspectrum=None,
mediannebspec=None, nebspectrum=None,
redshift=None, Dl=None, filter_flag=None,
input_params=None, true_fnu=None, true_spectrum=None,
true_starspectrum=None, true_nebspectrum=None,
sigma_m=0.1, nwalkers=40, nsteps=1000,
progress_bar=False, force_emcee_finish=True, burnin_fraction=0.25,
chi2=None, tauISM_lam=None, tauIGM_lam=None):
''' Initialize the Mcsed class.
Init
----
filter_matrix : numpy array (2 dim)
The filter_matrix has rows of wavelength and columns for each
filter (can be much larger than the filters used for fitting)
wave : numpy array (1 dim)
wavelength for SSP models and all model spectra
ssp_ages : numpy array (1 dim)
ages of the SSP models
ssp_met : numpy array (1 dim)
metallicities of the SSP models
assume a grid of values Z, where Z_solar = 0.019
ssp_starspectra : numpy array (3 dim)
single stellar population spectrum (stellar component) for each
age in ssp_ages and each metallicity in ssp_met
ssp_nebspectra : numpy array (3 dim)
single stellar population spectrum (nebular component) for each
age in ssp_ages and each metallicity in ssp_met
(must have same shape as ssp_starspectra)
if None, assume stars+gas are combined in ssp_starspectra
emlinewave : numpy array (1 dim)
Rest-frame wavelengths of requested emission lines (emline_dict)
Corresponds to ssp_emline
ssp_emlineflux : numpy array (3 dim)
Emission line SSP grid spanning emlinewave, age, metallicity
Only includes requested emission lines (from emline_dict)
Only used for calculating model emission line strengths
Spectral units are ergs / s / cm2 at 10 pc
sfh_class : str
Converted from str to class in initialization
This is the input class for sfh. Each class has a common attribute
which is "sfh_class.get_nparams()" for organizing the total model_params.
Also, each class has a key function, sfh_class.evaluate(t), with
the input of time in units of Gyrs
dust_abs_class : str
Converted from str to class in initialization
This is the input class for dust absorption.
dust_em_class : str
Converted from str to class in initialization
This is the input class for dust emission.
met_class : str
Converted from str to class in initialization
This is the input class for stellar metallicity
nfreeparams : int
number of free model parameters
t_birth : float
Age of the birth cloud in Gyr
set from the value provided in config.py
starSSP : numpy array (2 dim)
Grid of stellar SSP spectra at current guess of stellar metallicity
(set from ssp_starspectra)
nebSSP : numpy array (2 dim)
Grid of nebular SSP spectra at current guess of stellar metallicity
(set from ssp_nebspectra)
emlinefluxSSP : numpy array (2 dim)
Grid of emission line fluxes at each age in the SSP grid
(set from ssp_emlineflux)
data_fnu : numpy array (1 dim)
Photometry for data. Length = (filter_flag == True).sum()
data_fnu_e : numpy array (1 dim)
Photometric errors for data
data_emline : Astropy Table (1 dim)
Emission line fluxes in units ergs / cm2 / s
data_emline_e : Astropy Table (1 dim)
Emission line errors in units ergs / cm2 / s
emline_dict : dictionary
Keys are emission line names (str)
Values are a two-element tuple:
(rest-frame wavelength in Angstroms (float), weight (float))
emline_list_dict defined in config.py, containing only the
emission lines that were also provided in the input file
(i.e., only the measurements that will be used to constrain the model)
use_emline_flux : bool
If emline_dict contains emission lines, set to True. Else, False
linefluxCSPdict : dict
Emission-line fluxes for current SED model
data_absindx : Astropy Table (1 dim)
Absorption line indices
data_absindx_e : Astropy Table (1 dim)
Absorption line index errors
absindx_dict : dict
absorption_index_dict defined in config.py, containing only
measurements that were also provided in the input file
(i.e., only the measurements that will be used to constrain the model)
use_absorption_indx : bool
True, if index measurements were included in the input file and
should be used in the model selection
absindxCSPdict : dict
Absorption line index measurements for current SED model
fluxwv : numpy array (1 dim)
wavelengths of photometric filters
fluxfn : numpy array (1 dim)
flux densities of modeled photometry
medianspec : numpy array (1 dim)
best-fit SED model (same length as self.wave)
set after fitting the model
spectrum : numpy array (1 dim)
current SED model (same length as self.wave)
medianstarspec : numpy array (1 dim)
best-fit stellar SED model (same length as self.wave)
set after fitting the model
starspectrum : numpy array (1 dim)
current stellar SED model (same length as self.wave)
mediannebspec : numpy array (1 dim)
best-fit nebular SED model (same length as self.wave)
set after fitting the model
nebspectrum : numpy array (1 dim)
current nebular SED model (same length as self.wave)
redshift : float
Redshift of the source
Dl : float
Luminosity distance of the galaxy (in units of 10 pc)
filter_flag : numpy array (1 dim)
Length = filter_matrix.shape[1], True for filters matching data
input_params : list
input parameters for modeling. Intended for testing fitting
procedure.
true_fnu : numpy array (1 dim)
True photometry for test mode. Length = (filter_flag == True).sum()
true_spectrum : numpy array (1 dim)
truth model spectrum in test model (realized from input_params)
true_starspectrum : numpy array (1 dim)
truth model stellar spectrum in test model (realized from input_params)
true_nebspectrum : numpy array (1 dim)
truth model nebular spectrum in test model (realized from input_params)
sigma_m : float
Fractional error expected from the models. This is used in
the log likelihood calculation. No model is perfect, and this is
more or less a fixed parameter to encapsulate that.
nwalkers : int
The number of walkers for emcee when fitting a model
nsteps : int
The number of steps each walker will make when fitting a model
progress_bar : bool
Show the progress of the fit in the terminal
force_emcee_finish : bool
Force emcee to finish even if it does not meet the formal
criteria for convergence
burnin_fraction : float
Fraction of nsteps to count as burnin steps, if force_emcee_finish=True
chi2 : dict
keys: 'dof', 'chi2', 'rchi2'
Track the degrees of freedom (accounting for data and model parameters)
and the chi2 and reduced chi2 of the current fit
tauISM_lam : numpy array (1 dim)
Array of effective optical depths as function of wavelength
for MW dust correction
tauIGM_lam : numpy array (1 dim)
Array of effective optical depths as function of wavelength
for IGM gas correction
full_chains : numpy array (3 dim)
Array containing the full posterior chains. The three axes are
walker, step, parameter
burn_in : int
Number of steps in the burn-in phase
'''
# Initialize all argument inputs
self.filter_matrix = filter_matrix
self.wave = wave
self.ssp_ages = ssp_ages
self.ssp_met = ssp_met
self.ssp_starspectra = ssp_starspectra
self.ssp_nebspectra = ssp_nebspectra
self.emlinewave = emlinewave
self.ssp_emlineflux = ssp_emlineflux
self.dnu = np.abs(np.hstack([0., np.diff(2.99792e18 / self.wave)]))
self.sfh_class = getattr(sfh, sfh_class)()
self.dust_abs_class = getattr(dust_abs, dust_abs_class)()
self.dust_em_class = getattr(dust_emission, dust_em_class)()
self.met_class = getattr(metallicity, 'stellar_metallicity')()
self.param_classes = ['sfh_class', 'dust_abs_class', 'met_class',
'dust_em_class']
self.nfreeparams = nfreeparams
self.t_birth = t_birth
self.starSSP = None
self.nebSSP = None
self.emlinefluxSSP = None
self.data_fnu = data_fnu
self.data_fnu_e = data_fnu_e
self.data_emline = data_emline
self.data_emline_e = data_emline_e
self.emline_dict = emline_dict
self.use_emline_flux = use_emline_flux
self.linefluxCSPdict = None
self.data_absindx = data_absindx
self.data_absindx_e = data_absindx_e
self.absindx_dict = absindx_dict
self.use_absorption_indx = use_absorption_indx
self.absindxCSPdict = None
self.fluxwv = fluxwv
self.fluxfn = fluxfn
self.medianspec = medianspec
self.spectrum = None
self.medianstarspec = medianstarspec
self.starspectrum = None
self.mediannebspec = mediannebspec
self.nebspectrum = None
self.redshift = redshift
if self.redshift is not None:
self.set_new_redshift(self.redshift)
self.Dl = Dl
self.filter_flag = filter_flag
self.input_params = input_params
self.true_fnu = true_fnu
self.true_spectrum = true_spectrum
self.true_starspectrum = true_starspectrum
self.true_nebspectrum = true_nebspectrum
self.sigma_m = sigma_m
self.nwalkers = nwalkers
self.nsteps = nsteps
self.progress_bar = progress_bar
self.force_emcee_finish = force_emcee_finish
self.burnin_fraction = burnin_fraction
self.chi2 = chi2
self.tauISM_lam = tauISM_lam
self.tauIGM_lam = tauIGM_lam
self.full_chains = None
self.burn_in = None
# Set up logging
self.setup_logging()
    def set_new_redshift(self, redshift):
        ''' Set the source redshift and update redshift-dependent state.

        Parameters
        ----------
        redshift : float
            Redshift of the source for fitting
        '''
        self.redshift = redshift
        # Need luminosity distance to adjust spectrum to distance of the source
        self.Dl = cosmology.Cosmology().luminosity_distance(self.redshift)
        # Bound the star formation history by the age of the Universe at
        # this redshift
        self.sfh_class.set_agelim(self.redshift)
def setup_logging(self):
'''Setup Logging for MCSED
Builds
-------
self.log : class
self.log.info() is for general print and self.log.error() is
for raise cases
'''
self.log = logging.getLogger('mcsed')
if not len(self.log.handlers):
# Set format for logger
fmt = '[%(levelname)s - %(asctime)s] %(message)s'
fmt = logging.Formatter(fmt)
# Set level of logging
level = logging.INFO
# Set handler for logging
handler = logging.StreamHandler()
handler.setFormatter(fmt)
handler.setLevel(level)
# Build log with name, mcsed
self.log = logging.getLogger('mcsed')
self.log.setLevel(logging.DEBUG)
self.log.addHandler(handler)
    def remove_waverange_filters(self, wave1, wave2, restframe=True):
        '''Remove filters in a given wavelength range
        Parameters
        ----------
        wave1 : float
            start wavelength of masked range (in Angstroms)
        wave2 : float
            end wavelength of masked range (in Angstroms)
        restframe : bool
            if True, wave1 and wave2 correspond to rest-frame wavelengths
        '''
        # allow the bounds to be given in either order
        wave1, wave2 = np.sort([wave1, wave2])
        if restframe:
            wave_factor = 1. + self.redshift
        else:
            wave_factor = 1.
        # indices bracketing the masked range on the model wavelength grid
        loc1 = np.searchsorted(self.wave, wave1 * wave_factor)
        loc2 = np.searchsorted(self.wave, wave2 * wave_factor)
        # account for the case where indices are the same
        if (loc1 == loc2):
            loc2+=1
        # Keep a filter only if its peak transmission inside the masked
        # window is below 10% of its overall peak transmission.
        maxima = np.max(self.filter_matrix, axis=0)
        try:
            newflag = np.max(self.filter_matrix[loc1:loc2, :], axis=0) < maxima * 0.1
        except ValueError:
            # masked range falls outside the wavelength grid; nothing to do
            return
        # Same criterion restricted to the currently active filters, used
        # to trim the photometry arrays (which only cover active filters).
        maximas = np.max(self.filter_matrix[:, self.filter_flag], axis=0)
        newflags = np.max(self.filter_matrix[loc1:loc2, self.filter_flag], axis=0) < maximas * 0.1
        self.filter_flag = self.filter_flag * newflag
        if self.true_fnu is not None:
            self.true_fnu = self.true_fnu[newflags]
        self.data_fnu = self.data_fnu[newflags]
        self.data_fnu_e = self.data_fnu_e[newflags]
def get_filter_wavelengths(self):
'''Get central wavelengths of photometric filters
'''
wave_avg = np.dot(self.wave, self.filter_matrix[:, self.filter_flag])
return wave_avg
def get_filter_fluxdensities(self, spectrum=None):
'''Convert a spectrum to photometric fluxes for a given filter set.
The photometric fluxes will be in the same units as the spectrum.
The spectrum is in microjanskies(lambda) such that
the photometric fluxes will be in microjanskies.
Parameters
----------
spectrum : None or 1d array
if not None, measure the absorption indices using the input spectrum
(must have same shape as self.wave)
Returns
-------
f_nu : numpy array (1 dim)
Photometric flux densities for an input spectrum
'''
if type(spectrum)==type(None):
spectrum = self.spectrum.copy()
f_nu = np.dot(spectrum, self.filter_matrix[:, self.filter_flag])
return f_nu
def measure_absorption_index(self, spectrum=None):
'''
measure absorption indices using current spectrum
Parameters
----------
spectrum : None or 1d array
if not None, measure the absorption indices using the input spectrum
(must have same shape as self.wave)
Returns
-------
update self.absindxCSPdict, the dictionary of absorption line indices
'''
self.absindxCSPdict = {}
if self.use_absorption_indx:
# convert the spectrum from units of specific frequency to specific wavelength
wave = self.wave.copy()
factor = clight.to('Angstrom/s').value / wave**2.
if type(spectrum)==type(None):
spec = self.spectrum * factor
else:
spec = spectrum * factor
for indx in self.absindx_dict.keys():
wht, wave_indx, wave_blue, wave_red, unit = self.absindx_dict[indx]
wave_indx = np.array(wave_indx) * (1. + self.redshift)
wave_blue = np.array(wave_blue) * (1. + self.redshift)
wave_red = np.array(wave_blue) * (1. + self.redshift)
# select appropriate data ranges for blue/red continuum and index
sel_index = np.array([False]*len(wave))
sel_index[np.argmin(abs(wave-wave_indx[0])):np.argmin(abs(wave-wave_indx[1]))] = True
if abs(np.argmin(abs(wave-wave_indx[0]))-np.argmin(abs(wave-wave_indx[1])))<2:
sel_index[np.argmin(abs(wave-wave_indx[0])):np.argmin(abs(wave-wave_indx[0]))+2] = True
sel_blue = np.array([False]*len(wave))
sel_blue[np.argmin(abs(wave-wave_blue[0])):np.argmin(abs(wave-wave_blue[1]))] = True
if abs(np.argmin(abs(wave-wave_blue[0]))-np.argmin(abs(wave-wave_blue[1])))<2:
sel_blue[np.argmin(abs(wave-wave_blue[0])):np.argmin(abs(wave-wave_blue[0]))+2] = True
sel_red = np.array([False]*len(wave))
sel_red[np.argmin(abs(wave-wave_red[0])):np.argmin(abs(wave-wave_red[1]))] = True
if abs(np.argmin(abs(wave-wave_red[0]))-np.argmin(abs(wave-wave_red[1])))<2:
sel_red[np.argmin(abs(wave-wave_red[0])):np.argmin(abs(wave-wave_red[0]))+2] = True
# estimate continuum in the index:
fw_blue = np.dot(spec[sel_blue][0:-1], np.diff(wave[sel_blue]))
fw_blue /= np.diff(wave[sel_blue][[0,-1]])
fw_red = np.dot(spec[sel_red][0:-1], np.diff(wave[sel_red]))
fw_red /= np.diff(wave[sel_red][[0,-1]])
cont_waves = [np.median(wave_blue), np.median(wave_red)]
cont_fw = [fw_blue, fw_red]
coeff = np.polyfit( cont_waves, cont_fw, 1)
cont_index = coeff[0] * wave[sel_index] + coeff[1]
# flux ratio of index and continuum
spec_index = spec[sel_index] / cont_index
if unit==0: # return measurement in equivalent width (Angstroms)
value = np.dot( 1. - spec_index[0:-1], np.diff(wave[sel_index]) )
if unit==1: # return measurement in magnitudes
integral = np.dot( spec_index[0:-1], np.diff(wave[sel_index]) )
value = -2.5 * np.log10( integral / np.diff(wave[sel_index][[0,-1]]) )
if unit==2: # return measurement as a flux density ratio (red / blue)
value = fw_red / fw_blue
self.absindxCSPdict[indx] = float(value)
def set_class_parameters(self, theta):
    ''' Distribute a flat parameter vector across the model's
    parameter classes (SFH, dust attenuation, stellar metallicity,
    and dust emission, in that fixed order).

    Each class consumes ``get_nparams()`` consecutive entries of
    ``theta`` starting at a running offset, so the ordering here must
    match how the vector was assembled (see get_params).

    Input
    -----
    theta : list
        list of input parameters for sfh, dust attenuation,
        stellar metallicity, and dust emission
    '''
    offset = 0
    # Order matters: it defines the layout of the theta vector.
    for param_class in (self.sfh_class, self.dust_abs_class,
                        self.met_class, self.dust_em_class):
        param_class.set_parameters_from_list(theta, offset)
        offset += param_class.get_nparams()
def get_ssp_spectrum(self):
    '''
    Calculate SSP for an arbitrary metallicity (self.met_class.met) given a
    model grid for a range of metallicities (self.ssp_met)

    If left as a free parameter, stellar metallicity (self.met_class.met)
    spans a range of log(Z / Z_solar); the SSP grid of metallicities
    (self.ssp_met) assumes values of Z (as opposed to log solar values).
    The grid is collapsed with Gaussian weights (sigma = 0.15 dex) around
    the requested metallicity.

    Returns
    -------
    starSSP : numpy array (2 dim)
        Grid of stellar SSP spectra at current guess of stellar metallicity
        (set from ssp_starspectra)
    nebSSP : numpy array (2 dim) or None
        Grid of nebular SSP spectra at current guess of stellar metallicity
        (set from ssp_nebspectra); None when no nebular grid is available
    emlinefluxSSP : 2-d array
        Single stellar population line fluxes for each age in self.ages
    '''
    # When metallicity is fixed the collapsed grids never change, so
    # return the cached result once it has been computed.
    if self.met_class.fix_met and self.starSSP is not None:
        return self.starSSP, self.nebSSP, self.emlinefluxSSP
    logZ_grid = np.log10(self.ssp_met)
    Zsolar = 0.019
    # Current guess converted to log10(Z) for comparison with the grid
    logZ = self.met_class.met + np.log10(Zsolar)
    wei = np.exp(-(logZ_grid - logZ)**2 / (2. * 0.15**2))
    wei /= wei.sum()
    self.starSSP = np.dot(self.ssp_starspectra, wei)
    # Not all SSP libraries provide a nebular component
    self.nebSSP = (None if self.ssp_nebspectra is None
                   else np.dot(self.ssp_nebspectra, wei))
    if self.use_emline_flux:
        self.emlinefluxSSP = np.dot(self.ssp_emlineflux, wei)
    else:
        # Emission lines unused: keep the first metallicity slice as a
        # placeholder with the expected (nline, nage) shape
        self.emlinefluxSSP = self.ssp_emlineflux[:, :, 0]
    return self.starSSP, self.nebSSP, self.emlinefluxSSP
def build_csp(self, sfr=None):
    '''Build a composite stellar population model for a given star
    formation history, dust attenuation law, and dust emission law.

    In addition to the returns it also modifies the emission-line flux
    dictionary (self.linefluxCSPdict) and, via
    self.measure_absorption_index(), the absorption-index dictionary.

    Parameters
    ----------
    sfr : numpy array (1 dim), optional
        Star formation rate sampled at self.ssp_ages; evaluated from
        the current SFH class when not supplied.

    Returns
    -------
    csp : numpy array (1 dim)
        Composite stellar population model (micro-Jy) at self.redshift
        (both stellar and nebular components)
    starcsp : numpy array (1 dim)
        Composite stellar population model (micro-Jy) at self.redshift
        (stellar component)
    nebcsp : numpy array (1 dim)
        Composite stellar population model (micro-Jy) at self.redshift
        (nebular component)
    mass : float
        Mass for csp given the SFH input
    mdust_eb : float
        Dust mass if dust emission is being fit AND assume energy balance
        (only present in the returned tuple in that case)
    '''
    # Collapse for metallicity
    starSSP, nebSSP, emlinefluxSSP = self.get_ssp_spectrum()
    # Need star formation rate from observation back to formation
    if sfr is None:
        sfr = self.sfh_class.evaluate(self.ssp_ages)
    ageval = 10**self.sfh_class.age  # Gyr
    # Treat the birth cloud and diffuse component separately
    age_birth = self.t_birth
    # Get dust-free CSPs, properly accounting for ages
    # ageval sets limit on ssp_ages that are useable in model calculation
    # age_birth separates birth cloud and diffuse components
    sel = (self.ssp_ages > age_birth) & (self.ssp_ages <= ageval)
    sel_birth = (self.ssp_ages <= age_birth) & (self.ssp_ages <= ageval)
    sel_age = self.ssp_ages <= ageval
    # The weight is the linear time between ages of each SSP
    weight = np.diff(np.hstack([0, self.ssp_ages])) * 1e9 * sfr
    # NOTE(review): weight_orig is never used below — candidate for removal
    weight_orig = weight.copy()
    weight_birth = weight.copy()
    weight_age = weight.copy()
    weight[~sel] = 0
    weight_birth[~sel_birth] = 0
    weight_age[~sel_age] = 0
    # Cover the two cases where ssp_ages contains ageval and when not
    # A: index of last acceptable SSP age
    A = np.nonzero(self.ssp_ages <= ageval)[0][-1]
    # indices of SSP ages that are too old
    select_too_old = np.nonzero(self.ssp_ages >= ageval)[0]
    if len(select_too_old):
        # B: index of first SSP that is too old
        B = select_too_old[0]
        # only adjust weight if ageval falls between two SSP age gridpoints
        if A != B:
            lw = ageval - self.ssp_ages[A]
            wei = lw * 1e9 * np.interp(ageval, self.ssp_ages, sfr)
            if ageval > age_birth:
                weight[B] = wei
            if ageval <= age_birth:
                weight_birth[B] = wei
            weight_age[B] = wei
    # Cover two cases where ssp_ages contains age_birth and when not
    # A: index of last acceptable SSP age
    A = np.nonzero(self.ssp_ages <= age_birth)[0][-1]
    # indices of SSP ages that are too old
    select_too_old = np.nonzero(self.ssp_ages >= age_birth)[0]
    if (len(select_too_old)>0):
        # B: index of first SSP that is too old
        B = select_too_old[0]
        if A != B:
            lw = age_birth - self.ssp_ages[A]
            wei = lw * 1e9 * np.interp(age_birth, self.ssp_ages, sfr)
            if ageval > age_birth:
                # split the partial bin between diffuse and birth cloud
                weight[B] = weight_age[B] - wei
            if ageval >= age_birth:
                weight_birth[B] = wei
            else:
                weight_birth[B] = weight_age[B]
    # Finally, do the matrix multiplication using the weights
    starspec_dustfree = np.dot(self.starSSP, weight)
    starspec_birth_dustfree = np.dot(self.starSSP, weight_birth)
    if type(self.nebSSP) != type(None):
        nebspec_dustfree = np.dot(self.nebSSP, weight)
        nebspec_birth_dustfree = np.dot(self.nebSSP, weight_birth)
    emlineflux_dustfree = np.dot(self.emlinefluxSSP, weight)
    emlineflux_birth_dustfree = np.dot(self.emlinefluxSSP, weight_birth)
    # total stellar mass formed within ageval
    mass = np.sum(weight_age)
    # Need to correct spectrum for dust attenuation
    Alam = self.dust_abs_class.evaluate(self.wave)
    starspec_dustobscured = starspec_dustfree * 10**(-0.4 * Alam)
    if type(self.nebSSP) != type(None):
        nebspec_dustobscured = nebspec_dustfree * 10**(-0.4 * Alam)
    # Correct the corresponding birth cloud spectrum separately
    Alam_birth = Alam / self.dust_abs_class.EBV_old_young
    starspec_birth_dustobscured = starspec_birth_dustfree * 10**(-0.4 * Alam_birth)
    if type(self.nebSSP) != type(None):
        nebspec_birth_dustobscured = nebspec_birth_dustfree * 10**(-0.4 * Alam_birth)
    # Compute attenuation for emission lines
    Alam_emline = self.dust_abs_class.evaluate(self.emlinewave,new_wave=True)
    Alam_emline_birth = Alam_emline / self.dust_abs_class.EBV_old_young
    emlineflux_dustobscured = emlineflux_dustfree * 10**(-0.4*Alam_emline)
    emlineflux_birth_dustobscured = emlineflux_birth_dustfree * 10**(-0.4*Alam_emline_birth)
    # Combine the young and old components
    starspec_dustfree += starspec_birth_dustfree
    starspec_dustobscured += starspec_birth_dustobscured
    if type(self.nebSSP) != type(None):
        nebspec_dustfree += nebspec_birth_dustfree
        nebspec_dustobscured += nebspec_birth_dustobscured
    emlineflux_dustfree += emlineflux_birth_dustfree
    emlineflux_dustobscured += emlineflux_birth_dustobscured
    # Combine the stellar and nebular components
    if type(self.nebSSP) != type(None):
        spec_dustfree = starspec_dustfree + nebspec_dustfree
        spec_dustobscured = starspec_dustobscured + nebspec_dustobscured
    else:
        spec_dustfree = starspec_dustfree.copy()
        spec_dustobscured = starspec_dustobscured.copy()
    if self.dust_em_class.assume_energy_balance:
        # Bolometric luminosity of dust attenuation (for energy balance)
        L_bol = (np.dot(self.dnu, spec_dustfree) - np.dot(self.dnu, spec_dustobscured))
        dust_em = self.dust_em_class.evaluate(self.wave)
        L_dust = np.dot(self.dnu,dust_em)
        # scaling factor (interpreted as dust mass) that balances energy
        mdust_eb = L_bol/L_dust
        spec_dustobscured += mdust_eb * dust_em
        # NOTE(review): the full dust-emission term is also added to the
        # nebular component here — confirm this attribution is intended
        if type(self.nebSSP) != type(None):
            nebspec_dustobscured += mdust_eb * dust_em
    else:
        spec_dustobscured += self.dust_em_class.evaluate(self.wave)
        if type(self.nebSSP) != type(None):
            nebspec_dustobscured += self.dust_em_class.evaluate(self.wave)
    # Redshift the spectrum to the observed frame
    csp = np.interp(self.wave, self.wave * (1. + self.redshift),
                    spec_dustobscured * (1. + self.redshift))
    if type(self.nebSSP) != type(None):
        starcsp = np.interp(self.wave, self.wave * (1. + self.redshift),
                            starspec_dustobscured * (1. + self.redshift))
        nebcsp = np.interp(self.wave, self.wave * (1. + self.redshift),
                           nebspec_dustobscured * (1. + self.redshift))
    else:
        starcsp = np.zeros(csp.shape)
        nebcsp = np.zeros(csp.shape)
    # Correct for ISM and/or IGM (or neither)
    if self.tauIGM_lam is not None:
        csp *= np.exp(-self.tauIGM_lam)
        starcsp *= np.exp(-self.tauIGM_lam)
        nebcsp *= np.exp(-self.tauIGM_lam)
    if self.tauISM_lam is not None:
        csp *= np.exp(-self.tauISM_lam)
        starcsp *= np.exp(-self.tauISM_lam)
        nebcsp *= np.exp(-self.tauISM_lam)
    # Correct spectra from 10pc to redshift of the source
    csp /= self.Dl**2
    starcsp /= self.Dl**2
    nebcsp /= self.Dl**2
    # Update dictionary of modeled emission line fluxes
    linefluxCSPdict = {}
    if self.use_emline_flux:
        for emline in self.emline_dict.keys():
            # nearest wavelength pixel to the catalogued line center
            indx = np.argmin(np.abs(self.emlinewave
                             - self.emline_dict[emline][0]))
            # flux is given in ergs / s / cm2 at 10 pc
            flux = emlineflux_dustobscured[indx]
            # Correct flux from 10pc to redshift of source
            linefluxCSPdict[emline] = flux / self.Dl**2
    self.linefluxCSPdict = linefluxCSPdict
    # Update dictionary of modeled absorption line indices
    self.measure_absorption_index(spectrum=csp)
    if self.dust_em_class.assume_energy_balance:
        return csp, starcsp, nebcsp, mass, mdust_eb
    else:
        return csp, starcsp, nebcsp, mass
def measure_chi2(self, spectrum):
    '''
    Measure chi2 from the input spectrum. Used in measuring chi2 from
    the median spectrum, emline fluxes, and absorption line indices.

    Parameters
    ----------
    spectrum : 1d array
        same shape as self.wave

    Returns
    -------
    None; updates the self.chi2 dictionary (keys 'chi2', 'rchi2',
    'dof').  The 'dof' entry and the degree-of-freedom weights are
    only computed the first time through (while self.chi2 is still
    empty/falsy); 'chi2' and 'rchi2' are refreshed on every call.
    '''
    # likelihood contribution from the photometry
    model_y = self.get_filter_fluxdensities(spectrum=spectrum)
    # sigma_m is a fractional model uncertainty added in quadrature
    inv_sigma2 = 1.0 / (self.data_fnu_e**2 + (model_y * self.sigma_m)**2)
    chi2_term = np.sum((self.data_fnu - model_y)**2 * inv_sigma2)
    # calculate the degrees of freedom and store the current chi2 value
    if not self.chi2:
        dof_wht = list(np.ones(len(self.data_fnu)))
    # likelihood contribution from the absorption line indices
    if self.use_absorption_indx:
        self.measure_absorption_index(spectrum=spectrum)
        for indx in self.absindx_dict.keys():
            unit = self.absindx_dict[indx][-1]
            # if null value, ignore it (null = -99)
            if (self.data_absindx['%s_INDX' % indx]+99 > 1e-10):
                indx_weight = self.absindx_dict[indx][0]
                if indx_weight < 1e-10:
                    continue
                model_indx = self.absindxCSPdict[indx]
                if unit == 1: # magnitudes
                    model_err = 2.5*np.log10(1.+self.sigma_m)
                else:
                    model_err = model_indx * self.sigma_m
                obs_indx = float(self.data_absindx['%s_INDX' % indx])
                obs_indx_e = float(self.data_absindx_e['%s_Err' % indx])
                sigma2 = obs_indx_e**2. + model_err**2.
                chi2_term += ( (model_indx - obs_indx)**2 /
                               sigma2) * indx_weight
                if not self.chi2:
                    dof_wht.append(indx_weight)
    # likelihood contribution from the emission lines
    if self.use_emline_flux:
        # if all lines have null line strengths, ignore
        if not min(self.data_emline) == max(self.data_emline) == -99:
            for emline in self.emline_dict.keys():
                if self.data_emline['%s_FLUX' % emline] > -99: # null value
                    emline_wave, emline_weight = self.emline_dict[emline]
                    if emline_weight < 1e-10:
                        continue
                    model_lineflux = self.linefluxCSPdict[emline]
                    model_err = model_lineflux * self.sigma_m
                    lineflux = float(self.data_emline['%s_FLUX' % emline])
                    elineflux = float(self.data_emline_e['%s_ERR' % emline])
                    sigma2 = elineflux**2. + model_err**2.
                    chi2_term += ( (model_lineflux - lineflux)**2 /
                                   sigma2) * emline_weight
                    if not self.chi2:
                        dof_wht.append(emline_weight)
    # record current chi2 and degrees of freedom
    if not self.chi2:
        self.chi2 = {}
        dof_wht = np.array(dof_wht)
        # effective number of data points from the (possibly uneven) weights
        npt = ( np.sum(dof_wht)**2. - np.sum(dof_wht**2.) ) / np.sum(dof_wht) + 1
        self.chi2['dof'] = npt - self.nfreeparams
    self.chi2['chi2'] = chi2_term
    self.chi2['rchi2'] = self.chi2['chi2'] / (self.chi2['dof'] - 1.)
def lnprior(self):
    ''' Simple, uniform prior for input variables

    Each parameter class reports whether its current parameters are in
    bounds via prior(); every class is evaluated (no short-circuit, to
    match the original multiplicative-flag behavior).

    Returns
    -------
    float
        0.0 if all parameters are in bounds, -np.inf if any are out
        of bounds
    '''
    flags = [getattr(self, par_cl).prior() for par_cl in self.param_classes]
    return 0.0 if all(flags) else -np.inf
def lnlike(self):
    ''' Calculate the log likelihood and return the value and stellar mass
    of the model as well as other derived parameters

    Returns
    -------
    log likelihood, mass, sfr10, sfr100, fpdr, mdust_eb : (all float)
        The log likelihood includes a chi2_term and a parameters term.
        The mass comes from building of the composite stellar population
        The parameters sfr10, sfr100, fpdr, mdust_eb are derived in
        get_derived_params(self)

    Side effects: caches the freshly built spectra on self and updates
    the self.chi2 bookkeeping dictionary ('dof' is only computed on the
    first call, while self.chi2 is still falsy).
    '''
    if self.dust_em_class.assume_energy_balance:
        self.spectrum, self.starspectrum, self.nebspectrum, mass, mdust_eb = self.build_csp()
    else:
        self.spectrum, self.starspectrum, self.nebspectrum, mass = self.build_csp()
        # no energy balance: dust mass is not derived
        mdust_eb = None
    sfr10,sfr100,fpdr = self.get_derived_params()
    # likelihood contribution from the photometry
    model_y = self.get_filter_fluxdensities()
    # sigma_m is a fractional model uncertainty added in quadrature
    inv_sigma2 = 1.0 / (self.data_fnu_e**2 + (model_y * self.sigma_m)**2)
    chi2_term = -0.5 * np.sum((self.data_fnu - model_y)**2 * inv_sigma2)
    # Gaussian normalization term (depends on model_y through sigma_m)
    parm_term = -0.5 * np.sum(np.log(1 / inv_sigma2))
    # calculate the degrees of freedom and store the current chi2 value
    if not self.chi2:
        dof_wht = list(np.ones(len(self.data_fnu)))
    # likelihood contribution from the absorption line indices
    if self.use_absorption_indx:
        for indx in self.absindx_dict.keys():
            unit = self.absindx_dict[indx][-1]
            # if null value, ignore it (null = -99)
            if (self.data_absindx['%s_INDX' % indx]+99 > 1e-10):
                indx_weight = self.absindx_dict[indx][0]
                if indx_weight < 1e-10:
                    continue
                model_indx = self.absindxCSPdict[indx]
                if unit == 1: # magnitudes
                    model_err = 2.5*np.log10(1.+self.sigma_m)
                else:
                    model_err = model_indx * self.sigma_m
                obs_indx = float(self.data_absindx['%s_INDX' % indx])
                obs_indx_e = float(self.data_absindx_e['%s_Err' % indx])
                sigma2 = obs_indx_e**2. + model_err**2.
                chi2_term += (-0.5 * (model_indx - obs_indx)**2 /
                              sigma2) * indx_weight
                parm_term += -0.5 * np.log(indx_weight * sigma2)
                if not self.chi2:
                    dof_wht.append(indx_weight)
    # likelihood contribution from the emission lines
    if self.use_emline_flux:
        # if all lines have null line strengths, ignore
        if not min(self.data_emline) == max(self.data_emline) == -99:
            for emline in self.emline_dict.keys():
                if self.data_emline['%s_FLUX' % emline] > -99: # null value
                    emline_wave, emline_weight = self.emline_dict[emline]
                    if emline_weight < 1e-10:
                        continue
                    model_lineflux = self.linefluxCSPdict[emline]
                    model_err = model_lineflux * self.sigma_m
                    lineflux = float(self.data_emline['%s_FLUX' % emline])
                    elineflux = float(self.data_emline_e['%s_ERR' % emline])
                    sigma2 = elineflux**2. + model_err**2.
                    chi2_term += (-0.5 * (model_lineflux - lineflux)**2 /
                                  sigma2) * emline_weight
                    parm_term += -0.5 * np.log(emline_weight * sigma2)
                    if not self.chi2:
                        dof_wht.append(emline_weight)
    # record current chi2 and degrees of freedom
    if not self.chi2:
        self.chi2 = {}
        dof_wht = np.array(dof_wht)
        # effective number of data points from the weights
        npt = ( np.sum(dof_wht)**2. - np.sum(dof_wht**2.) ) / np.sum(dof_wht) + 1
        self.chi2['dof'] = npt - self.nfreeparams
    # chi2_term is -chi2/2, so store -2x for the conventional chi2
    self.chi2['chi2'] = -2. * chi2_term
    self.chi2['rchi2'] = self.chi2['chi2'] / (self.chi2['dof'] - 1.)
    return (chi2_term + parm_term, mass,sfr10,sfr100,fpdr,mdust_eb)
def lnprob(self, theta):
    ''' Calculate the log probability for a parameter vector ``theta``.

    Returns
    -------
    tuple
        (log prior + log likelihood, array of derived quantities).
        The derived array always holds [mass, sfr10, sfr100]; fpdr is
        appended when dust emission is free, and mdust_eb is appended
        as well under the energy-balance assumption.  Every entry
        (including the log probability) is -inf when the prior is
        violated.
    '''
    self.set_class_parameters(theta)
    # The number of derived quantities depends on the dust-emission setup
    if self.dust_em_class.fixed:
        nderived = 3
    elif self.dust_em_class.assume_energy_balance:
        nderived = 5
    else:
        nderived = 4
    lp = self.lnprior()
    if not np.isfinite(lp):
        return -np.inf, np.full(nderived, -np.inf)
    lnl, mass, sfr10, sfr100, fpdr, mdust_eb = self.lnlike()
    derived = np.array([mass, sfr10, sfr100, fpdr, mdust_eb][:nderived])
    return lp + lnl, derived
def get_init_walker_values(self, kind='ball', num=None):
    ''' Generate starting positions for the emcee walkers.

    Parameters
    ----------
    kind : str
        'ball' draws a small Gaussian ball around the current model
        parameters; anything else draws uniformly over the central 80%
        of each parameter's allowed range.
    num : int or None
        Number of walkers; defaults to self.nwalkers.

    Returns
    -------
    pos : np.array (2 dim)
        Two dimensional array with Nwalker x Ndim values
    '''
    params, deltas, limits = [], [], []
    for name in self.param_classes:
        par_cl = getattr(self, name)
        params.append(par_cl.get_params())
        deltas.append(par_cl.get_param_deltas())
        lims = par_cl.get_param_lims()
        # Some classes expose no limits; skip their empty lists
        if len(lims):
            limits.append(lims)
    theta = list(np.hstack(params))
    theta_e = list(np.hstack(deltas))
    bounds = np.vstack(limits)
    nwalk = self.nwalkers if num is None else num
    if kind == 'ball':
        return emcee.utils.sample_ball(theta, theta_e, size=nwalk)
    # Uniform draws over [low + 0.1*span, low + 0.9*span] per parameter
    span = (bounds[:, 1] - bounds[:, 0])[np.newaxis, :]
    low = bounds[np.newaxis, :, 0] + 0.1 * span
    return np.random.rand(nwalk, len(bounds)) * span * 0.8 + low
def get_param_names(self):
    ''' Collect the parameter names from every parameter class.

    Returns
    -------
    names : list
        Flat list of all parameter names, in the same order that the
        parameters appear in a theta vector.
    '''
    per_class = [getattr(self, par_cl).get_names()
                 for par_cl in self.param_classes]
    return list(np.hstack(per_class))
def get_params(self):
    ''' Collect the current parameter values from every parameter class.

    Also refreshes self.nfreeparams with the total parameter count.

    Returns
    -------
    vals : list
        Flat list of all current parameter values.
    '''
    per_class = [getattr(self, par_cl).get_params()
                 for par_cl in self.param_classes]
    vals = list(np.hstack(per_class))
    self.nfreeparams = len(vals)
    return vals
def get_param_lims(self):
    ''' Gather the parameter limits of every parameter class.

    Returns
    -------
    limits : numpy array (2 dim)
        One row per parameter; columns are the (lower, upper) bounds.
    '''
    flat = []
    for par_cl in self.param_classes:
        flat.extend(getattr(self, par_cl).get_param_lims())
    return np.array(flat)
def fit_model(self):
    ''' Using emcee to find parameter estimations for given set of
    data measurements and errors.

    Side effects: sets self.chain (raw emcee chain), self.samples
    (post-burn-in, flattened, with derived parameters and ln-prob
    appended as extra columns), self.full_chains, and self.burn_in.
    '''
    # Need to verify data parameters have been set since this is not
    # a necessity on initiation
    self.log.info('Fitting model using emcee')
    check_vars = ['data_fnu', 'data_fnu_e', 'redshift', 'filter_flag']
    for var in check_vars:
        if getattr(self, var) is None:
            self.error('The variable %s must be set first' % var)
    # uniform draws within parameter bounds (not a ball around a guess)
    pos = self.get_init_walker_values(kind='notball')
    ndim = pos.shape[1]
    start = time.time()
    # NOTE(review): 'moves' is created but never passed to the sampler;
    # the sampler uses the default stretch move via a=2.0 — confirm intent
    moves = emcee.moves.StretchMove()
    sampler = emcee.EnsembleSampler(self.nwalkers, ndim, self.lnprob,
                                    a=2.0)
    # Do real run
    sampler.run_mcmc(pos, self.nsteps, rstate0=np.random.get_state(),
                     progress=self.progress_bar)
    end = time.time()
    elapsed = end - start
    self.log.info("Total time taken: %0.2f s" % elapsed)
    self.log.info("Time taken per step per walker: %0.2f ms" %
                  (elapsed / (self.nsteps) * 1000. /
                   self.nwalkers))
    # Calculate how long the run should last
    if not self.force_emcee_finish:
        # burn-in = 3 autocorrelation times; raises if acor is undefined
        tau = np.max(sampler.acor)
        burnin_step = int(tau*3)
    else:
        try:
            tau = np.max(sampler.acor)
            burnin_step = int(tau*3)
        except (emcee.autocorr.AutocorrError, ValueError):
            # ValueError to catch issue when tau is NaN
            # potentially could change np.max to np.nanmax to ignore nan acor
            tau = -99
            burnin_step = int(self.nsteps * self.burnin_fraction)
    self.log.info("Mean acceptance fraction: %0.2f" %
                  (np.mean(sampler.acceptance_fraction)))
    self.log.info("AutoCorrelation Steps: %i, Number of Burn-in Steps: %i"
                  % (np.round(tau), burnin_step))
    # number of derived-parameter columns appended to the chain
    if self.dust_em_class.fixed:
        numderpar = 3
    else:
        if self.dust_em_class.assume_energy_balance:
            numderpar = 5
        else:
            numderpar = 4
    new_chain = np.zeros((self.nwalkers, self.nsteps, ndim+numderpar+1))
    new_chain[:, :, :-(numderpar+1)] = sampler.chain
    self.chain = sampler.chain
    # blobs are indexed [step][walker][derived]; new_chain is
    # (walker, step, column), hence the [j, i, ...] ordering below
    for i in np.arange(len(sampler.blobs)):
        for j in np.arange(len(sampler.blobs[0])):
            for k in np.arange(len(sampler.blobs[0][0])):
                x = sampler.blobs[i][j][k]
                # stellar mass and dust mass: stored as log10, with a
                # floor of 10 (linear) before taking the log
                if k==0 or k==4:
                    new_chain[j, i, -(numderpar+1)+k] = np.where((np.isfinite(x)) * (x > 10.),
                                                                 np.log10(x), -99.)
                # other derived parameters
                else:
                    new_chain[j, i, -(numderpar+1)+k] = np.where((np.isfinite(x)),np.log10(x), -99.)
    new_chain[:, :, -1] = sampler.lnprobability
    self.samples = new_chain[:, burnin_step:, :].reshape((-1, ndim+numderpar+1))
    # full posterior
    self.full_chains = new_chain
    self.burn_in = burnin_step
def get_derived_params(self):
    ''' Compute quantities that are derived from (not sampled as) the
    free parameters.

    Returns
    -------
    sfr10, sfr100 : float
        SFR time-averaged over the past 10 and 100 Myr, respectively.
    fpdr : float or None
        Fraction of dust luminosity coming from photodissociation
        regions; None when dust emission is held fixed.
    '''
    def _mean_sfr(t_max):
        # Time-averaged SFR over [~0, t_max] Gyr; the lower bound is
        # slightly above zero to stay safe for log-based SFH classes.
        t = np.linspace(1.0e-9, t_max, num=251)
        return simps(self.sfh_class.evaluate(t), x=t) / (t[-1] - t[0])

    sfr100 = _mean_sfr(0.1)
    sfr10 = _mean_sfr(0.01)
    if self.dust_em_class.fixed:
        return sfr10, sfr100, None
    if self.dust_em_class.assume_energy_balance:
        umin, gamma, qpah = self.dust_em_class.get_params()
    else:
        umin, gamma, qpah, mdust = self.dust_em_class.get_params()
    umax = 1.0e6
    fpdr = (gamma * np.log(umax / 100.) /
            ((1. - gamma) * (1. - umin / umax) + gamma * np.log(umax / umin)))
    return sfr10, sfr100, fpdr
def set_median_fit(self,rndsamples=200,lnprobcut=7.5):
    '''
    set attributes
    median spectrum and filter flux densities for rndsamples random samples

    Input
    -----
    rndsamples : int
        number of random samples over which to compute medians
    lnprobcut : float
        Some of the emcee chains include outliers. This value serves as
        a cut in log probability space with respect to the maximum
        probability. For reference, a Gaussian 1-sigma is 2.5 in log prob
        space.

    Returns
    -------
    self.fluxwv : list (1d)
        wavelengths of filters
    self.fluxfn : list (1d)
        median flux densities of filters
    self.medianspec : list (1d)
        median spectrum (stellar and nebular)
    self.medianstarspec : list (1d)
        median stellar spectrum
    self.mediannebspec : list (1d)
        median nebular spectrum
    self.absindxCSPdict : dict
        update with median absorption line index measurements
    self.linefluxCSPdict : dict
        update with median emission line flux measurements
    self.chi2 : dict
        update dictionary (chi2, reduced chi2, and degrees of freedom)
        as measured from medianspec and median line fluxes / indexes
    '''
    # drop low-probability outlier samples relative to the best one
    chi2sel = (self.samples[:, -1] >
               (np.max(self.samples[:, -1], axis=0) - lnprobcut))
    nsamples = self.samples[chi2sel, :]
    wv = self.get_filter_wavelengths()
    sp, starsp, nebsp, fn = ([], [], [], [])
    temline, tabsindx, tchi2 = ( Table(), Table(), Table() )
    for i in np.arange(rndsamples):
        # draw a random posterior sample and rebuild its model
        # (set_class_parameters only consumes the leading free-parameter
        # columns; trailing derived/ln-prob columns are ignored)
        ind = np.random.randint(0, nsamples.shape[0])
        self.set_class_parameters(nsamples[ind, :])
        if self.dust_em_class.assume_energy_balance:
            self.spectrum, self.starspectrum, self.nebspectrum, mass, mdust_eb = self.build_csp()
        else:
            self.spectrum, self.starspectrum, self.nebspectrum, mass = self.build_csp()
        fnu = self.get_filter_fluxdensities()
        # multiply by 1. to store copies, not references
        sp.append(self.spectrum * 1.)
        starsp.append(self.starspectrum * 1.)
        nebsp.append(self.nebspectrum * 1.)
        fn.append(fnu * 1.)
        if self.use_emline_flux:
            # accumulate this sample's line fluxes as a one-row table
            tloc = Table()
            if not len(temline):
                cols = list(self.linefluxCSPdict.keys())
            else:
                cols = temline.colnames
            for emline in cols:
                tloc[emline] = [self.linefluxCSPdict[emline]]
            temline = vstack([temline, tloc])
        if self.use_absorption_indx:
            # accumulate this sample's absorption indices likewise
            tloc = Table()
            if not len(tabsindx):
                cols = list(self.absindxCSPdict.keys())
            else:
                cols = tabsindx.colnames
            for indx in cols:
                tloc[indx] = [self.absindxCSPdict[indx]]
            tabsindx = vstack([tabsindx, tloc])
    self.medianspec = np.median(np.array(sp), axis=0)
    self.medianstarspec = np.median(np.array(starsp), axis=0)
    self.mediannebspec = np.median(np.array(nebsp), axis=0)
    self.fluxwv = wv
    self.fluxfn = np.median(np.array(fn), axis=0)
    if self.use_emline_flux:
        for emline in temline.colnames:
            self.linefluxCSPdict[emline] = np.median(temline[emline])
    if self.use_absorption_indx:
        for indx in tabsindx.colnames:
            self.absindxCSPdict[indx] = np.median(tabsindx[indx])
    # refresh the chi2 bookkeeping using the median spectrum
    self.measure_chi2(spectrum=self.medianspec)
def spectrum_plot(self, ax, color=(0.996, 0.702, 0.031), alpha=0.1):
    ''' Make spectrum plot for the current model state.

    Rebuilds the CSP from the current class parameters, stashes copies
    of the spectra as the "true" spectra (used when overplotting mock
    input truths), and draws the total spectrum on ``ax``.

    Parameters
    ----------
    ax : matplotlib axis
        Axis to draw the spectrum on.
    color : color spec
        Line color; the default is a tuple (rather than a list) to
        avoid the shared mutable-default-argument pitfall.
    alpha : float
        Line transparency.
    '''
    if self.dust_em_class.assume_energy_balance:
        (self.spectrum, self.starspectrum, self.nebspectrum,
         mass, mdust_eb) = self.build_csp()
    else:
        (self.spectrum, self.starspectrum,
         self.nebspectrum, mass) = self.build_csp()
    # keep copies so later build_csp calls cannot mutate the "truths"
    self.true_spectrum = self.spectrum.copy()
    self.true_starspectrum = self.starspectrum.copy()
    self.true_nebspectrum = self.nebspectrum.copy()
    ax.plot(self.wave, self.spectrum, color=color, alpha=alpha)
def add_sfr_plot(self, ax1):
    ''' Style the SFR panel: log-log axes with labeled decade ticks,
    SFR vs. lookback time. '''
    xticks = [1e-3, 1e-2, 1e-1, 1]
    xlabels = ['1 Myr', '10 Myr', '100 Myr', '1 Gyr']
    yticks = [1e-2, 1e-1, 1, 1e1, 1e2, 1e3]
    ylabels = ['0.01', '0.1', '1', '10', '100', '1000']
    ax1.set_xscale('log')
    ax1.set_yscale('log')
    ax1.set_ylabel(r'SFR [$M_{\odot} yr^{-1}$]')
    ax1.set_xlabel('Lookback Time')
    ax1.set_xticks(xticks)
    ax1.set_xticklabels(xlabels)
    ax1.set_yticks(yticks)
    ax1.set_yticklabels(ylabels)
    # Upper x-limit extends out to the SFH age (at least 1 Gyr)
    upper = max(10**self.sfh_class.age, 1.)
    ax1.set_xlim([10**-3, upper])
    ax1.set_ylim([10**-2.3, 1e3])
    ax1.minorticks_on()
def add_dust_plot(self, ax2):
    ''' Style the dust-attenuation panel: A_lambda vs. wavelength in
    Angstroms, log-scaled x-axis. '''
    xticks = [2000, 4000, 8000]
    ax2.set_xscale('log')
    ax2.set_xticks(xticks)
    ax2.set_xticklabels([str(t) for t in xticks])
    ax2.set_xlim([1000, 10000])
    ax2.set_ylim([0.01, 4])
    ax2.set_ylabel(r'$A_\lambda$ [mag]')
    ax2.set_xlabel(r'Wavelength [$\AA$]')
def add_spec_plot(self, ax3):
    ''' Style the spectrum panel: log-log axes, wavelength labels in
    microns.  Tick layout and wavelength coverage depend on whether
    dust emission (and hence the far-IR) is part of the fit. '''
    if self.dust_em_class.fixed:
        ticks = [1000, 3000, 5000, 10000, 20000, 40000]
        labels = ['0.1', '0.3', '0.5', '1', '2', '4']
        rest_range, pad = [1150, 20000], 5000
    else:
        ticks = [3000, 5000, 10000, 40000, 100000, 400000, 1000000]
        labels = ['0.3', '0.5', '1', '4', '10', '40', '100']
        rest_range, pad = [1150, 700000], 50000
    xlims = (1. + self.redshift) * np.array(rest_range)
    # Always include every photometric point, with margins on each side
    xlims[0] = min(xlims[0], min(self.fluxwv) - 200)
    xlims[1] = max(xlims[1], max(self.fluxwv) + pad)
    ax3.set_xscale('log')
    ax3.set_yscale('log')
    ax3.set_xticks(ticks)
    ax3.set_xticklabels(labels)
    ax3.set_xlim(xlims)
    ax3.set_xlabel(r'Wavelength [$\mu$m]')
    ax3.set_ylabel(r'$F_{\nu}$ [$\mu$Jy]')
def add_subplots(self, ax1, ax2, ax3, nsamples, rndsamples=200):
    ''' Add Subplots to Triangle plot below.

    Draws rndsamples random posterior models on the SFH (ax1), dust
    attenuation (ax2), and spectrum (ax3) panels, then overlays the
    median spectrum, model photometry, observed photometry, and (when
    fitting mock input) the known truths.
    '''
    sp, fn = ([], [])
    for i in np.arange(rndsamples):
        # random posterior draw; each plot call adds a translucent curve
        ind = np.random.randint(0, nsamples.shape[0])
        self.set_class_parameters(nsamples[ind, :])
        self.sfh_class.plot(ax1, alpha=0.1)
        self.dust_abs_class.plot(ax2, self.wave, alpha=0.1)
        self.spectrum_plot(ax3, alpha=0.1)
    # median model spectrum and its filter flux densities
    ax3.plot(self.wave, self.medianspec, color='dimgray')
    ax3.scatter(self.fluxwv, self.fluxfn, marker='x', s=200,
                color='dimgray', zorder=8)
    # plot "truths" if in test mode
    if self.input_params is not None:
        self.set_class_parameters(self.input_params)
        self.sfh_class.plot(ax1, color='k', alpha=1.0)
        self.dust_abs_class.plot(ax2, self.wave, color='k', alpha=1.0)
        self.spectrum_plot(ax3, color='k', alpha=0.5)
        if self.true_fnu is not None:
            p = ax3.scatter(self.fluxwv, self.true_fnu, marker='o', s=150,
                            color=[0.216, 0.471, 0.749], zorder=9)
            p.set_facecolor('none')
    # observed photometry with error bars (open squares)
    ax3.errorbar(self.fluxwv, self.data_fnu, yerr=self.data_fnu_e, fmt='s',
                 fillstyle='none', markersize=15,
                 color=[0.510, 0.373, 0.529], zorder=10)
    ax3.scatter(self.fluxwv, self.data_fnu, marker='s', s=150,facecolors='none',
                edgecolors=[0.510, 0.373, 0.529], linewidths=2, zorder=10)
    # y-limits from the positive, in-range observed fluxes
    sel = np.where((self.fluxwv > ax3.get_xlim()[0]) * (self.fluxwv < ax3.get_xlim()[1]))[0]
    ax3min = np.percentile(self.data_fnu[sel][self.data_fnu[sel]>0.0], 1)
    ax3max = np.percentile(self.data_fnu[sel][self.data_fnu[sel]>0.0], 99)
    ax3ran = ax3max - ax3min
    if not self.dust_em_class.fixed:
        # dust emission in the fit: extend limits to cover the far-IR
        ax3max = max(max(self.data_fnu),max(self.medianspec))
        ax3.set_ylim([ax3min*0.5, ax3max + 0.4 * ax3ran])
        ax3.set_xlim(right=max(max(self.fluxwv),max(self.wave)))
    else:
        ax3.set_ylim([ax3min - 0.4 * ax3ran, ax3max + 0.6 * ax3ran])
    # annotate with the reduced chi2 of the median fit
    ax3.text((1.+self.redshift)*1400, ax3max,
             r'${\chi}_{\nu}^2 = $%0.2f' % self.chi2['rchi2'])
def triangle_plot(self, outname, lnprobcut=7.5, imgtype='png'):
    ''' Make a triangle corner plot for samples from fit

    Input
    -----
    outname : string
        The triangle plot will be saved as "{outname}.{imgtype}"
    lnprobcut : float
        Some of the emcee chains include outliers. This value serves as
        a cut in log probability space with respect to the maximum
        probability. For reference, a Gaussian 1-sigma is 2.5 in log prob
        space.
    imgtype : string
        The file extension of the output plot
    '''
    # Make selection for three sigma sample
    chi2sel = (self.samples[:, -1] >
               (np.max(self.samples[:, -1], axis=0) - lnprobcut))
    nsamples = self.samples[chi2sel, :]
    # o: offset into the parameter list (kept at 0 here)
    o = 0
    names = self.get_param_names()[o:]
    names.append('Log Mass')
    if self.dust_em_class.assume_energy_balance:
        names.append("Log Dust Mass")
    if self.input_params is not None:
        truths = self.input_params[o:]
    else:
        truths = None
    # NOTE(review): this percentilerange is immediately overwritten by
    # the next line — dead code, candidate for removal
    percentilerange = [p for i, p in enumerate(self.get_param_lims())
                       if i >= o] + [[7, 11]]
    percentilerange = [.97] * len(names)
    # number of derived-parameter columns appended to the samples
    if self.dust_em_class.fixed:
        numderpar = 3
    else:
        if self.dust_em_class.assume_energy_balance:
            numderpar = 5
        else:
            numderpar = 4
    # columns to show: free parameters plus log mass (and log dust
    # mass, column -2, under energy balance)
    if self.dust_em_class.assume_energy_balance:
        indarr = np.concatenate((np.arange(o,len(nsamples[0])-numderpar),np.array([-2])))
    else:
        indarr = np.arange(o,len(nsamples[0])-numderpar)
    # font size scales with the number of panels
    fsgrad = 11+int(round(0.75*len(indarr)))
    fig = corner.corner(nsamples[:, indarr], labels=names,
                        range=percentilerange, color='rebeccapurple',
                        truths=truths, truth_color='gainsboro',
                        label_kwargs={"fontsize": fsgrad}, show_titles=True,
                        title_kwargs={"fontsize": fsgrad-2},
                        quantiles=[0.16, 0.5, 0.84], bins=20,
                        **{'plot_density':False,
                           'plot_datapoints':False,
                           'fill_contours': True,
                           'plot_contours': True,
                           'no_fill_contours': False})
    w = fig.get_figwidth()
    fig.set_figwidth(w-(len(indarr)-13)*0.025*w)
    # Adding subplots
    # NOTE(review): the figwidth adjustment above is repeated here,
    # applying the shrink factor twice — confirm this is intentional
    w = fig.get_figwidth()
    fig.set_figwidth(w-(len(indarr)-13)*0.025*w)
    # inset panels positioned empirically as a function of panel count
    ax1 = fig.add_subplot(3, 1, 1)
    ax1.set_position([0.7-0.02*(len(indarr)-5), 0.60+0.001*(len(indarr)-5),
                      0.28+0.02*(len(indarr)-5), 0.15+0.001*(len(indarr)-5)])
    ax2 = fig.add_subplot(3, 1, 2)
    ax2.set_position([0.7+0.008*(15-len(indarr)), 0.39,
                      0.28-0.008*(15-len(indarr)), 0.15])
    ax3 = fig.add_subplot(3, 1, 3)
    ax3.set_position([0.38-0.008*(len(indarr)-4), 0.82-0.001*(len(indarr)-4),
                      0.60+0.008*(len(indarr)-4), 0.15+0.001*(len(indarr)-4)])
    self.add_sfr_plot(ax1)
    self.add_dust_plot(ax2)
    self.add_spec_plot(ax3)
    self.add_subplots(ax1, ax2, ax3, nsamples)
    for ax_loc in fig.axes:
        ax_loc.minorticks_on()
        # NOTE(review): the string 'False' is truthy, so this actually
        # enables set_axisbelow — probably meant the boolean False
        ax_loc.set_axisbelow('False')
    fig.savefig("%s.%s" % (outname, imgtype), dpi=150)
    plt.close(fig)
def sample_plot(self, outname, imgtype='png'):
    ''' Plot every walker's chain for each parameter vs. step number.

    Saves the figure as "sample_{outname}.png" style output, i.e.
    "{outname}.{imgtype}".  When the fitter was run on mock input, the
    true parameter values are overplotted as dashed red lines.

    Input
    -----
    outname : string
        Output file stem.
    imgtype : string
        The file extension of the output plot.
    '''
    names = self.get_param_names()
    truths = self.input_params if self.input_params is not None else None
    nparams = self.chain.shape[2]
    fig, ax = plt.subplots(nparams, 1, sharex=True,
                           figsize=(5, 2 * nparams))
    for i, a in enumerate(ax):
        # one translucent black trace per walker
        for walker in self.chain[:, :, i]:
            a.plot(walker, 'k-', alpha=0.3)
        a.set_ylabel(names[i])
        if truths is not None:
            a.plot([0, self.chain.shape[1]], [truths[i], truths[i]], 'r--')
        if i == len(ax) - 1:
            a.set_xlabel("Step")
    for ax_loc in fig.axes:
        ax_loc.minorticks_on()
    plt.tight_layout()
    fig.savefig("%s.%s" % (outname, imgtype))
    plt.close(fig)
def add_fitinfo_to_table(self, percentiles, start_value=3, lnprobcut=7.5,
                         numsamples=1000):
    ''' Write parameter percentiles into the last row of self.table.

    Assumes that "Ln Prob" is the last column in self.samples.  The
    table columns are laid out parameter-major: percentile i of
    parameter j lands at column i + start_value + j * len(percentiles).

    Returns
    -------
    int
        The last table column index written.
    '''
    # Clip low-probability outliers relative to the best sample,
    # dropping the trailing ln-prob column
    keep = (self.samples[:, -1] >
            (np.max(self.samples[:, -1], axis=0) - lnprobcut))
    good = self.samples[keep, :-1]
    nper = len(percentiles)
    # shape (nper, ncolumns): all requested percentiles in one call
    pvals = np.percentile(good, percentiles, axis=0)
    for i, row in enumerate(pvals):
        for j, v in enumerate(row):
            self.table[-1][(i + start_value + j * nper)] = v
    return (i + start_value + j * nper)
def add_truth_to_table(self, truth, start_value):
    ''' Copy the true parameter values into the last row of self.table,
    beginning one column past ``start_value``. '''
    col = start_value + 1
    for tr in truth:
        self.table[-1][col] = tr
        col += 1
|
wpb-astroREPO_NAMEMCSEDPATH_START.@MCSED_extracted@MCSED-master@mcsed.py@.PATH_END.py
|
{
"filename": "Display.py",
"repo_name": "3fon3fonov/exostriker",
"repo_path": "exostriker_extracted/exostriker-main/exostriker/lib/pyqtgraph/flowchart/library/Display.py",
"type": "Python"
}
|
import numpy as np
from ... import ComboBox, PlotDataItem
from ...graphicsItems.ScatterPlotItem import ScatterPlotItem
from ...Qt import QtCore, QtGui, QtWidgets
from ..Node import Node
from .common import CtrlNode
class PlotWidgetNode(Node):
    """Connection to PlotWidget. Will plot arrays, metaarrays, and display event lists."""
    nodeName = 'PlotWidget'
    sigPlotChanged = QtCore.Signal(object)

    def __init__(self, name):
        Node.__init__(self, name, terminals={'In': {'io': 'in', 'multi': True}})
        self.plot = None   # currently selected plot
        self.plots = {}    # dict of available plots user may select from
        self.ui = None     # selection ComboBox; created lazily in ctrlWidget()
        self.items = {}    # maps id(value)/terminal -> graphics item shown on the plot

    def disconnected(self, localTerm, remoteTerm):
        """Remove the item belonging to *remoteTerm* when it disconnects."""
        if localTerm is self['In'] and remoteTerm in self.items:
            self.plot.removeItem(self.items[remoteTerm])
            del self.items[remoteTerm]

    def setPlot(self, plot):
        """Select *plot* as the display target, clearing items off the old one."""
        #print "======set plot"
        if plot == self.plot:
            return
        # clear data from previous plot
        if self.plot is not None:
            for vid in list(self.items.keys()):
                self.plot.removeItem(self.items[vid])
                del self.items[vid]
        self.plot = plot
        self.updateUi()
        self.update()
        self.sigPlotChanged.emit(self)

    def getPlot(self):
        """Return the currently selected plot (may be None)."""
        return self.plot

    def process(self, In, display=True):
        """Display every value arriving on the multi-input terminal.

        *In* maps input names to values; each value may be a single item or
        a list. QGraphicsItems are added directly; other values are passed
        to ``plot.plot()``. Items no longer present in the input are removed.
        """
        if display and self.plot is not None:
            items = set()
            # Add all new input items to selected plot
            for name, vals in In.items():
                if vals is None:
                    continue
                if type(vals) is not list:
                    vals = [vals]
                for val in vals:
                    vid = id(val)
                    if vid in self.items and self.items[vid].scene() is self.plot.scene():
                        # Item is already added to the correct scene
                        # possible bug: what if two plots occupy the same scene? (should
                        # rarely be a problem because items are removed from a plot before
                        # switching).
                        items.add(vid)
                    else:
                        # Add the item to the plot, or generate a new item if needed.
                        if isinstance(val, QtWidgets.QGraphicsItem):
                            self.plot.addItem(val)
                            item = val
                        else:
                            item = self.plot.plot(val)
                        self.items[vid] = item
                        items.add(vid)
            # Any left-over items that did not appear in the input must be removed
            for vid in list(self.items.keys()):
                if vid not in items:
                    self.plot.removeItem(self.items[vid])
                    del self.items[vid]

    def processBypassed(self, args):
        """When the node is bypassed, strip all of its items from the plot."""
        if self.plot is None:
            return
        for item in list(self.items.values()):
            self.plot.removeItem(item)
        self.items = {}

    def ctrlWidget(self):
        """Lazily create and return the plot-selection ComboBox."""
        if self.ui is None:
            self.ui = ComboBox()
            self.ui.currentIndexChanged.connect(self.plotSelected)
            self.updateUi()
        return self.ui

    def plotSelected(self, index):
        """Slot called when the user picks a plot in the ComboBox."""
        self.setPlot(self.ui.value())

    def setPlotList(self, plots):
        """
        Specify the set of plots (PlotWidget or PlotItem) that the user may
        select from.

        *plots* must be a dictionary of {name: plot} pairs.
        """
        self.plots = plots
        self.updateUi()

    def updateUi(self):
        # sets list and automatically preserves previous selection
        if self.ui is None:
            # Fix: setPlot()/setPlotList() may legitimately run before
            # ctrlWidget() has created the ComboBox; previously this raised
            # AttributeError on self.ui.setItems below.
            return
        self.ui.setItems(self.plots)
        try:
            self.ui.setValue(self.plot)
        except ValueError:
            pass
class CanvasNode(Node):
    """Connection to a Canvas widget."""
    nodeName = 'CanvasWidget'

    def __init__(self, name):
        Node.__init__(self, name, terminals={'In': {'io': 'in', 'multi': True}})
        self.canvas = None  # target canvas; set via setCanvas()
        self.items = {}     # maps id(value)/terminal -> item placed on the canvas

    def disconnected(self, localTerm, remoteTerm):
        """Remove the item belonging to *remoteTerm* when it disconnects."""
        # Fix: terminals are looked up by name via Node.__getitem__ (compare
        # PlotWidgetNode.disconnected); attribute access `self.In` raised
        # AttributeError.
        if localTerm is self['In'] and remoteTerm in self.items:
            self.canvas.removeItem(self.items[remoteTerm])
            del self.items[remoteTerm]

    def setCanvas(self, canvas):
        """Set the Canvas widget that process() adds items to."""
        self.canvas = canvas

    def getCanvas(self):
        """Return the current canvas (may be None)."""
        return self.canvas

    def process(self, In, display=True):
        """Add every incoming value to the canvas and drop stale items.

        *In* maps input names to values; each value may be a single item or
        a list of items.
        """
        if display:
            items = set()
            for name, vals in In.items():
                if vals is None:
                    continue
                if type(vals) is not list:
                    vals = [vals]
                for val in vals:
                    vid = id(val)
                    if vid in self.items:
                        items.add(vid)
                    else:
                        self.canvas.addItem(val)
                        item = val
                        self.items[vid] = item
                        items.add(vid)
            # Remove previously-added items that no longer appear in the input.
            for vid in list(self.items.keys()):
                if vid not in items:
                    #print "remove", self.items[vid]
                    self.canvas.removeItem(self.items[vid])
                    del self.items[vid]
class PlotCurve(CtrlNode):
    """Generates a plot curve from x/y data"""
    nodeName = 'PlotCurve'
    uiTemplate = [
        ('color', 'color'),
    ]

    def __init__(self, name):
        # Two data inputs, one graphics output holding the reusable item.
        terms = {
            'x': {'io': 'in'},
            'y': {'io': 'in'},
            'plot': {'io': 'out'}
        }
        CtrlNode.__init__(self, name, terminals=terms)
        self.item = PlotDataItem()

    def process(self, x, y, display=True):
        """Update the curve item with (x, y) and return it for display."""
        #print "scatterplot process"
        if not display:
            return {'plot': None}
        pen_color = self.ctrls['color'].color()
        self.item.setData(x, y, pen=pen_color)
        return {'plot': self.item}
class ScatterPlot(CtrlNode):
    """Generates a scatter plot from a record array or nested dicts"""
    nodeName = 'ScatterPlot'
    # Control layout: field selectors for x/y/size, enable toggles, and
    # colormaps for fill/border colors.
    uiTemplate = [
        ('x', 'combo', {'values': [], 'index': 0}),
        ('y', 'combo', {'values': [], 'index': 0}),
        ('sizeEnabled', 'check', {'value': False}),
        ('size', 'combo', {'values': [], 'index': 0}),
        ('absoluteSize', 'check', {'value': False}),
        ('colorEnabled', 'check', {'value': False}),
        ('color', 'colormap', {}),
        ('borderEnabled', 'check', {'value': False}),
        ('border', 'colormap', {}),
    ]

    def __init__(self, name):
        # One data input ('input'), one graphics output ('plot').
        CtrlNode.__init__(self, name, terminals={
            'input': {'io': 'in'},
            'plot': {'io': 'out'}
        })
        self.item = ScatterPlotItem()  # reused across process() calls
        self.keys = []                 # field names currently offered in the combos
        #self.ui = QtWidgets.QWidget()
        #self.layout = QtWidgets.QGridLayout()
        #self.ui.setLayout(self.layout)
        #self.xCombo = QtWidgets.QComboBox()
        #self.yCombo = QtWidgets.QComboBox()

    def process(self, input, display=True):
        """Build the spot list from *input* and return the ScatterPlotItem.

        NOTE(review): assumes *input* is a sequence of mapping-like records
        (dicts or record-array rows) indexable by field name -- TODO confirm
        against upstream callers.
        """
        #print "scatterplot process"
        if not display:
            return {'plot': None}
        # Refresh the field choices from the first record before reading
        # the control selections below.
        self.updateKeys(input[0])
        x = str(self.ctrls['x'].currentText())
        y = str(self.ctrls['y'].currentText())
        size = str(self.ctrls['size'].currentText())
        # Default pen: fully transparent (alpha 0) border.
        pen = QtGui.QPen(QtGui.QColor(0,0,0,0))
        points = []
        for i in input:
            pt = {'pos': (i[x], i[y])}
            if self.ctrls['sizeEnabled'].isChecked():
                pt['size'] = i[size]
            if self.ctrls['borderEnabled'].isChecked():
                pt['pen'] = QtGui.QPen(self.ctrls['border'].getColor(i))
            else:
                pt['pen'] = pen
            if self.ctrls['colorEnabled'].isChecked():
                pt['brush'] = QtGui.QBrush(self.ctrls['color'].getColor(i))
            points.append(pt)
        self.item.setPxMode(not self.ctrls['absoluteSize'].isChecked())
        self.item.setPoints(points)
        return {'plot': self.item}

    def updateKeys(self, data):
        """Repopulate the field-selection controls from *data*'s field names.

        Accepts a dict (keys), a list/tuple (used directly), or a numpy
        record/void (dtype names); anything else is reported and ignored.
        """
        if isinstance(data, dict):
            keys = list(data.keys())
        elif isinstance(data, list) or isinstance(data, tuple):
            keys = data
        elif isinstance(data, np.ndarray) or isinstance(data, np.void):
            keys = data.dtype.names
        else:
            print("Unknown data type:", type(data), data)
            return
        # Block signals so repopulating the combos does not retrigger process().
        for c in self.ctrls.values():
            c.blockSignals(True)
        for c in [self.ctrls['x'], self.ctrls['y'], self.ctrls['size']]:
            cur = str(c.currentText())
            c.clear()
            for k in keys:
                c.addItem(k)
                if k == cur:
                    # Preserve the previous selection if the field still exists.
                    c.setCurrentIndex(c.count()-1)
        for c in [self.ctrls['color'], self.ctrls['border']]:
            c.setArgList(keys)
        for c in self.ctrls.values():
            c.blockSignals(False)
        self.keys = keys

    def saveState(self):
        """Persist the known field names alongside the control state."""
        state = CtrlNode.saveState(self)
        return {'keys': self.keys, 'ctrls': state}

    def restoreState(self, state):
        """Restore field names first so the combo selections can be re-applied."""
        self.updateKeys(state['keys'])
        CtrlNode.restoreState(self, state['ctrls'])
#class ImageItem(Node):
#"""Creates an ImageItem for display in a canvas from a file handle."""
#nodeName = 'Image'
#def __init__(self, name):
#Node.__init__(self, name, terminals={
#'file': {'io': 'in'},
#'image': {'io': 'out'}
#})
#self.imageItem = graphicsItems.ImageItem()
#self.handle = None
#def process(self, file, display=True):
#if not display:
#return {'image': None}
#if file != self.handle:
#self.handle = file
#data = file.read()
#self.imageItem.updateImage(data)
#pos = file.
|
3fon3fonovREPO_NAMEexostrikerPATH_START.@exostriker_extracted@exostriker-main@exostriker@lib@pyqtgraph@flowchart@library@Display.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "CASTOR-telescope/ETC",
"repo_path": "ETC_extracted/ETC-master/castor_etc/data/pickles_spectra/__init__.py",
"type": "Python"
}
|
"""
Pickles spectra files.
---
GNU General Public License v3 (GNU GPLv3)
(c) 2022. (c) 2022.
Government of Canada Gouvernement du Canada
National Research Council Conseil national de recherches
Ottawa, Canada, K1A 0R6 Ottawa, Canada, K1A 0R6
All rights reserved Tous droits réservés
NRC disclaims any warranties, Le CNRC dénie toute garantie
expressed, implied, or énoncée, implicite ou légale,
statutory, of any kind with de quelque nature que ce
respect to the software, soit, concernant le logiciel,
including without limitation y compris sans restriction
any warranty of merchantability toute garantie de valeur
or fitness for a particular marchande ou de pertinence
purpose. NRC shall not be pour un usage particulier.
liable in any event for any Le CNRC ne pourra en aucun cas
damages, whether direct or être tenu responsable de tout
indirect, special or general, dommage, direct ou indirect,
consequential or incidental, particulier ou général,
arising from the use of the accessoire ou fortuit, résultant
software. Neither the name de l'utilisation du logiciel. Ni
of the National Research le nom du Conseil National de
Council of Canada nor the Recherches du Canada ni les noms
names of its contributors may de ses participants ne peuvent
be used to endorse or promote être utilisés pour approuver ou
products derived from this promouvoir les produits dérivés
software without specific prior de ce logiciel sans autorisation
written permission. préalable et particulière
par écrit.
This file is part of the Ce fichier fait partie du projet
FORECASTOR ETC project. FORECASTOR ETC.
FORECASTOR ETC is free software: FORECASTOR ETC est un logiciel
you can redistribute it and/or libre ; vous pouvez le redistribuer
modify it under the terms of ou le modifier suivant les termes de
the GNU General Public la "GNU General Public
License as published by the License" telle que publiée
Free Software Foundation, par la Free Software Foundation :
either version 3 of the soit la version 3 de cette
License, or (at your option) licence, soit (à votre gré)
any later version. toute version ultérieure.
FORECASTOR ETC is distributed FORECASTOR ETC est distribué
in the hope that it will be dans l'espoir qu'il vous
useful, but WITHOUT ANY WARRANTY; sera utile, mais SANS AUCUNE
without even the implied warranty GARANTIE : sans même la garantie
of MERCHANTABILITY or FITNESS FOR implicite de COMMERCIALISABILITÉ
A PARTICULAR PURPOSE. See the ni d'ADÉQUATION À UN OBJECTIF
GNU General Public License for PARTICULIER. Consultez la Licence
more details. Générale Publique GNU pour plus
de détails.
You should have received Vous devriez avoir reçu une
a copy of the GNU General copie de la Licence Générale
Public License along with Publique GNU avec FORECASTOR ETC ;
FORECASTOR ETC. If not, see si ce n'est pas le cas, consultez :
<http://www.gnu.org/licenses/>. <http://www.gnu.org/licenses/>.
"""
|
CASTOR-telescopeREPO_NAMEETCPATH_START.@ETC_extracted@ETC-master@castor_etc@data@pickles_spectra@__init__.py@.PATH_END.py
|
{
"filename": "mpfit.py",
"repo_name": "USNavalResearchLaboratory/eispac",
"repo_path": "eispac_extracted/eispac-main/eispac/extern/mpfit.py",
"type": "Python"
}
|
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
-------
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by Jorge Moré and collaborators.
Craig Markwardt converted the FORTRAN code to IDL.
The information for the IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at:
http://cars.uchicago.edu/software
Sergey Koposov converted the Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at:
https://github.com/segasai/astrolibpy
DESCRIPTION
-----------
MPFIT uses the Levenberg-Marquardt technique to solve the least-squares
problem. In its typical use, MPFIT will be used to fit a user-supplied
function (the "model") to user-supplied data points (the "data") by
adjusting a set of parameters. MPFIT is based upon MINPACK-1 (LMDIF.F)
by More' and collaborators.
For example, a researcher may think that a set of observed data points
is best modelled with a Gaussian curve. A Gaussian curve is parameterized
by its mean, standard deviation and normalization. MPFIT will, within
certain constraints, find the set of parameters which best fits the data.
The fit is "best" in the least-squares sense; that is, the sum of the
weighted squared differences between the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for iteratively
searching for the best fit. This particular implementation is drawn from
MINPACK-1 (see NETLIB), and is much faster and more accurate than the
version provided in the Scientific Python package in
Scientific.Functions.LeastSquares. This version allows upper and lower
bounding constraints to be placed on each parameter, or the parameter can
be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem the
residuals should be weighted so that each deviate has a gaussian sigma
of 1.0. If X represents values of the independent variable, Y represents
a measurement for each value of X, and ERR represents the error in the
measurements, then the deviates could be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and MPFITEXPR,
which are driver functions that calculate the deviates for you.
If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the chi-square
value. The values of X, Y and ERR are passed through MPFIT to the
user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the least-squares
minimization problem.
USER FUNCTION
-------------
The user must define a function which returns the appropriate values
as specified above. The function should return the weighted deviations
between the model and the data. It should also return a status flag
and an optional partial derivative array. For applications which use
finite-difference derivatives -- the default -- the user function should
be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with
# default flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue,
# negative means stop the calculation.
status = 0
return [status, (y-model)/err]
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are suggestive
but not required. Any parameters can be passed to MYFUNCT by using the
functkw keyword to MPFIT. Use MPFITFUN and MPFITEXPR if you need ideas
on how to do that. The function *must* accept a parameter list, P.
In general there are no restrictions on the number of dimensions in X, Y,
or ERR. However the deviates *must* be returned in a one-dimensional
Numeric array of type Float.
User functions may also indicate a fatal error condition using the status
return described above. If status is set to a number between -15 and -1
then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
--------------------
In the search for the best-fit solution, MPFIT by default calculates
derivatives numerically via a finite difference approximation. The
user-supplied function need not calculate the derivatives explicitly.
However, if you desire to compute them analytically, then the
AUTODERIVATIVE=0 keyword must be passed to MPFIT. As a practical matter,
it is often sufficient and even faster to allow MPFIT to calculate the
derivatives numerically, and so AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue,
# negative means stop the calculation.
status = 0
if (dojac):
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return [status, (y-model)/err, pderiv]
where FGRAD(x, p, i) is a user function which must compute the derivative
of the model with respect to parameter P[i] at X. When finite differencing
is used for computing derivatives (i.e., when AUTODERIVATIVE=1), or when
MPFIT needs only the errors but not the derivatives the parameter
FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an
MxN array, where M is the number of data points and N is the number of
parameters. dp[i,j] is the derivative at the ith point with respect to
the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero is an
appropriate value to insert for those derivatives. Upon input to the user
function, FJAC is set to a vector with the same length as P, with a value
of 1 for a parameter which is free, and a value of zero for a parameter
which is fixed (and hence no derivative needs to be calculated).
If the data is higher than one dimensional, then the *last* dimension
should be the parameter dimension.
Example: fitting a 50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
------------------------------------------------------
The behavior of MPFIT can be modified with respect to each parameter to
be fitted. A parameter value can be fixed; simple boundary constraints
can be imposed; limitations on the parameter changes can be imposed;
properties of the automatic derivative can be modified; and parameters
can be tied to one another.
These properties are governed by the PARINFO structure, which is passed
as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each
parameter. Each parameter is associated with one element of the array,
in numerical order. The dictionary can have the following keys (none
are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by MPFIT, but are
passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second element
is set, then the parameter is bounded on the lower/upper side.
A parameter can be bounded on both sides. Both LIMITED and LIMITS
must be given together.
'limits' - a two-element float array. Gives the parameter limits
on the lower and upper sides, respectively. Zero, one or two of
these values can be set, depending on the values of LIMITED.
Both LIMITED and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The fitting
code of MPFIT does not use this tag in any way. However, the
default iterfunct will print the parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is computed
automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x))/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The "automatic"
one-sided derivative method will chose a direction for the
finite difference which does not violate any constraints. The
other methods do not perform this check. The two-sided method
is in principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter value.
During the fitting process, the parameter will never be changed
by more than this value in one iteration. A value of 0 indicates
no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving constants and
the parameter array P are permitted. Example: if parameter 2 is
always to be twice parameter 1 then use the following:
parinfo(2).tied = '2 * p(1)'.
Since they are totally constrained, tied parameters are considered
to be fixed; no errors are computed for them. NOTE: the PARNAME
can't be used in expressions.
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value will not be
printed. This tag can be used to selectively print only a few
parameter values out of many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP". Therefore
programmers are urged to avoid using tags starting with the same letters;
otherwise they are free to include their own fields within the PARINFO
structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0],
'limits':[0.,0.]} for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5):
parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7, 2.2, 500, 1.5,
and 2000 are given. The first parameter is fixed at a value of 5.7,
and the last parameter is constrained to be above 50.
EXAMPLE
-------
import mpfit
import numpy.oldnumeric as Numeric
x = arange(100, float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = (p[0] + p[1]*[x] + p[2]*[x**2] + p[3]*sqrt(x) +
p[4]*log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit('myfunct', p0, functkw=fa)
print('status = ', m.status)
if (m.status <= 0):
    print('error message = ', m.errmsg)
print('parameters = ', m.params)
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X, Y,
and ERR keyword parameters that are given by FUNCTKW. The results can
be obtained from the returned object m.
THEORY OF OPERATION
-------------------
There are many specific strategies for function minimization. One very
popular technique is to use function gradient information to realize the
local structure of the function. Near a local minimum the function value
can be taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the Hessian
matrix of second derivatives of f at x. The vector x is the set of
function parameters, not the measured data vector. One can find the
minimum of f, f(xm) using Newton's method, and arrives at the following
linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for (xm-x0),
the step vector from the current position x0 to the new projected minimum.
Here the problem has been linearized (ie, the gradient information is known
to first order). f''(x0) is symmetric n x n matrix, and should be positive
definite.
The Levenberg-Marquardt technique is a variation on this theme. It adds an
additional diagonal term to the equation which may aid the convergence
properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall matrix is
diagonally dominant, and the iterations follow steepest descent. When nu
is small, the iterations are quadratically convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be determined.
However the Hessian matrix is often difficult or impossible to compute.
The gradient f'(x0) may be easier to compute, if even by finite difference
techniques. So-called quasi-Newton techniques attempt to successively
estimate f''(x0) by building up gradient information as the iterations
proceed.
In the least squares problem there are further simplifications which assist
in solving eqn (2). The function to be minimized is a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described above.
This can be substituted back into eqn (2) after computing the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a minimum,
then one typically finds that the second term in f'' is negligible (or,
in any case, is too difficult to compute). Thus, equation (2) can be solved,
at least approximately, using only gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h' is the
Jacobian matrix (dimensions n x m), and dx is (xm-x0). The user function
supplies the residual vector h, and in some cases h' when it is not found
by finite differences (see MPFIT_FDJAC2, which finds h and hT'). Even if
dx is not the best absolute step to take, it does provide a good estimate
of the best *direction*, so often a line minimization will occur along the
dx vector direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT . Q = I,
and R is upper right triangular. Using h' = Q . R and the orthogonality of
Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular. Here, R,
QT, and h are known so this is a matter of solving for dx. The routine
MPFIT_QRFAC provides the QR factorization of h, with pivoting, and
MPFIT_QRSOLV provides the solution for dx.
REFERENCES
----------
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm: Implementation
and Theory," in *Numerical Analysis*, ed. Watson, G. A., Lecture
Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
--------------------
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or unmodified
copies is granted, provided this copyright and disclaimer are included
unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
Added full Python 3 compatibility (Sergey Koposov, Feb 2017)
"""
__all__ = ['mpfit']
import numpy
import types
import scipy.linalg.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,lmpar,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
Parameters
----------
fcn : function
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall : array-like
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
autoderivative : {1, 0}
If this is set (1), derivatives of the function will be computed
automatically via a finite differencing procedure. If not set (0),
then fcn must provide the (analytical) derivatives. To supply your
own analytical derivatives, explicitly pass autoderivative=0.
Default: set (=1)
ftol : float, optional
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw : dict, optional
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example, if functkw =
{'xval':[1.,2.,3.], 'yval':[1.,4.,9.], 'errval':[1.,1.,1.]}
then the user-supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {}, No extra parameters are passed to the function
gtol : float, optional
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw : dict, optional
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {}, No arguments are passed.
iterfunct : function
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None, parinfo=None,
quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct - The user-supplied function to be minimized,
p - The current set of model parameters
iter - The iteration number
functkw - The arguments to be passed to myfunct.
fnorm - The chi-squared value.
quiet - Set when no textual output should be printed.
dof - The number of degrees of freedom, normally the number of
points less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return
a status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0. In principle, iterfunct should probably
not modify the parameter values, because it may interfere with the
algorithm's stability. In practice it is allowed. Set iterfunct=None
if there is no user-defined routine and you don't want the internal
default routine be called.
Default: an internal routine is used to print the parameter values.
maxiter : int, optional
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar : {0, 1}
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0), The covariance matrix is returned
nprint : int, optional
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default: 1
parinfo : list of dicts, optional
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT. PARINFO should be a list of
dictionaries, one list entry for each parameter. The dictionary can
have the following keys (all keys are optional and case insensitive):
'value' : float
the starting parameter value (see also the XALL parameter).
'fixed' : {0, 1}
If set (1), the parameter value will be held fixed. Fixed
parameters are not varied by MPFIT, but are passed on to
MYFUNCT for evaluation.
'limited' : two-element int array.
If the first/second element is set (1), then the parameter is
bounded on the lower/upper side. A parameter can be bounded
on both sides. Both LIMITED and LIMITS must be given together.
'limits' : two-element float array.
Gives the parameter limits on the lower and upper sides,
respectively. A value will only be used as a limit if the
corresponding value of LIMITED is set (=1). Both LIMITED
and LIMITS must be given together.
'parname' : str
Name of the parameter. The fitting code of MPFIT does not
use this tag in any way. However, the default iterfunct will
print the parameter name, if available.
'step' : float
Step size to be used in calculating the numerical derivatives.
If set to zero, then the step size is computed automatically.
Ignored when AUTODERIVATIVE=0.
'mpside' : {0, 1, -1, 2}
The sidedness of the finite difference when computing
numerical derivatives. This field can take four values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x))/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The "automatic"
one-sided derivative method will chose a direction for the
finite difference which does not violate any constraints. The
other methods do not perform this check. The two-sided method
is in principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' : float
The maximum change to be made in the parameter value.
During the fitting process, the parameter will never be changed
by more than this value in one iteration. A value of 0 indicates
no maximum. Default: 0.
'tied' : str
String expression which "ties" the parameter to other free or
fixed parameters. Any expression involving constants and the
parameter array "P" are permitted. Example: if parameter 2 is
always to be twice parameter 1 then use the following:
parinfo[2].tied = '2 * p[1]'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
NOTE: the PARNAME can't be used in expressions.
'mpprint' : {1, 0}
If set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value will not
be printed. This tag can be used to selectively print only
a few parameter values out of many. Default: 1 (all
parameters printed)
Default value: None, All parameters are free and unconstrained.
quiet : {0, 1}
Set this keyword = 1 when no textual output should be printed by MPFIT
damp : float, optional
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm). A value of 0 indicates no damping.
Note: DAMP doesn't work with autoderivative=0. Default: 0
xtol : float, optional
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution. Default: 1E-10
Returns
-------
mpfit class object
The results are attributes of this class, e.g. mpfit.status,
 mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status : int
An integer status code is returned. All values greater than zero
can represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm : float
The value of the summed squared residuals for the returned parameter
values.
.covar : array-like
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg : str
A string error or warning message is returned.
.nfev : int
The number of calls to MYFUNCT performed.
.niter : int
The number of iterations completed.
.perror : array-like
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the measured chi-squared value.
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
blas_enorm32, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float32))
blas_enorm64, = scipy.linalg.blas.get_blas_funcs(['nrm2'],numpy.array([0],dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof=0
if fcn==None:
self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if type(parinfo) is not list:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if type(parinfo[0]) is not dict:
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# In the case if the xall is not float or if is float but has less
# than 64 bits we do convert it into double
if xall.dtype.kind != 'f' or xall.dtype.itemsize<=4:
xall = xall.astype(numpy.float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep<minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin!=0.) | (qmax!=0.)))[0]
qminmax = len(wh > 0)
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy() # self.params is the set of parameters to be returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:,1])[ifree]
ulim = (limits [:,1])[ifree]
qllim = (limited[:,0])[ifree]
llim = (limits [:,0])[ifree]
if numpy.any((qulim!=0.) | (qllim!=0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
# If the returned fvec has more than four bits I assume that we have
# double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize>4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m-nfree
self.fnorm = self.enorm(fvec)
# Initialize Levelberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter-1) % nprint) == 0:
mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0-self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:,whlpeg[i]])
if sum0 > 0:
fjac[:,whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:,whupeg[i]])
if sum0 < 0:
fjac[:,whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale==0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if temp3 != 0:
fj = fjac[j:,lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
gnorm = numpy.max([gnorm,numpy.abs(sum0/wa2[l])])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag>wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip( wa1[whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = ((llim[whl] - x[whl]) /
wa1[whl])
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = ((ulim[whu] - x[whu]) /
wa1[whu])
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax[ifree] != 0.) & (maxstep[ifree] > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(numpy.abs(nwa1[whmax]) /
numpy.abs(maxstep[ifree[whmax]]))
if mrat > 1:
alpha = alpha / mrat
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim!=0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta,pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "'+fcn+'"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (fnorm1/self.fnorm)**2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = actred/prered
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta,pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5*ratio <= 1):
self.status = 6
if delta <= machep*xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) & \
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
errmsg = ('''ERROR: parameter or function value(s) have become
'infinite; check model function for over- 'and underflow''')
self.status = -16
break
#wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
#if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break;
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar==0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree,ifree[i]] = cv[:,i]
# Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar)
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'params': self.params,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print ('Entering defiter...')
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print ("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print (p + (pformat % x[i]) + ' ')
return 0
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print ('Entering parinfo...')
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (key in parinfo[i]):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if type(default) is list:
test=default[0]
values = numpy.asarray(values)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print ('Entering call...')
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(f/self.damp)
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
           epsfcn=None, autoderivative=1,
           functkw=None, xall=None, ifree=None, dstep=None):
    """Compute the m x n Jacobian of the residual vector.

    Either asks the user function for analytical derivatives
    (``autoderivative == 0``) or approximates them with one- or
    two-sided finite differences, honoring per-parameter absolute steps
    (STEP), relative steps (DSTEP), derivative sides (DSIDE) and upper
    limits (ULIMIT/ULIMITED).

    Parameters
    ----------
    fcn : callable
        User residual function.
    x : ndarray
        Free-parameter values (the subset of ``xall`` selected by ``ifree``).
    fvec : ndarray
        Residuals already evaluated at ``x``.
    step, dstep : ndarray, optional
        Absolute / relative finite-difference steps; include fixed parameters.
    ulimited, ulimit : ndarray, optional
        Upper-limit flags/values for the varying parameters only.
    dside : ndarray, optional
        Per-parameter derivative-side selector; includes fixed parameters.
    epsfcn : float, optional
        Relative error in fcn; defaults to machine epsilon.
    autoderivative : int
        0 to request analytical derivatives from fcn, 1 for numerical.
    functkw : dict, optional
        Extra keyword arguments passed through to fcn.
    xall, ifree : ndarray, optional
        Full parameter vector and indices of the free parameters.

    Returns
    -------
    ndarray or None
        The m x n Jacobian, or None when the user function signals failure.
    """
    if self.debug:
        print ('Entering fdjac2...')
    machep = self.machar.machep
    if epsfcn is None:
        epsfcn = machep
    if xall is None:
        xall = x
    if ifree is None:
        ifree = numpy.arange(len(xall))
    if step is None:
        step = x * 0.
    nall = len(xall)
    # Baseline finite-difference step: sqrt of the larger of epsfcn/machep.
    eps = numpy.sqrt(numpy.max([epsfcn, machep]))
    m = len(fvec)
    n = len(x)
    # Compute analytical derivative if requested
    if autoderivative == 0:
        mperr = 0
        fjac = numpy.zeros(nall, dtype=float)
        fjac[ifree] = 1.0  # Specify which parameters need derivatives
        [status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
        # The user function is expected to have resized fjac to m*nall.
        if len(fjac) != m*nall:
            print ('ERROR: Derivative matrix was not computed properly.')
            return None
        # This definition is consistent with CURVEFIT
        # Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
        fjac.shape = [m,nall]
        fjac = -fjac
        # Select only the free parameters
        if len(ifree) < nall:
            fjac = fjac[:,ifree]
            fjac.shape = [m, n]
            return fjac
        return fjac
    fjac = numpy.zeros([m, n], dtype=float)
    # Default: relative step scaled by |x|.
    h = eps * numpy.abs(x)
    # if STEP is given, use that
    # STEP includes the fixed parameters
    if step is not None:
        stepi = step[ifree]
        wh = (numpy.nonzero(stepi > 0))[0]
        if len(wh) > 0:
            h[wh] = stepi[wh]
    # if relative step is given, use that
    # DSTEP includes the fixed parameters
    if len(dstep) > 0:
        dstepi = dstep[ifree]
        wh = (numpy.nonzero(dstepi > 0))[0]
        if len(wh) > 0:
            h[wh] = numpy.abs(dstepi[wh]*x[wh])
    # In case any of the step values are zero
    h[h == 0] = eps
    # Reverse the sign of the step if we are up against the parameter
    # limit, or if the user requested it.
    # DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
    # varying ones)
    mask = dside[ifree] == -1
    if len(ulimited) > 0 and len(ulimit) > 0:
        mask = (mask | ((ulimited!=0) & (x > ulimit-h)))
    wh = (numpy.nonzero(mask))[0]
    if len(wh) > 0:
        h[wh] = - h[wh]
    # Loop through parameters, computing the derivative for each
    for j in range(n):
        xp = xall.copy()
        xp[ifree[j]] = xp[ifree[j]] + h[j]
        [status, fp] = self.call(fcn, xp, functkw)
        if status < 0:
            return None
        if numpy.abs(dside[ifree[j]]) <= 1:
            # COMPUTE THE ONE-SIDED DERIVATIVE
            # Note optimization fjac(0:*,j)
            fjac[0:,j] = (fp-fvec)/h[j]
        else:
            # COMPUTE THE TWO-SIDED DERIVATIVE
            xp[ifree[j]] = xall[ifree[j]] - h[j]
            mperr = 0
            [status, fm] = self.call(fcn, xp, functkw)
            if status < 0:
                return None
            # Note optimization fjac(0:*,j)
            fjac[0:,j] = (fp-fm)/(2*h[j])
    return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
def qrfac(self, a, pivot=0):
    """QR factorization with optional column pivoting (MINPACK QRFAC port).

    Reduces the m x n matrix *a* with Householder transformations so
    that a*p = q*r.  On output *a* holds the reflector vectors and R
    (see the storage notes in the comments above); column exchanges are
    recorded in the permutation only, never performed on the data.

    Parameters
    ----------
    a : ndarray, shape (m, n)
        Matrix to factor; modified in place.
    pivot : int
        Nonzero to pivot the column of largest remaining norm into
        place at each step.

    Returns
    -------
    list
        [a, ipvt, rdiag, acnorm]: factored matrix, column permutation,
        diagonal of R (permuted order), and the original column norms
        (standard order).
    """
    if self.debug: print ('Entering qrfac...')
    machep = self.machar.machep
    sz = a.shape
    m = sz[0]
    n = sz[1]
    # Compute the initial column norms and initialize arrays
    acnorm = numpy.zeros(n, dtype=float)
    for j in range(n):
        acnorm[j] = self.enorm(a[:,j])
    rdiag = acnorm.copy()
    wa = rdiag.copy()
    ipvt = numpy.arange(n)
    # Reduce a to r with householder transformations
    minmn = numpy.min([m,n])
    for j in range(minmn):
        if pivot != 0:
            # Bring the column of largest norm into the pivot position
            rmax = numpy.max(rdiag[j:])
            kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
            ct = len(kmax)
            kmax = kmax + j
            if ct > 0:
                kmax = kmax[0]
                # Exchange rows via the pivot only. Avoid actually exchanging
                # the rows, in case there is lots of memory transfer. The
                # exchange occurs later, within the body of MPFIT, after the
                # extraneous columns of the matrix have been shed.
                if kmax != j:
                    temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
                    rdiag[kmax] = rdiag[j]
                    wa[kmax] = wa[j]
        # Compute the householder transformation to reduce the jth
        # column of A to a multiple of the jth unit vector
        lj = ipvt[j]
        ajj = a[j:,lj]
        ajnorm = self.enorm(ajj)
        if ajnorm == 0:
            break
        # Choose the sign that avoids cancellation in ajj[0] + 1 below.
        if a[j,lj] < 0:
            ajnorm = -ajnorm
        ajj = ajj / ajnorm
        ajj[0] = ajj[0] + 1
        # *** Note optimization a(j:*,j)
        a[j:,lj] = ajj
        # Apply the transformation to the remaining columns
        # and update the norms
        # NOTE to SELF: tried to optimize this by removing the loop,
        # but it actually got slower. Reverted to "for" loop to keep
        # it simple.
        if j+1 < n:
            for k in range(j+1, n):
                lk = ipvt[k]
                ajk = a[j:,lk]
                # *** Note optimization a(j:*,lk)
                # (corrected 20 Jul 2000)
                if a[j,lj] != 0:
                    a[j:,lk] = ajk - ajj * sum(ajk*ajj)/a[j,lj]
                    if (pivot != 0) and (rdiag[k] != 0):
                        # Downdate the remaining column norm; recompute it
                        # from scratch when cancellation makes it unreliable.
                        temp = a[j,lk]/rdiag[k]
                        rdiag[k] = rdiag[k] * numpy.sqrt(numpy.max([(1.-temp**2), 0.]))
                        temp = rdiag[k]/wa[k]
                        if (0.05*temp*temp) <= machep:
                            rdiag[k] = self.enorm(a[j+1:,lk])
                            wa[k] = rdiag[k]
        rdiag[j] = -ajnorm
    return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
    """Solve a*x = b, d*x = 0 in the least-squares sense (MINPACK QRSOLV port).

    Given the QR factorization a*p = q*r, the diagonal matrix d (as the
    vector *diag*) and the first n components of (q^T)*b (as *qtb*),
    completes the solution and produces the upper triangular matrix s
    with p^T*(a^T*a + d*d)*p = s^T*s.  See the FORTRAN documentation above.

    Parameters
    ----------
    r : ndarray, shape (n, n)
        Upper triangle of R on input; modified in place (its strict
        lower triangle receives the transposed strict upper triangle of s).
    ipvt : ndarray
        Column permutation from the QR factorization.
    diag : ndarray
        Diagonal elements of d.
    qtb : ndarray
        First n elements of (q^T)*b.
    sdiag : ndarray
        Output array receiving the diagonal of s; modified in place.

    Returns
    -------
    tuple
        (r, x, sdiag).
    """
    if self.debug:
        print ('Entering qrsolv...')
    sz = r.shape
    m = sz[0]
    n = sz[1]
    # copy r and (q transpose)*b to preserve input and initialize s.
    # in particular, save the diagonal elements of r in x.
    for j in range(n):
        r[j:n,j] = r[j,j:n]
    x = numpy.diagonal(r).copy()
    wa = qtb.copy()
    # Eliminate the diagonal matrix d using a givens rotation
    for j in range(n):
        l = ipvt[j]
        if diag[l] == 0:
            break
        sdiag[j:] = 0
        sdiag[j] = diag[l]
        # The transformations to eliminate the row of d modify only a
        # single element of (q transpose)*b beyond the first n, which
        # is initially zero.
        qtbpj = 0.
        for k in range(j,n):
            if sdiag[k] == 0:
                break
            # Form the Givens rotation, branching on the larger magnitude
            # to avoid overflow when computing the (co)tangent.
            if numpy.abs(r[k,k]) < numpy.abs(sdiag[k]):
                cotan = r[k,k]/sdiag[k]
                sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
                cosine = sine*cotan
            else:
                tang = sdiag[k]/r[k,k]
                cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
                sine = cosine*tang
            # Compute the modified diagonal element of r and the
            # modified element of ((q transpose)*b,0).
            r[k,k] = cosine*r[k,k] + sine*sdiag[k]
            temp = cosine*wa[k] + sine*qtbpj
            qtbpj = -sine*wa[k] + cosine*qtbpj
            wa[k] = temp
            # Accumulate the transformation in the row of s
            if n > k+1:
                temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
                sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
                r[k+1:n,k] = temp
        # Store the diagonal of s and restore the diagonal of r.
        sdiag[j] = r[j,j]
        r[j,j] = x[j]
    # Solve the triangular system for z. If the system is singular
    # then obtain a least squares solution
    nsing = n
    wh = (numpy.nonzero(sdiag == 0))[0]
    if len(wh) > 0:
        nsing = wh[0]
        wa[nsing:] = 0
    if nsing >= 1:
        wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1]  # Degenerate case
        # *** Reverse loop *** (back substitution)
        for j in range(nsing-2,-1,-1):
            sum0 = sum(r[j+1:nsing,j]*wa[j+1:nsing])
            wa[j] = (wa[j]-sum0)/sdiag[j]
    # Permute the components of z back to components of x
    x[ipvt] = wa
    return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
    """Determine the Levenberg-Marquardt parameter *par* (MINPACK LMPAR port).

    Given the QR factorization of the Jacobian, the scaling diagonal
    *diag*, (q^T)*b in *qtb*, and the trust-region radius *delta*,
    finds par such that the scaled step norm matches delta to within
    10%, iterating at most 10 times.  See the FORTRAN documentation above.

    Returns
    -------
    list
        [r, par, x, sdiag]: the (updated) triangular factor, the LM
        parameter, the solution step, and the diagonal of the implicit
        matrix s produced by qrsolv.
    """
    if self.debug:
        print ('Entering lmpar...')
    dwarf = self.machar.minnum
    machep = self.machar.machep
    sz = r.shape
    m = sz[0]
    n = sz[1]
    # Compute and store in x the gauss-newton direction. If the
    # jacobian is rank-deficient, obtain a least-squares solution
    nsing = n
    wa1 = qtb.copy()
    # Diagonal entries below rthresh are treated as numerically zero.
    rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
    wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
    if len(wh) > 0:
        nsing = wh[0]
        wa1[wh[0]:] = 0
    if nsing >= 1:
        # *** Reverse loop *** (back substitution through R)
        for j in range(nsing-1,-1,-1):
            wa1[j] = wa1[j]/r[j,j]
            if j-1 >= 0:
                wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
    # Note: ipvt here is a permutation array
    x[ipvt] = wa1
    # Initialize the iteration counter. Evaluate the function at the
    # origin, and test for acceptance of the gauss-newton direction
    iter = 0
    wa2 = diag * x
    dxnorm = self.enorm(wa2)
    fp = dxnorm - delta
    if fp <= 0.1*delta:
        # Gauss-Newton step already fits the trust region: par = 0.
        return [r, 0., x, sdiag]
    # If the jacobian is not rank deficient, the newton step provides a
    # lower bound, parl, for the zero of the function. Otherwise set
    # this bound to zero.
    parl = 0.
    if nsing >= n:
        wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
        wa1[0] = wa1[0] / r[0,0]  # Degenerate case
        for j in range(1,n):  # Note "1" here, not zero
            sum0 = sum(r[0:j,j]*wa1[0:j])
            wa1[j] = (wa1[j] - sum0)/r[j,j]
        temp = self.enorm(wa1)
        parl = ((fp/delta)/temp)/temp
    # Calculate an upper bound, paru, for the zero of the function
    for j in range(n):
        sum0 = sum(r[0:j+1,j]*qtb[0:j+1])
        wa1[j] = sum0/diag[ipvt[j]]
    gnorm = self.enorm(wa1)
    paru = gnorm/delta
    if paru == 0:
        paru = dwarf/numpy.min([delta,0.1])
    # If the input par lies outside of the interval (parl,paru), set
    # par to the closer endpoint
    par = numpy.max([par,parl])
    par = numpy.min([par,paru])
    if par == 0:
        par = gnorm/dxnorm
    # Beginning of an iteration
    while(1):
        iter = iter + 1
        # Evaluate the function at the current value of par
        if par == 0:
            par = numpy.max([dwarf, paru*0.001])
        temp = numpy.sqrt(par)
        wa1 = temp * diag
        [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
        wa2 = diag*x
        dxnorm = self.enorm(wa2)
        temp = fp
        fp = dxnorm - delta
        # Stop when within 10% of delta, stuck at the lower bound with a
        # still-negative non-increasing fp, or out of iterations.
        if (numpy.abs(fp) <= 0.1*delta) or \
           ((parl == 0) and (fp <= temp) and (temp < 0)) or \
           (iter == 10):
            break;
        # Compute the newton correction
        wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
        for j in range(n-1):
            wa1[j] = wa1[j]/sdiag[j]
            wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
        wa1[n-1] = wa1[n-1]/sdiag[n-1]  # Degenerate case
        temp = self.enorm(wa1)
        parc = ((fp/delta)/temp)/temp
        # Depending on the sign of the function, update parl or paru
        if fp > 0:
            parl = numpy.max([parl,par])
        if fp < 0:
            paru = numpy.min([paru,par])
        # Compute an improved estimate for par
        par = numpy.max([parl, par+parc])
        # End of an iteration
    # Termination
    return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
    """Apply TIED-parameter expressions, overwriting entries of *p* in place.

    Each non-empty string in *ptied* is a Python expression written in
    terms of the parameter vector ``p``; it is evaluated and assigned
    to the corresponding element.  NOTE: the expressions are executed
    with exec(), so PARINFO 'tied' strings must come from trusted
    input only.
    """
    if self.debug:
        print ('Entering tie...')
    if ptied is None:
        return
    for idx in range(len(ptied)):
        expr = ptied[idx]
        if expr == '':
            continue
        # e.g. expr 'p[0]*2' becomes the statement "p[1] = p[0]*2".
        exec('p[%d] = %s' % (idx, expr))
    return p
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
    """Compute the covariance matrix inverse(a^T * a) (MINPACK COVAR port).

    Works from the full upper triangle of R and the column permutation
    produced by the QR factorization of a.  Columns whose diagonal
    element falls below tol*|r[0,0]| are treated as linearly dependent
    and their covariance rows/columns are zeroed.

    Parameters
    ----------
    rr : ndarray, shape (n, n)
        Full upper triangle of R.
    ipvt : ndarray, optional
        Column permutation; identity when omitted.
    tol : float
        Relative tolerance defining the numerical rank of a.

    Returns
    -------
    ndarray or int
        The n x n covariance matrix, or -1 when *rr* is not a square
        two-dimensional matrix.
    """
    if self.debug:
        print ('Entering calc_covar...')
    if numpy.ndim(rr) != 2:
        print ('ERROR: r must be a two-dimensional matrix')
        return -1
    s = rr.shape
    n = s[0]
    if s[0] != s[1]:
        print ('ERROR: r must be a square matrix')
        return -1
    if ipvt is None:
        ipvt = numpy.arange(n)
    r = rr.copy()
    r.shape = [n,n]
    # For the inverse of r in the full upper triangle of r
    # l tracks the last column within the numerical rank.
    l = -1
    tolr = tol * numpy.abs(r[0,0])
    for k in range(n):
        if numpy.abs(r[k,k]) <= tolr:
            break
        r[k,k] = 1./r[k,k]
        for j in range(k):
            temp = r[k,k] * r[j,k]
            r[j,k] = 0.
            r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
        l = k
    # Form the full upper triangle of the inverse of (r transpose)*r
    # in the full upper triangle of r
    if l >= 0:
        for k in range(l+1):
            for j in range(k):
                temp = r[j,k]
                r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
            temp = r[k,k]
            r[0:k+1,k] = temp * r[0:k+1,k]
    # For the full lower triangle of the covariance matrix
    # in the strict lower triangle or and in wa
    wa = numpy.repeat([r[0,0]], n)
    for j in range(n):
        jj = ipvt[j]
        sing = j > l  # column beyond the numerical rank -> zeroed out
        for i in range(j+1):
            if sing:
                r[i,j] = 0.
            ii = ipvt[i]
            if ii > jj:
                r[ii,jj] = r[i,j]
            if ii < jj:
                r[jj,ii] = r[i,j]
        wa[jj] = r[j,j]
    # Symmetrize the covariance matrix in r
    for j in range(n):
        r[0:j+1,j] = r[j,0:j+1]
        r[j,j] = wa[j]
    return r
class machar:
    """Machine-precision constants used throughout MPFIT."""

    def __init__(self, double=1):
        """Populate the constants for single (double=0) or double precision."""
        info = numpy.finfo(numpy.float64 if double else numpy.float32)
        self.machep = info.eps      # smallest eps with 1 + eps != 1
        self.maxnum = info.max      # largest representable number
        self.minnum = info.tiny     # smallest positive normal number
        self.maxlog = numpy.log(self.maxnum)
        self.minlog = numpy.log(self.minnum)
        # Guard values used by the robust Euclidean norm to avoid
        # underflow/overflow when squaring vector components.
        self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
        self.rgiant = numpy.sqrt(self.maxnum) * 0.1
|
USNavalResearchLaboratoryREPO_NAMEeispacPATH_START.@eispac_extracted@eispac-main@eispac@extern@mpfit.py@.PATH_END.py
|
{
"filename": "_docstring_gen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/_docstring_gen.py",
"type": "Python"
}
|
from __future__ import absolute_import
import re as _re
import plotly.io as pio
from plotly.basedatatypes import BaseFigure
import sys
# Perform docstrings generation
def copy_doc_without_fig(from_fn, to_method):
    """
    Copy a plotly.io function's docstring onto a Figure method, dropping
    the documentation of the leading ``fig`` argument along the way.
    """
    # Strip the "fig:" parameter block: everything from the indented
    # "fig:" label up to the next indented parameter name is collapsed.
    docstr = _re.sub(r" {4}fig:(?:.*?\n)*? {4}(\w+)", r" \1", from_fn.__doc__)
    # On Python 2 a bound method only exposes a writable __doc__ via
    # its underlying function object.
    target = to_method.__func__ if sys.version_info[0] < 3 else to_method
    target.__doc__ = docstr
# Attach the plotly.io docstrings to the corresponding BaseFigure
# methods, dropping the leading ``fig`` argument description since the
# methods implicitly operate on the figure itself.
copy_doc_without_fig(pio.show, BaseFigure.show)
copy_doc_without_fig(pio.to_json, BaseFigure.to_json)
copy_doc_without_fig(pio.write_json, BaseFigure.write_json)
copy_doc_without_fig(pio.to_html, BaseFigure.to_html)
copy_doc_without_fig(pio.write_html, BaseFigure.write_html)
copy_doc_without_fig(pio.to_image, BaseFigure.to_image)
copy_doc_without_fig(pio.write_image, BaseFigure.write_image)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@_docstring_gen.py@.PATH_END.py
|
{
"filename": "kkr.py",
"repo_name": "PyEllips/pyElli",
"repo_path": "pyElli_extracted/pyElli-master/src/elli/kkr/kkr.py",
"type": "Python"
}
|
r"""Calculate Kramers-Kronig relations according to Maclaurin's formula [1]_.
Here, the differential formulation is used,
which means that the transformation always decays to zero at infinity,
leaving a free infinity offset.
This offset is typically referred to as :math:`\epsilon(\infty)` in spectroscopy.
Since the Kramers-Kronig relation integrates over the whole spectrum for each point,
it is very sensitive to sampling changes and non-zero values.
To obtain best values the y-axis
should be zero anywhere outside the integration range.
However, since this is not possible for every dispersion
relation the calculation should be done on a much wider
range than the actual interval used.
Additionally, the discretisation steps of the x-axis should be kept constant.
For the transformation of the real to the imaginary part you need to be especially cautious.
Typically, in spectroscopy the real part of the dielectric
function is non-zero throughout the whole spectrum
and especially :math:`\epsilon(\infty) \ne 0`.
This makes the Kramers-Kronig transformation virtually impossible in these cases
as it suffers from major uncertainties.
Hence, this transformation is included in pyElli only for completeness and for the
special cases it may be applicable.
.. rubric:: References
.. [1] Ohta and Ishida, Appl. Spectroscopy 42, 952 (1988), https://doi.org/10.1366/0003702884430380
"""
# pylint: disable=invalid-name
from typing import Callable
import numpy as np
def _integrate_im(im: np.ndarray, x: np.ndarray, x_i: np.ndarray) -> np.ndarray:
"""Calculate the discrete imaginary sum (integral) for the kkr.
Args:
im (numpy.ndarray): The imaginary values from which to calculate. (shape (1, n))
x (numpy.ndarray): The x-axis on which to calculate. (shape (1, n))
x_i (numpy.ndarray): The current points around which to integrate. (shape (m, 1))
Returns:
numpy.ndarray: The integral sum. (shape (m,))
"""
return np.sum(x * im / (x * x - x_i * x_i), axis=1)
def _integrate_im_reciprocal(
im: np.ndarray, x: np.ndarray, x_i: np.ndarray
) -> np.ndarray:
"""Calculate the discrete imaginary sum (integral) for the kkr.
This formulation uses an 1/x axis to transform a wavelength axis.
Args:
im (numpy.ndarray): The imaginary values from which to calculate. (shape (1, n))
x (numpy.ndarray): The reciprocal x-axis on which to calculate. (shape (1, n))
x_i (numpy.ndarray): The current point around which to integrate. (shape (m, 1))
Returns:
numpy.ndarray: The integral sum. (shape (m,))
"""
return np.sum(im / (x * (1.0 - x * x / (x_i * x_i))), axis=1)
def _integrate_re(re: np.ndarray, x: np.ndarray, x_i: np.ndarray) -> np.ndarray:
"""Calculate the discrete real sum (integral) for the kkr.
Args:
re (numpy.ndarray): The real values from which to calculate. (shape (1, n))
x (numpy.ndarray): The x-axis on which to calculate. (shape (1, n))
x_i (numpy.ndarray): The current point around which to integrate. (shape (m, 1))
Returns:
numpy.ndarray: The real sum. (shape (m,))
"""
return np.sum(x_i * re / (x * x - x_i * x_i), axis=1)
def _integrate_re_reciprocal(
re: np.ndarray, x: np.ndarray, x_i: np.ndarray
) -> np.ndarray:
"""Calculate the discrete real sum (integral) for the kkr.
This formulation uses an 1/x axis to transform a wavelength axis.
Args:
re (numpy.ndarray): The real values from which to calculate. (shape (1, n))
x (numpy.ndarray): The reciprocal x-axis on which to calculate. (shape (1, n))
x_i (float): The current point around which to integrate. (shape (m, 1))
Returns:
numpy.ndarray: The real sum. (shape (m,))
"""
return np.sum(re / (x_i - x * x / x_i), axis=1)
def _calc_kkr(
t: np.ndarray,
x: np.ndarray,
trafo: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray],
) -> np.ndarray:
"""Calculates the Kramers-Kronig relation
according to Maclaurin's formula.
Args:
t (numpy.ndarray): The y-axis on which to transform.
x (numpy.ndarray): The x-axis on which to transform.
trafo (Callable[[numpy.ndarray, numpy.ndarray, numpy.ndarray], numpy.ndarray]):
The transformation function.
Raises:
ValueError: y and x axis must have the same length.
Returns:
np.ndarray: The kkr transformed y-axis
"""
if len(t) != len(x):
raise ValueError(
"y- and x-axes arrays must have the same length, "
f"but have lengths {len(t)} and {len(x)}."
)
integral = np.empty(len(t))
interval = np.diff(x, prepend=x[1] - x[0])
odd_slice = slice(1, None, 2)
even_slice = slice(0, None, 2)
integral[even_slice] = trafo(
t[np.newaxis, odd_slice],
x[np.newaxis, odd_slice],
x[even_slice, np.newaxis],
)
integral[odd_slice] = trafo(
t[np.newaxis, even_slice],
x[np.newaxis, even_slice],
x[odd_slice, np.newaxis],
)
return 4 / np.pi * interval * integral
def re2im(re: np.ndarray, x: np.ndarray) -> np.ndarray:
    r"""Differential Kramers-Kronig transform: real part to imaginary part.

    Implements Maclaurin's formula for

    .. math::
        \Delta \Im(x_i) = \Im(x_i) - \Im(\infty) =
        \frac{2}{\pi} \int_0^\infty \frac{x_i \Re(x)}{x^2 - x_i^2} dx

    Args:
        re (numpy.ndarray): The real values to transform.
        x (numpy.ndarray): The axis on which to transform.

    Returns:
        numpy.ndarray: The transformed imaginary part.
    """
    kernel = _integrate_re
    return _calc_kkr(re, x, kernel)
def im2re(im: np.ndarray, x: np.ndarray) -> np.ndarray:
    r"""Differential Kramers-Kronig transform: imaginary part to real part.

    Implements Maclaurin's formula for

    .. math::
        \Delta \Re(x_i) = \Re(x_i) - \Re(\infty) =
        \frac{2}{\pi} \int_0^\infty \frac{x \Im(x)}{x^2 - x_i^2} dx

    Args:
        im (numpy.ndarray): The imaginary values to transform.
        x (numpy.ndarray): The axis on which to transform.

    Returns:
        numpy.ndarray: The transformed real part.
    """
    kernel = _integrate_im
    return _calc_kkr(im, x, kernel)
def re2im_reciprocal(re: np.ndarray, x: np.ndarray) -> np.ndarray:
    r"""Differential Kramers-Kronig transform (real to imaginary), reciprocal axis.

    Assumes a reciprocal x-axis such as wavelength in spectroscopy and
    implements Maclaurin's formula for

    .. math::
        \Delta \Im(x_i) = \Im(x_i) - \Im(\infty) =
        \frac{2}{\pi} \int_0^\infty \frac{\Re(x)}{x_i - \frac{x^2}{x_i}} dx

    Args:
        re (numpy.ndarray): The real values to transform.
        x (numpy.ndarray): The reciprocal axis on which to transform.

    Returns:
        numpy.ndarray: The transformed imaginary part.
    """
    kernel = _integrate_re_reciprocal
    return _calc_kkr(re, x, kernel)
def im2re_reciprocal(im: np.ndarray, x: np.ndarray) -> np.ndarray:
    r"""Differential Kramers-Kronig transform (imaginary to real), reciprocal axis.

    Assumes a reciprocal x-axis such as wavelength in spectroscopy and
    implements Maclaurin's formula for

    .. math::
        \Delta \Re(x_i) = \Re(x_i) - \Re(\infty) =
        \frac{2}{\pi} \int_0^\infty \frac{x \Im(x)}{1 - \frac{x^2}{x_i^2}} dx

    Args:
        im (numpy.ndarray): The imaginary values to transform.
        x (numpy.ndarray): The reciprocal axis on which to transform.

    Returns:
        numpy.ndarray: The transformed real part.
    """
    kernel = _integrate_im_reciprocal
    return _calc_kkr(im, x, kernel)
|
PyEllipsREPO_NAMEpyElliPATH_START.@pyElli_extracted@pyElli-master@src@elli@kkr@kkr.py@.PATH_END.py
|
{
"filename": "test_binary_hashindex.py",
"repo_name": "facebookresearch/faiss",
"repo_path": "faiss_extracted/faiss-main/tests/test_binary_hashindex.py",
"type": "Python"
}
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import numpy as np
import faiss
from common_faiss_tests import make_binary_dataset
def bitvec_shuffle(a, order):
    """Permute the bit columns of a packed binary matrix via faiss.

    Args:
        a: uint8 array of shape (n, d/8) holding n packed d-bit vectors.
        order: int32 array of db source bit indices.

    Returns:
        uint8 array of shape (n, db/8) with bits rearranged per ``order``.
    """
    n, d = a.shape
    db, = order.shape
    out = np.empty((n, db // 8), dtype='uint8')
    faiss.bitvec_shuffle(
        n, d * 8, db,
        faiss.swig_ptr(order),
        faiss.swig_ptr(a), faiss.swig_ptr(out))
    return out
class TestSmallFuncs(unittest.TestCase):
    """Unit test for the bitvec_shuffle helper."""

    def test_shuffle(self):
        """A permutation followed by its inverse must round-trip the bits."""
        nbits = 256
        nvec = 1000
        rng = np.random.RandomState(123)
        perm = rng.permutation(nbits).astype('int32')
        data = rng.randint(256, size=(nvec, nbits // 8)).astype('uint8')
        # Shuffle in two halves and concatenate the packed outputs.
        first_half = bitvec_shuffle(data, perm[:128])
        second_half = bitvec_shuffle(data, perm[128:])
        shuffled = np.hstack((first_half, second_half))
        # Build the inverse permutation and apply it.
        inv = np.empty(nbits, dtype='int32')
        inv[perm] = np.arange(nbits)
        restored = bitvec_shuffle(shuffled, inv)
        np.testing.assert_array_equal(data, restored)
class TestRange(unittest.TestCase):
    """Range search on binary hash indexes, checked against brute force.

    Range-search results come back in CSR-like form: ``L`` holds per-query
    limits so query ``i``'s ids are ``I[L[i]:L[i + 1]]``.
    """

    def test_hash(self):
        d = 128
        nq = 100
        nb = 2000
        (_, xb, xq) = make_binary_dataset(d, 0, nb, nq)
        # Brute-force reference results.
        index_ref = faiss.IndexBinaryFlat(d)
        index_ref.add(xb)
        radius = 55
        Lref, Dref, Iref = index_ref.range_search(xq, radius)
        index = faiss.IndexBinaryHash(d, 10)
        index.add(xb)
        # index.display()
        nfound = []
        ndis = []  # NOTE(review): collected but never asserted on; debug only
        stats = faiss.cvar.indexBinaryHash_stats
        # Increasing nflip widens the bucket probe; recall must be monotone.
        for n_bitflips in range(index.b + 1):
            index.nflip = n_bitflips
            stats.reset()
            Lnew, Dnew, Inew = index.range_search(xq, radius)
            for i in range(nq):
                ref = Iref[Lref[i]:Lref[i + 1]]
                new = Inew[Lnew[i]:Lnew[i + 1]]
                snew = set(new)
                # no duplicates
                self.assertTrue(len(new) == len(snew))
                # subset of real results
                self.assertTrue(snew <= set(ref))
            nfound.append(Lnew[-1])
            ndis.append(stats.ndis)
        nfound = np.array(nfound)
        # At maximum nflip every true result must be recovered.
        self.assertTrue(nfound[-1] == Lref[-1])
        self.assertTrue(np.all(nfound[1:] >= nfound[:-1]))

    def test_multihash(self):
        d = 128
        nq = 100
        nb = 2000
        (_, xb, xq) = make_binary_dataset(d, 0, nb, nq)
        # Brute-force reference results.
        index_ref = faiss.IndexBinaryFlat(d)
        index_ref.add(xb)
        radius = 55
        Lref, Dref, Iref = index_ref.range_search(xq, radius)
        nfound = []
        ndis = []  # NOTE(review): collected but never asserted on; debug only
        # More hash tables should only increase recall.
        for nh in 1, 3, 5:
            index = faiss.IndexBinaryMultiHash(d, nh, 10)
            index.add(xb)
            # index.display()
            stats = faiss.cvar.indexBinaryHash_stats
            index.nflip = 2
            stats.reset()
            Lnew, Dnew, Inew = index.range_search(xq, radius)
            for i in range(nq):
                ref = Iref[Lref[i]:Lref[i + 1]]
                new = Inew[Lnew[i]:Lnew[i + 1]]
                snew = set(new)
                # no duplicates
                self.assertTrue(len(new) == len(snew))
                # subset of real results
                self.assertTrue(snew <= set(ref))
            nfound.append(Lnew[-1])
            ndis.append(stats.ndis)
        nfound = np.array(nfound)
        # self.assertTrue(nfound[-1] == Lref[-1])
        self.assertTrue(np.all(nfound[1:] >= nfound[:-1]))
class TestKnn(unittest.TestCase):
    """kNN search on binary hash indexes, compared against brute force."""

    def test_hash_and_multihash(self):
        """More hash tables / fewer hash bits should recover more true neighbors."""
        d = 128
        nq = 100
        nb = 2000
        (_, xb, xq) = make_binary_dataset(d, 0, nb, nq)
        # Brute-force reference neighbors.
        index_ref = faiss.IndexBinaryFlat(d)
        index_ref.add(xb)
        k = 10
        Dref, Iref = index_ref.search(xq, k)
        nfound = {}
        # nh == 0 denotes the single-table IndexBinaryHash variant.
        for nh in 0, 1, 3, 5:
            for nbit in 4, 7:
                if nh == 0:
                    index = faiss.IndexBinaryHash(d, nbit)
                else:
                    index = faiss.IndexBinaryMultiHash(d, nh, nbit)
                index.add(xb)
                index.nflip = 2
                Dnew, Inew = index.search(xq, k)
                # Count how many reference neighbors were recovered.
                nf = 0
                for i in range(nq):
                    ref = Iref[i]
                    new = Inew[i]
                    snew = set(new)
                    # no duplicates
                    self.assertTrue(len(new) == len(snew))
                    nf += len(set(ref) & snew)
                nfound[(nh, nbit)] = nf
            # Fewer hash bits -> larger buckets -> higher recall.
            self.assertGreater(nfound[(nh, 4)], nfound[(nh, 7)])
            # test serialization
            index2 = faiss.deserialize_index_binary(
                faiss.serialize_index_binary(index))
            D2, I2 = index2.search(xq, k)
            np.testing.assert_array_equal(Inew, I2)
            np.testing.assert_array_equal(Dnew, D2)
        # A 1-table multi-hash should behave (almost) like the plain hash.
        self.assertGreater(3, abs(nfound[(0, 7)] - nfound[(1, 7)]))
        # More tables -> higher recall.
        self.assertGreater(nfound[(3, 7)], nfound[(1, 7)])
        self.assertGreater(nfound[(5, 7)], nfound[(3, 7)])

    def subtest_result_order(self, nh):
        """Distances returned by search must be sorted per query row."""
        d = 128
        nq = 10
        nb = 200
        (_, xb, xq) = make_binary_dataset(d, 0, nb, nq)
        nbit = 10
        if nh == 0:
            index = faiss.IndexBinaryHash(d, nbit)
        else:
            index = faiss.IndexBinaryMultiHash(d, nh, nbit)
        index.add(xb)
        index.nflip = 5
        k = 10
        Do, Io = index.search(xq, k)
        self.assertTrue(
            np.all(Do[:, 1:] >= Do[:, :-1])
        )

    def test_result_order_binhash(self):
        self.subtest_result_order(0)

    def test_result_order_miltihash(self):
        # NOTE(review): "miltihash" is a typo for "multihash"; renaming
        # would change the reported test id, so it is only flagged here.
        self.subtest_result_order(3)
"""
I suspect this test crashes CircleCI on Linux
# this is an expensive test, so we don't run it by default
class TestLargeIndexWrite: # (unittest.TestCase):
def test_write_580M(self):
dim = 8
nhash = 1
num_million = 580 # changing to 570 works
index1 = faiss.IndexBinaryMultiHash(dim, nhash, int(dim/nhash))
random_hash_codes = np.random.randint(0, 256, (
num_million * int(1e6), int(dim/8))).astype("uint8")
index1.add(random_hash_codes)
faiss.write_index_binary(index1, "/tmp/tmp.faiss")
index2 = faiss.read_index_binary("/tmp/tmp.faiss")
"""
|
facebookresearchREPO_NAMEfaissPATH_START.@faiss_extracted@faiss-main@tests@test_binary_hashindex.py@.PATH_END.py
|
{
"filename": "mpi_test.py",
"repo_name": "EliseJ/astroABC",
"repo_path": "astroABC_extracted/astroABC-master/astroabc/mpi_test.py",
"type": "Python"
}
|
import numpy as np
import os
import sys
#sys.path.append(os.getcwd())
from .abc_class import *
def dist(d, x):
    """Distance metric rho: L1 distance between the column means of d and x."""
    mean_gap = np.mean(x, axis=0) - np.mean(d, axis=0)
    return np.abs(mean_gap).sum()
def simulation(param):
    """Generate mock data from the normal model at *param* with covariance 0.1."""
    covariance = 0.1
    model = Model("normal", 1000)
    return model.make_mock(param, covariance)
class test_abc:
    """Smoke test for the ABC sampler setup (nose-style setUp fixture)."""

    def setUp(self):
        # Sampler dimensions: 1 parameter, 100 particles, 15 iterations.
        self.nparam =1
        self.npart = 100
        self.nsamples =1000
        self.niter =15
        # Tolerance schedule: start at 0.7, shrink towards 0.005.
        self.tlevels = [.7,0.005]
        self.model_type = "normal"
        # Keyword options forwarded verbatim to ABC_class.
        self.prop={'tol_type':'exp',"verbose":1,'adapt_t':True,'threshold':75,
                'pert_kernel':2, 'variance_method':0, 'dist_type': "user",'dfunc':dist,
                'outfile':"mpi_test.txt",'mpi':False,'mp':False,'num_proc':None,
                'restart':"restart_test.txt",'from_restart':False}
        self.param = [0.1411]
        var = 0.1
        # Mock dataset drawn from the normal model at the true parameter.
        self.data = Model(self.model_type,self.nsamples).make_mock(self.param,var)
        priorname = ["normal"]
        hyperp = [[x+0.2 ,0.05*2] for x in self.param]
        # NOTE(review): zip() is a one-shot iterator on Python 3 -- fine if
        # ABC_class consumes it exactly once; confirm it is not re-iterated.
        self.prior = zip(priorname,hyperp)

    def test_mpi(self):
        # Enabling MPI should give the sampler a live parallel pool.
        self.prop['mpi']=True
        sampler = ABC_class(self.nparam,self.npart,self.data,self.tlevels,self.niter,self.prior,**self.prop)
        assert(sampler.parallel.pool)
|
EliseJREPO_NAMEastroABCPATH_START.@astroABC_extracted@astroABC-master@astroabc@mpi_test.py@.PATH_END.py
|
{
"filename": "_bgcolor.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/ternary/_bgcolor.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BgcolorValidator(_plotly_utils.basevalidators.ColorValidator):
def __init__(self, plotly_name="bgcolor", parent_name="layout.ternary", **kwargs):
super(BgcolorValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "style"),
**kwargs
)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@ternary@_bgcolor.py@.PATH_END.py
|
{
"filename": "offsetbox.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/offsetbox.py",
"type": "Python"
}
|
r"""
Container classes for `.Artist`\s.
`OffsetBox`
The base of all container artists defined in this module.
`AnchoredOffsetbox`, `AnchoredText`
Anchor and align an arbitrary `.Artist` or a text relative to the parent
axes or a specific anchor point.
`DrawingArea`
A container with fixed width and height. Children have a fixed position
inside the container and may be clipped.
`HPacker`, `VPacker`
    Containers for laying out their children vertically or horizontally.
`PaddedBox`
A container to add a padding around an `.Artist`.
`TextArea`
Contains a single `.Text` instance.
"""
import functools
import numpy as np
import matplotlib as mpl
from matplotlib import _api, _docstring
import matplotlib.artist as martist
import matplotlib.path as mpath
import matplotlib.text as mtext
import matplotlib.transforms as mtransforms
from matplotlib.font_manager import FontProperties
from matplotlib.image import BboxImage
from matplotlib.patches import (
FancyBboxPatch, FancyArrowPatch, bbox_artist as mbbox_artist)
from matplotlib.transforms import Bbox, BboxBase, TransformedBbox
DEBUG = False
def _compat_get_offset(meth):
    """
    Decorator for the get_offset method of OffsetBox and subclasses, that
    allows supporting both the new signature (self, bbox, renderer) and the old
    signature (self, width, height, xdescent, ydescent, renderer).
    """
    # Two candidate signatures; select_matching_signature picks whichever
    # one the caller's arguments satisfy and returns its bound locals.
    sigs = [lambda self, width, height, xdescent, ydescent, renderer: locals(),
            lambda self, bbox, renderer: locals()]
    @functools.wraps(meth)
    def get_offset(self, *args, **kwargs):
        params = _api.select_matching_signature(sigs, self, *args, **kwargs)
        # Old-style extent arguments are converted to an equivalent Bbox
        # whose origin encodes the negated descents.
        bbox = (params["bbox"] if "bbox" in params else
                Bbox.from_bounds(-params["xdescent"], -params["ydescent"],
                                 params["width"], params["height"]))
        return meth(params["self"], bbox, params["renderer"])
    return get_offset
# for debugging use
def _bbox_artist(*args, **kwargs):
    """Draw a debug bounding box via `.bbox_artist`, only when DEBUG is set."""
    if not DEBUG:
        return
    mbbox_artist(*args, **kwargs)
def _get_packed_offsets(widths, total, sep, mode="fixed"):
    r"""
    Pack boxes specified by their *widths*.

    The terminology assumes a horizontal layout, but the function works
    equally for a vertical one.  Three packing *mode*\s exist:

    - 'fixed': boxes are packed tight to the left, *sep* apart.  If *total*
      is *None* it becomes the right edge of the last box; otherwise it is
      passed through unchecked (the last box may extend past it).
    - 'expand': boxes are distributed with equal spacing so the first box
      starts at 0 and the last ends at *total* (*sep* is ignored; a *total*
      of *None* is treated as 1).  If *total* is smaller than the summed
      widths the boxes overlap.
    - 'equal': with a given *total*, the space is split into N equal slots
      and each box is left-aligned in its slot.  With *total* None, *sep*
      must be given and each slot is ``max(widths) + sep`` wide, making the
      total ``N * (max(widths) + sep)``.

    Parameters
    ----------
    widths : list of float
        Widths of boxes to be packed.
    total : float or None
        Intended total length. *None* if not used.
    sep : float or None
        Spacing between boxes.
    mode : {'fixed', 'expand', 'equal'}
        The packing mode.

    Returns
    -------
    total : float
        The total width needed to accommodate the laid out boxes.
    offsets : array of float
        The left offsets of the boxes.
    """
    _api.check_in_list(["fixed", "expand", "equal"], mode=mode)

    if mode == "fixed":
        # Cumulative right edges; the left offsets are everything but the last.
        edges = np.cumsum([0] + [w + sep for w in widths])
        if total is None:
            total = edges[-1] - sep
        return total, edges[:-1]

    elif mode == "expand":
        # Treat a None total as 1 to avoid a TypeError when used in
        # conjunction with tight layout.
        if total is None:
            total = 1
        nboxes = len(widths)
        gap = (total - sum(widths)) / (nboxes - 1) if nboxes > 1 else 0
        edges = np.cumsum([0] + [w + gap for w in widths])
        return total, edges[:-1]

    elif mode == "equal":
        slot = max(widths)
        if total is None:
            if sep is None:
                raise ValueError("total and sep cannot both be None when "
                                 "using layout mode 'equal'")
            total = (slot + sep) * len(widths)
        else:
            sep = total / len(widths) - slot
        offsets = (slot + sep) * np.arange(len(widths))
        return total, offsets
def _get_aligned_offsets(yspans, height, align="baseline"):
    """
    Align boxes each specified by their ``(y0, y1)`` spans.

    The terminology assumes a horizontal layout (i.e. vertical alignment),
    but the function works equally for a vertical one.

    Parameters
    ----------
    yspans
        List of (y0, y1) spans of boxes to be aligned.
    height : float or None
        Intended total height. If None, the maximum of the heights
        (``y1 - y0``) in *yspans* is used.
    align : {'baseline', 'left', 'top', 'right', 'bottom', 'center'}
        The alignment anchor of the boxes.

    Returns
    -------
    (y0, y1)
        y range spanned by the packing. If a *height* was originally passed
        in, then for all alignments other than "baseline", a span of ``(0,
        height)`` is used without checking that it is actually large enough.
    offsets
        The bottom offsets of the boxes.
    """
    _api.check_in_list(
        ["baseline", "left", "top", "right", "bottom", "center"], align=align)
    if height is None:
        height = max(y1 - y0 for y0, y1 in yspans)

    if align == "baseline":
        # Boxes keep their own baselines; the span is the union of spans.
        lo = min(y0 for y0, y1 in yspans)
        hi = max(y1 for y0, y1 in yspans)
        return (lo, hi), [0] * len(yspans)

    # For every other alignment the packing spans (0, height).
    if align in ("left", "bottom"):
        offsets = [-y0 for y0, _ in yspans]
    elif align in ("right", "top"):
        offsets = [height - y1 for _, y1 in yspans]
    else:  # "center"
        offsets = [(height - (y1 - y0)) * .5 - y0 for y0, y1 in yspans]
    return (0, height), offsets
class OffsetBox(martist.Artist):
    """
    The OffsetBox is a simple container artist.
    The child artists are meant to be drawn at a relative position to its
    parent.
    Being an artist itself, all parameters are passed on to `.Artist`.
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args)
        self._internal_update(kwargs)
        # Clipping has not been implemented in the OffsetBox family, so
        # disable the clip flag for consistency. It can always be turned back
        # on to zero effect.
        self.set_clip_on(False)
        self._children = []
        # Offset in display units; may also be a callable (see set_offset).
        self._offset = (0, 0)
    def set_figure(self, fig):
        """
        Set the `.Figure` for the `.OffsetBox` and all its children.
        Parameters
        ----------
        fig : `~matplotlib.figure.Figure`
        """
        super().set_figure(fig)
        for c in self.get_children():
            c.set_figure(fig)
    @martist.Artist.axes.setter
    def axes(self, ax):
        # TODO deal with this better
        # Propagate the Axes assignment to all children.
        martist.Artist.axes.fset(self, ax)
        for c in self.get_children():
            if c is not None:
                c.axes = ax
    def contains(self, mouseevent):
        """
        Delegate the mouse event contains-check to the children.
        As a container, the `.OffsetBox` does not respond itself to
        mouseevents.
        Parameters
        ----------
        mouseevent : `~matplotlib.backend_bases.MouseEvent`
        Returns
        -------
        contains : bool
            Whether any values are within the radius.
        details : dict
            An artist-specific dictionary of details of the event context,
            such as which points are contained in the pick radius. See the
            individual Artist subclasses for details.
        See Also
        --------
        .Artist.contains
        """
        if self._different_canvas(mouseevent):
            return False, {}
        # Return the first child hit, if any.
        for c in self.get_children():
            a, b = c.contains(mouseevent)
            if a:
                return a, b
        return False, {}
    def set_offset(self, xy):
        """
        Set the offset.
        Parameters
        ----------
        xy : (float, float) or callable
            The (x, y) coordinates of the offset in display units. These can
            either be given explicitly as a tuple (x, y), or by providing a
            function that converts the extent into the offset. This function
            must have the signature::
                def offset(width, height, xdescent, ydescent, renderer) \
-> (float, float)
        """
        self._offset = xy
        self.stale = True
    @_compat_get_offset
    def get_offset(self, bbox, renderer):
        """
        Return the offset as a tuple (x, y).
        The extent parameters have to be provided to handle the case where the
        offset is dynamically determined by a callable (see
        `~.OffsetBox.set_offset`).
        Parameters
        ----------
        bbox : `.Bbox`
        renderer : `.RendererBase` subclass
        """
        # A callable offset receives the old-style extent arguments.
        return (
            self._offset(bbox.width, bbox.height, -bbox.x0, -bbox.y0, renderer)
            if callable(self._offset)
            else self._offset)
    def set_width(self, width):
        """
        Set the width of the box.
        Parameters
        ----------
        width : float
        """
        self.width = width
        self.stale = True
    def set_height(self, height):
        """
        Set the height of the box.
        Parameters
        ----------
        height : float
        """
        self.height = height
        self.stale = True
    def get_visible_children(self):
        r"""Return a list of the visible child `.Artist`\s."""
        return [c for c in self._children if c.get_visible()]
    def get_children(self):
        r"""Return a list of the child `.Artist`\s."""
        return self._children
    def _get_bbox_and_child_offsets(self, renderer):
        """
        Return the bbox of the offsetbox and the child offsets.
        The bbox should satisfy ``x0 <= x1 and y0 <= y1``.
        Parameters
        ----------
        renderer : `.RendererBase` subclass
        Returns
        -------
        bbox
        list of (xoffset, yoffset) pairs
        """
        raise NotImplementedError(
            "get_bbox_and_offsets must be overridden in derived classes")
    def get_bbox(self, renderer):
        """Return the bbox of the offsetbox, ignoring parent offsets."""
        bbox, offsets = self._get_bbox_and_child_offsets(renderer)
        return bbox
    def get_window_extent(self, renderer=None):
        # docstring inherited
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        bbox = self.get_bbox(renderer)
        try:  # Some subclasses redefine get_offset to take no args.
            px, py = self.get_offset(bbox, renderer)
        except TypeError:
            px, py = self.get_offset()
        return bbox.translated(px, py)
    def draw(self, renderer):
        """
        Update the location of children if necessary and draw them
        to the given *renderer*.
        """
        bbox, offsets = self._get_bbox_and_child_offsets(renderer)
        px, py = self.get_offset(bbox, renderer)
        # Position each visible child relative to this box's own offset.
        for c, (ox, oy) in zip(self.get_visible_children(), offsets):
            c.set_offset((px + ox, py + oy))
            c.draw(renderer)
        _bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class PackerBase(OffsetBox):
    def __init__(self, pad=0., sep=0., width=None, height=None,
                 align="baseline", mode="fixed", children=None):
        """
        Parameters
        ----------
        pad : float, default: 0.0
            The boundary padding in points.
        sep : float, default: 0.0
            The spacing between items in points.
        width, height : float, optional
            Width and height of the container box in pixels, calculated if
            *None*.
        align : {'top', 'bottom', 'left', 'right', 'center', 'baseline'}, \
default: 'baseline'
            Alignment of boxes.
        mode : {'fixed', 'expand', 'equal'}, default: 'fixed'
            The packing mode.
            - 'fixed' packs the given `.Artist`\\s tight with *sep* spacing.
            - 'expand' uses the maximal available space to distribute the
              artists with equal spacing in between.
            - 'equal': Each artist an equal fraction of the available space
              and is left-aligned (or top-aligned) therein.
        children : list of `.Artist`
            The artists to pack.
        Notes
        -----
        *pad* and *sep* are in points and will be scaled with the renderer
        dpi, while *width* and *height* are in pixels.
        """
        super().__init__()
        # Store the layout configuration; actual packing happens lazily in
        # the subclasses' _get_bbox_and_child_offsets.
        self.pad = pad
        self.sep = sep
        self.width = width
        self.height = height
        self.align = align
        self.mode = mode
        self._children = children
class VPacker(PackerBase):
    """
    VPacker packs its children vertically, automatically adjusting their
    relative positions at draw time.
    """

    def _get_bbox_and_child_offsets(self, renderer):
        # docstring inherited
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor
        # Propagate a fixed width to expanding child packers before
        # measuring them, so their bboxes reflect the final layout width.
        if self.width is not None:
            for c in self.get_visible_children():
                if isinstance(c, PackerBase) and c.mode == "expand":
                    c.set_width(self.width)
        bboxes = [c.get_bbox(renderer) for c in self.get_visible_children()]
        if not bboxes:
            # Match HPacker: an empty packer yields an empty (padded) bbox
            # and no offsets, instead of a max()-over-empty-sequence
            # ValueError from _get_aligned_offsets.
            return Bbox.from_bounds(0, 0, 0, 0).padded(pad), []
        (x0, x1), xoffsets = _get_aligned_offsets(
            [bbox.intervalx for bbox in bboxes], self.width, self.align)
        height, yoffsets = _get_packed_offsets(
            [bbox.height for bbox in bboxes], self.height, sep, self.mode)
        # Children are stacked top-to-bottom: flip the packed (top-based)
        # offsets into bottom offsets, anchored on the first child.
        yoffsets = height - (yoffsets + [bbox.y1 for bbox in bboxes])
        ydescent = yoffsets[0]
        yoffsets = yoffsets - ydescent
        return (
            Bbox.from_bounds(x0, -ydescent, x1 - x0, height).padded(pad),
            [*zip(xoffsets, yoffsets)])
class HPacker(PackerBase):
    """
    HPacker packs its children horizontally, automatically adjusting their
    relative positions at draw time.
    """
    def _get_bbox_and_child_offsets(self, renderer):
        # docstring inherited
        dpicor = renderer.points_to_pixels(1.)
        pad = self.pad * dpicor
        sep = self.sep * dpicor
        bboxes = [c.get_bbox(renderer) for c in self.get_visible_children()]
        # An empty packer yields an empty (padded) bbox and no offsets.
        if not bboxes:
            return Bbox.from_bounds(0, 0, 0, 0).padded(pad), []
        (y0, y1), yoffsets = _get_aligned_offsets(
            [bbox.intervaly for bbox in bboxes], self.height, self.align)
        width, xoffsets = _get_packed_offsets(
            [bbox.width for bbox in bboxes], self.width, sep, self.mode)
        # Re-anchor packed offsets on the first child's left edge.
        # NOTE(review): `[...] - x0` relies on x0 being a numpy scalar
        # (Bbox coordinates) so the list is broadcast -- confirm upstream.
        x0 = bboxes[0].x0
        xoffsets -= ([bbox.x0 for bbox in bboxes] - x0)
        return (Bbox.from_bounds(x0, y0, width, y1 - y0).padded(pad),
                [*zip(xoffsets, yoffsets)])
class PaddedBox(OffsetBox):
    """
    A container to add a padding around an `.Artist`.
    The `.PaddedBox` contains a `.FancyBboxPatch` that is used to visualize
    it when rendering.
    """
    def __init__(self, child, pad=0., *, draw_frame=False, patch_attrs=None):
        """
        Parameters
        ----------
        child : `~matplotlib.artist.Artist`
            The contained `.Artist`.
        pad : float, default: 0.0
            The padding in points. This will be scaled with the renderer dpi.
            In contrast, *width* and *height* are in *pixels* and thus not
            scaled.
        draw_frame : bool
            Whether to draw the contained `.FancyBboxPatch`.
        patch_attrs : dict or None
            Additional parameters passed to the contained `.FancyBboxPatch`.
        """
        super().__init__()
        self.pad = pad
        self._children = [child]
        # The frame patch; its bounds are synced to the box in update_frame.
        self.patch = FancyBboxPatch(
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=1,  # self.prop.get_size_in_points(),
            snap=True,
            visible=draw_frame,
            boxstyle="square,pad=0",
        )
        if patch_attrs is not None:
            self.patch.update(patch_attrs)
    def _get_bbox_and_child_offsets(self, renderer):
        # docstring inherited.
        # The single child sits at (0, 0); the bbox grows by the dpi-scaled pad.
        pad = self.pad * renderer.points_to_pixels(1.)
        return (self._children[0].get_bbox(renderer).padded(pad), [(0, 0)])
    def draw(self, renderer):
        # docstring inherited
        bbox, offsets = self._get_bbox_and_child_offsets(renderer)
        px, py = self.get_offset(bbox, renderer)
        for c, (ox, oy) in zip(self.get_visible_children(), offsets):
            c.set_offset((px + ox, py + oy))
        # Draw the frame first so children render on top of it.
        self.draw_frame(renderer)
        for c in self.get_visible_children():
            c.draw(renderer)
        self.stale = False
    def update_frame(self, bbox, fontsize=None):
        # Resize the frame patch to the given bbox (and optionally rescale
        # its corner mutation with the font size).
        self.patch.set_bounds(bbox.bounds)
        if fontsize:
            self.patch.set_mutation_scale(fontsize)
        self.stale = True
    def draw_frame(self, renderer):
        # update the location and size of the legend
        self.update_frame(self.get_window_extent(renderer))
        self.patch.draw(renderer)
class DrawingArea(OffsetBox):
    """
    The DrawingArea can contain any Artist as a child. The DrawingArea
    has a fixed width and height. The position of children relative to
    the parent is fixed. The children can be clipped at the
    boundaries of the parent.
    """
    def __init__(self, width, height, xdescent=0., ydescent=0., clip=False):
        """
        Parameters
        ----------
        width, height : float
            Width and height of the container box.
        xdescent, ydescent : float
            Descent of the box in x- and y-direction.
        clip : bool
            Whether to clip the children to the box.
        """
        super().__init__()
        self.width = width
        self.height = height
        self.xdescent = xdescent
        self.ydescent = ydescent
        self._clip_children = clip
        # Children are positioned via offset_transform and scaled to the
        # renderer dpi via dpi_transform (see get_transform).
        self.offset_transform = mtransforms.Affine2D()
        self.dpi_transform = mtransforms.Affine2D()
    @property
    def clip_children(self):
        """
        If the children of this DrawingArea should be clipped
        by DrawingArea bounding box.
        """
        return self._clip_children
    @clip_children.setter
    def clip_children(self, val):
        self._clip_children = bool(val)
        self.stale = True
    def get_transform(self):
        """
        Return the `~matplotlib.transforms.Transform` applied to the children.
        """
        return self.dpi_transform + self.offset_transform
    def set_transform(self, t):
        """
        set_transform is ignored.
        """
    def set_offset(self, xy):
        """
        Set the offset of the container.
        Parameters
        ----------
        xy : (float, float)
            The (x, y) coordinates of the offset in display units.
        """
        self._offset = xy
        # Rebuild the offset transform from scratch each time.
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True
    def get_offset(self):
        """Return offset of the container."""
        return self._offset
    def get_bbox(self, renderer):
        # docstring inherited
        # Width/height/descents are in points; scale them to pixels here.
        dpi_cor = renderer.points_to_pixels(1.)
        return Bbox.from_bounds(
            -self.xdescent * dpi_cor, -self.ydescent * dpi_cor,
            self.width * dpi_cor, self.height * dpi_cor)
    def add_artist(self, a):
        """Add an `.Artist` to the container box."""
        self._children.append(a)
        # Respect a transform the caller set explicitly.
        if not a.is_transform_set():
            a.set_transform(self.get_transform())
        if self.axes is not None:
            a.axes = self.axes
        fig = self.get_figure(root=False)
        if fig is not None:
            a.set_figure(fig)
    def draw(self, renderer):
        # docstring inherited
        # Refresh the dpi scaling before drawing -- the renderer dpi may
        # have changed since the last draw.
        dpi_cor = renderer.points_to_pixels(1.)
        self.dpi_transform.clear()
        self.dpi_transform.scale(dpi_cor)
        # At this point the DrawingArea has a transform
        # to the display space so the path created is
        # good for clipping children
        tpath = mtransforms.TransformedPath(
            mpath.Path([[0, 0], [0, self.height],
                        [self.width, self.height],
                        [self.width, 0]]),
            self.get_transform())
        for c in self._children:
            # Only install the clip path if the child has none of its own.
            if self._clip_children and not (c.clipbox or c._clippath):
                c.set_clip_path(tpath)
            c.draw(renderer)
        _bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class TextArea(OffsetBox):
    """
    The TextArea is a container artist for a single Text instance.
    The text is placed at (0, 0) with baseline+left alignment, by default. The
    width and height of the TextArea instance is the width and height of its
    child text.
    """
    def __init__(self, s,
                 *,
                 textprops=None,
                 multilinebaseline=False,
                 ):
        """
        Parameters
        ----------
        s : str
            The text to be displayed.
        textprops : dict, default: {}
            Dictionary of keyword parameters to be passed to the `.Text`
            instance in the TextArea.
        multilinebaseline : bool, default: False
            Whether the baseline for multiline text is adjusted so that it
            is (approximately) center-aligned with single-line text.
        """
        if textprops is None:
            textprops = {}
        self._text = mtext.Text(0, 0, s, **textprops)
        super().__init__()
        self._children = [self._text]
        # The text is positioned by offset_transform and shifted vertically
        # by _baseline_transform (multiline baseline adjustment).
        self.offset_transform = mtransforms.Affine2D()
        self._baseline_transform = mtransforms.Affine2D()
        self._text.set_transform(self.offset_transform +
                                 self._baseline_transform)
        self._multilinebaseline = multilinebaseline
    def set_text(self, s):
        """Set the text of this area as a string."""
        self._text.set_text(s)
        self.stale = True
    def get_text(self):
        """Return the string representation of this area's text."""
        return self._text.get_text()
    def set_multilinebaseline(self, t):
        """
        Set multilinebaseline.
        If True, the baseline for multiline text is adjusted so that it is
        (approximately) center-aligned with single-line text. This is used
        e.g. by the legend implementation so that single-line labels are
        baseline-aligned, but multiline labels are "center"-aligned with them.
        """
        self._multilinebaseline = t
        self.stale = True
    def get_multilinebaseline(self):
        """
        Get multilinebaseline.
        """
        return self._multilinebaseline
    def set_transform(self, t):
        """
        set_transform is ignored.
        """
    def set_offset(self, xy):
        """
        Set the offset of the container.
        Parameters
        ----------
        xy : (float, float)
            The (x, y) coordinates of the offset in display units.
        """
        self._offset = xy
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True
    def get_offset(self):
        """Return offset of the container."""
        return self._offset
    def get_bbox(self, renderer):
        # Measure a reference "lp" string to get the font's typical
        # ascent (h_) and descent (d_) independent of the actual text.
        _, h_, d_ = renderer.get_text_width_height_descent(
            "lp", self._text._fontproperties,
            ismath="TeX" if self._text.get_usetex() else False)
        bbox, info, yd = self._text._get_layout(renderer)
        w, h = bbox.size
        self._baseline_transform.clear()
        if len(info) > 1 and self._multilinebaseline:
            # Shift the baseline so multiline text is (approximately)
            # center-aligned with single-line text.
            yd_new = 0.5 * h - 0.5 * (h_ - d_)
            self._baseline_transform.translate(0, yd - yd_new)
            yd = yd_new
        else:  # single line
            h_d = max(h_ - d_, h - yd)
            h = h_d + yd
        ha = self._text.get_horizontalalignment()
        # The bbox's x origin depends on the text's horizontal alignment.
        x0 = {"left": 0, "center": -w / 2, "right": -w}[ha]
        return Bbox.from_bounds(x0, -yd, w, h)
    def draw(self, renderer):
        # docstring inherited
        self._text.draw(renderer)
        _bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AuxTransformBox(OffsetBox):
    """
    Offset Box with an *aux_transform*. Its children will be transformed
    with the aux_transform first, then offset. The absolute position
    encoded in the aux_transform is not meaningful: it is automatically
    adjusted so that the lower-left corner of the children's bounding box
    is at (0, 0) before the offset transform is applied.
    It is similar to drawing area, except that the extent of the box
    is not predetermined but calculated from the window extent of its
    children. Furthermore, the extent of the children will be
    calculated in the transformed coordinate.
    """
    def __init__(self, aux_transform):
        self.aux_transform = aux_transform
        super().__init__()
        self.offset_transform = mtransforms.Affine2D()
        # ref_offset_transform makes offset_transform always relative to the
        # lower-left corner of the bbox of its children.
        self.ref_offset_transform = mtransforms.Affine2D()
    def add_artist(self, a):
        """Add an `.Artist` to the container box."""
        self._children.append(a)
        a.set_transform(self.get_transform())
        self.stale = True
    def get_transform(self):
        """
        Return the :class:`~matplotlib.transforms.Transform` applied
        to the children
        """
        # Order matters: aux first, then re-anchor, then offset.
        return (self.aux_transform
                + self.ref_offset_transform
                + self.offset_transform)
    def set_transform(self, t):
        """
        set_transform is ignored.
        """
    def set_offset(self, xy):
        """
        Set the offset of the container.
        Parameters
        ----------
        xy : (float, float)
            The (x, y) coordinates of the offset in display units.
        """
        self._offset = xy
        self.offset_transform.clear()
        self.offset_transform.translate(xy[0], xy[1])
        self.stale = True
    def get_offset(self):
        """Return offset of the container."""
        return self._offset
    def get_bbox(self, renderer):
        # clear the offset transforms
        _off = self.offset_transform.get_matrix()  # to be restored later
        self.ref_offset_transform.clear()
        self.offset_transform.clear()
        # calculate the extent
        bboxes = [c.get_window_extent(renderer) for c in self._children]
        ub = Bbox.union(bboxes)
        # adjust ref_offset_transform so children are measured from (0, 0)
        self.ref_offset_transform.translate(-ub.x0, -ub.y0)
        # restore offset transform
        self.offset_transform.set_matrix(_off)
        return Bbox.from_bounds(0, 0, ub.width, ub.height)
    def draw(self, renderer):
        # docstring inherited
        for c in self._children:
            c.draw(renderer)
        _bbox_artist(self, renderer, fill=False, props=dict(pad=0.))
        self.stale = False
class AnchoredOffsetbox(OffsetBox):
"""
An offset box placed according to location *loc*.
AnchoredOffsetbox has a single child. When multiple children are needed,
use an extra OffsetBox to enclose them. By default, the offset box is
anchored against its parent Axes. You may explicitly specify the
*bbox_to_anchor*.
"""
zorder = 5 # zorder of the legend
# Location codes
codes = {'upper right': 1,
'upper left': 2,
'lower left': 3,
'lower right': 4,
'right': 5,
'center left': 6,
'center right': 7,
'lower center': 8,
'upper center': 9,
'center': 10,
}
def __init__(self, loc, *,
pad=0.4, borderpad=0.5,
child=None, prop=None, frameon=True,
bbox_to_anchor=None,
bbox_transform=None,
**kwargs):
"""
Parameters
----------
loc : str
The box location. Valid locations are
'upper left', 'upper center', 'upper right',
'center left', 'center', 'center right',
'lower left', 'lower center', 'lower right'.
For backward compatibility, numeric values are accepted as well.
See the parameter *loc* of `.Legend` for details.
pad : float, default: 0.4
Padding around the child as fraction of the fontsize.
borderpad : float, default: 0.5
Padding between the offsetbox frame and the *bbox_to_anchor*.
child : `.OffsetBox`
The box that will be anchored.
prop : `.FontProperties`
This is only used as a reference for paddings. If not given,
:rc:`legend.fontsize` is used.
frameon : bool
Whether to draw a frame around the box.
bbox_to_anchor : `.BboxBase`, 2-tuple, or 4-tuple of floats
Box that is used to position the legend in conjunction with *loc*.
bbox_transform : None or :class:`matplotlib.transforms.Transform`
The transform for the bounding box (*bbox_to_anchor*).
**kwargs
All other parameters are passed on to `.OffsetBox`.
Notes
-----
See `.Legend` for a detailed description of the anchoring mechanism.
"""
super().__init__(**kwargs)
self.set_bbox_to_anchor(bbox_to_anchor, bbox_transform)
self.set_child(child)
if isinstance(loc, str):
loc = _api.check_getitem(self.codes, loc=loc)
self.loc = loc
self.borderpad = borderpad
self.pad = pad
if prop is None:
self.prop = FontProperties(size=mpl.rcParams["legend.fontsize"])
else:
self.prop = FontProperties._from_any(prop)
if isinstance(prop, dict) and "size" not in prop:
self.prop.set_size(mpl.rcParams["legend.fontsize"])
self.patch = FancyBboxPatch(
xy=(0.0, 0.0), width=1., height=1.,
facecolor='w', edgecolor='k',
mutation_scale=self.prop.get_size_in_points(),
snap=True,
visible=frameon,
boxstyle="square,pad=0",
)
def set_child(self, child):
    """Attach *child* as the offsetbox to be anchored."""
    self._child = child
    # Propagate our Axes so the child participates in the same layout.
    if child is not None:
        child.axes = self.axes
    self.stale = True
def get_child(self):
    """Return the anchored child box."""
    return self._child
def get_children(self):
    """Return the list of children (always the single anchored child)."""
    return [self._child]
def get_bbox(self, renderer):
    # docstring inherited
    # Pad the child's bbox by *self.pad*, measured in fontsize units
    # converted to pixels for this renderer.
    px_per_point = renderer.points_to_pixels(self.prop.get_size_in_points())
    child_bbox = self.get_child().get_bbox(renderer)
    return child_bbox.padded(self.pad * px_per_point)
def get_bbox_to_anchor(self):
    """Return the bbox that the box is anchored to."""
    anchor = self._bbox_to_anchor
    if anchor is None:
        # No explicit anchor was set: anchor to the parent Axes' bbox.
        return self.axes.bbox
    transform = self._bbox_to_anchor_transform
    if transform is None:
        return anchor
    return TransformedBbox(anchor, transform)
def set_bbox_to_anchor(self, bbox, transform=None):
    """
    Set the bbox that the box is anchored to.

    *bbox* can be a Bbox instance, a list of [left, bottom, width,
    height], or a list of [left, bottom] where the width and
    height will be assumed to be zero. The bbox will be
    transformed to display coordinate by the given transform.
    """
    if bbox is None or isinstance(bbox, BboxBase):
        self._bbox_to_anchor = bbox
    else:
        try:
            n = len(bbox)
        except TypeError as err:
            raise ValueError(f"Invalid bbox: {bbox}") from err
        # A bare (left, bottom) point becomes a degenerate bbox.
        if n == 2:
            bbox = [*bbox, 0, 0]
        self._bbox_to_anchor = Bbox.from_bounds(*bbox)
    self._bbox_to_anchor_transform = transform
    self.stale = True
@_compat_get_offset
def get_offset(self, bbox, renderer):
    # docstring inherited
    # Border padding converted from fontsize fractions to pixels.
    pad = (self.borderpad
           * renderer.points_to_pixels(self.prop.get_size_in_points()))
    bbox_to_anchor = self.get_bbox_to_anchor()
    # Anchor a zero-origin copy of *bbox* inside the anchor box, then
    # translate so the offset maps bbox's own origin to that spot.
    x0, y0 = _get_anchored_bbox(
        self.loc, Bbox.from_bounds(0, 0, bbox.width, bbox.height),
        bbox_to_anchor, pad)
    return x0 - bbox.x0, y0 - bbox.y0
def update_frame(self, bbox, fontsize=None):
    """Resize the frame patch to *bbox*; optionally rescale its box style."""
    self.patch.set_bounds(bbox.bounds)
    if fontsize:
        self.patch.set_mutation_scale(fontsize)
def draw(self, renderer):
    # docstring inherited
    if not self.get_visible():
        return
    # update the location and size of the legend frame
    bbox = self.get_window_extent(renderer)
    fontsize = renderer.points_to_pixels(self.prop.get_size_in_points())
    self.update_frame(bbox, fontsize)
    # Draw the frame first so the child renders on top of it.
    self.patch.draw(renderer)
    px, py = self.get_offset(self.get_bbox(renderer), renderer)
    self.get_child().set_offset((px, py))
    self.get_child().draw(renderer)
    self.stale = False
def _get_anchored_bbox(loc, bbox, parentbbox, borderpad):
    """
    Return the (x, y) position of the *bbox* anchored at the *parentbbox* with
    the *loc* code with the *borderpad*.
    """
    # This is only called internally and *loc* should already have been
    # validated. If 0 (None), we just let ``bbox.anchored`` raise.
    anchor_codes = (None, "NE", "NW", "SW", "SE", "E", "W", "E", "S", "N", "C")
    container = parentbbox.padded(-borderpad)
    return bbox.anchored(anchor_codes[loc], container=container).p0
class AnchoredText(AnchoredOffsetbox):
    """
    AnchoredOffsetbox with Text.
    """

    def __init__(self, s, loc, *, pad=0.4, borderpad=0.5, prop=None, **kwargs):
        """
        Parameters
        ----------
        s : str
            Text.
        loc : str
            Location code. See `AnchoredOffsetbox`.
        pad : float, default: 0.4
            Padding around the text as fraction of the fontsize.
        borderpad : float, default: 0.5
            Spacing between the offsetbox frame and the *bbox_to_anchor*.
        prop : dict, optional
            Dictionary of keyword parameters to be passed to the
            `~matplotlib.text.Text` instance contained inside AnchoredText.
        **kwargs
            All other parameters are passed to `AnchoredOffsetbox`.
        """
        prop = {} if prop is None else prop
        # Vertical alignment is managed by the anchoring machinery, so a
        # caller-specified value could not be honored.
        if {'va', 'verticalalignment'} & set(prop):
            raise ValueError(
                'Mixing verticalalignment with AnchoredText is not supported.')

        self.txt = TextArea(s, textprops=prop)
        fp = self.txt._text.get_fontproperties()
        super().__init__(
            loc, pad=pad, borderpad=borderpad, child=self.txt, prop=fp,
            **kwargs)
class OffsetImage(OffsetBox):
    """An `OffsetBox` holding a single image, scaled by a zoom factor."""

    def __init__(self, arr, *,
                 zoom=1,
                 cmap=None,
                 norm=None,
                 interpolation=None,
                 origin=None,
                 filternorm=True,
                 filterrad=4.0,
                 resample=False,
                 dpi_cor=True,
                 **kwargs
                 ):
        super().__init__()
        self._dpi_cor = dpi_cor
        # Rendering is delegated to a BboxImage whose bbox follows this
        # box's window extent.
        self.image = BboxImage(bbox=self.get_window_extent,
                               cmap=cmap,
                               norm=norm,
                               interpolation=interpolation,
                               origin=origin,
                               filternorm=filternorm,
                               filterrad=filterrad,
                               resample=resample,
                               **kwargs)
        self._children = [self.image]
        self.set_zoom(zoom)
        self.set_data(arr)

    def set_data(self, arr):
        """Set the image array and forward it to the child `BboxImage`."""
        self._data = np.asarray(arr)
        self.image.set_data(self._data)
        self.stale = True

    def get_data(self):
        """Return the image data as a numpy array."""
        return self._data

    def set_zoom(self, zoom):
        """Set the zoom factor applied to the image's pixel size."""
        self._zoom = zoom
        self.stale = True

    def get_zoom(self):
        """Return the zoom factor."""
        return self._zoom

    def get_offset(self):
        """Return offset of the container."""
        return self._offset

    def get_children(self):
        return [self.image]

    def get_bbox(self, renderer):
        # One image pixel maps to *zoom* points, optionally converted to
        # pixels for this renderer's dpi.
        scale = renderer.points_to_pixels(1.) if self._dpi_cor else 1.
        ny, nx = self.get_data().shape[:2]
        factor = scale * self.get_zoom()
        return Bbox.from_bounds(0, 0, nx * factor, ny * factor)

    def draw(self, renderer):
        # docstring inherited
        self.image.draw(renderer)
        self.stale = False
class AnnotationBbox(martist.Artist, mtext._AnnotationBase):
    """
    Container for an `OffsetBox` referring to a specific position *xy*.

    Optionally an arrow pointing from the offsetbox to *xy* can be drawn.

    This is like `.Annotation`, but with `OffsetBox` instead of `.Text`.
    """

    zorder = 3

    def __str__(self):
        return f"AnnotationBbox({self.xy[0]:g},{self.xy[1]:g})"

    @_docstring.interpd
    def __init__(self, offsetbox, xy, xybox=None, xycoords='data', boxcoords=None, *,
                 frameon=True, pad=0.4,  # FancyBboxPatch boxstyle.
                 annotation_clip=None,
                 box_alignment=(0.5, 0.5),
                 bboxprops=None,
                 arrowprops=None,
                 fontsize=None,
                 **kwargs):
        """
        Parameters
        ----------
        offsetbox : `OffsetBox`

        xy : (float, float)
            The point *(x, y)* to annotate. The coordinate system is determined
            by *xycoords*.

        xybox : (float, float), default: *xy*
            The position *(x, y)* to place the text at. The coordinate system
            is determined by *boxcoords*.

        xycoords : single or two-tuple of str or `.Artist` or `.Transform` or \
callable, default: 'data'
            The coordinate system that *xy* is given in. See the parameter
            *xycoords* in `.Annotation` for a detailed description.

        boxcoords : single or two-tuple of str or `.Artist` or `.Transform` \
or callable, default: value of *xycoords*
            The coordinate system that *xybox* is given in. See the parameter
            *textcoords* in `.Annotation` for a detailed description.

        frameon : bool, default: True
            By default, the text is surrounded by a white `.FancyBboxPatch`
            (accessible as the ``patch`` attribute of the `.AnnotationBbox`).
            If *frameon* is set to False, this patch is made invisible.

        annotation_clip: bool or None, default: None
            Whether to clip (i.e. not draw) the annotation when the annotation
            point *xy* is outside the Axes area.

            - If *True*, the annotation will be clipped when *xy* is outside
              the Axes.
            - If *False*, the annotation will always be drawn.
            - If *None*, the annotation will be clipped when *xy* is outside
              the Axes and *xycoords* is 'data'.

        pad : float, default: 0.4
            Padding around the offsetbox.

        box_alignment : (float, float)
            A tuple of two floats for a vertical and horizontal alignment of
            the offset box w.r.t. the *boxcoords*.
            The lower-left corner is (0, 0) and upper-right corner is (1, 1).

        bboxprops : dict, optional
            A dictionary of properties to set for the annotation bounding box,
            for example *boxstyle* and *alpha*. See `.FancyBboxPatch` for
            details.

        arrowprops: dict, optional
            Arrow properties, see `.Annotation` for description.

        fontsize: float or str, optional
            Translated to points and passed as *mutation_scale* into
            `.FancyBboxPatch` to scale attributes of the box style (e.g. pad
            or rounding_size). The name is chosen in analogy to `.Text` where
            *fontsize* defines the mutation scale as well. If not given,
            :rc:`legend.fontsize` is used. See `.Text.set_fontsize` for valid
            values.

        **kwargs
            Other `AnnotationBbox` properties. See `.AnnotationBbox.set` for
            a list.
        """
        martist.Artist.__init__(self)
        mtext._AnnotationBase.__init__(
            self, xy, xycoords=xycoords, annotation_clip=annotation_clip)
        self.offsetbox = offsetbox
        # Copy, so that popping "relpos" below does not mutate the caller's
        # dict.
        self.arrowprops = arrowprops.copy() if arrowprops is not None else None
        self.set_fontsize(fontsize)
        self.xybox = xybox if xybox is not None else xy
        self.boxcoords = boxcoords if boxcoords is not None else xycoords
        self._box_alignment = box_alignment
        if arrowprops is not None:
            # "relpos" anchors the arrow start inside the box; it is not a
            # FancyArrowPatch property, hence removed before forwarding.
            self._arrow_relpos = self.arrowprops.pop("relpos", (0.5, 0.5))
            self.arrow_patch = FancyArrowPatch((0, 0), (1, 1),
                                               **self.arrowprops)
        else:
            self._arrow_relpos = None
            self.arrow_patch = None
        self.patch = FancyBboxPatch(  # frame
            xy=(0.0, 0.0), width=1., height=1.,
            facecolor='w', edgecolor='k',
            mutation_scale=self.prop.get_size_in_points(),
            snap=True,
            visible=frameon,
        )
        self.patch.set_boxstyle("square", pad=pad)
        if bboxprops:
            self.patch.set(**bboxprops)
        self._internal_update(kwargs)

    @property
    def xyann(self):
        # Alias of *xybox*, for API symmetry with `.Annotation`.
        return self.xybox

    @xyann.setter
    def xyann(self, xyann):
        self.xybox = xyann
        self.stale = True

    @property
    def anncoords(self):
        # Alias of *boxcoords*, for API symmetry with `.Annotation`.
        return self.boxcoords

    @anncoords.setter
    def anncoords(self, coords):
        self.boxcoords = coords
        self.stale = True

    def contains(self, mouseevent):
        if self._different_canvas(mouseevent):
            return False, {}
        if not self._check_xy(None):
            return False, {}
        # Hit-testing is delegated to the contained offsetbox.
        return self.offsetbox.contains(mouseevent)
        # self.arrow_patch is currently not checked as this can be a line - JJ

    def get_children(self):
        children = [self.offsetbox, self.patch]
        if self.arrow_patch:
            children.append(self.arrow_patch)
        return children

    def set_figure(self, fig):
        # Keep child artists attached to the same figure as the container.
        if self.arrow_patch is not None:
            self.arrow_patch.set_figure(fig)
        self.offsetbox.set_figure(fig)
        martist.Artist.set_figure(self, fig)

    def set_fontsize(self, s=None):
        """
        Set the fontsize in points.

        If *s* is not given, reset to :rc:`legend.fontsize`.
        """
        if s is None:
            s = mpl.rcParams["legend.fontsize"]
        self.prop = FontProperties(size=s)
        self.stale = True

    def get_fontsize(self):
        """Return the fontsize in points."""
        return self.prop.get_size_in_points()

    def get_window_extent(self, renderer=None):
        # docstring inherited
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        self.update_positions(renderer)
        return Bbox.union([child.get_window_extent(renderer)
                           for child in self.get_children()])

    def get_tightbbox(self, renderer=None):
        # docstring inherited
        if renderer is None:
            renderer = self.get_figure(root=True)._get_renderer()
        self.update_positions(renderer)
        return Bbox.union([child.get_tightbbox(renderer)
                           for child in self.get_children()])

    def update_positions(self, renderer):
        """Update pixel positions for the annotated point, the text, and the arrow."""
        ox0, oy0 = self._get_xy(renderer, self.xybox, self.boxcoords)
        bbox = self.offsetbox.get_bbox(renderer)
        fw, fh = self._box_alignment
        # Offset the box so that its (fw, fh) alignment point lands on the
        # (ox0, oy0) anchor position.
        self.offsetbox.set_offset(
            (ox0 - fw*bbox.width - bbox.x0, oy0 - fh*bbox.height - bbox.y0))

        bbox = self.offsetbox.get_window_extent(renderer)
        self.patch.set_bounds(bbox.bounds)

        mutation_scale = renderer.points_to_pixels(self.get_fontsize())
        self.patch.set_mutation_scale(mutation_scale)

        if self.arrowprops:
            # Use FancyArrowPatch if self.arrowprops has "arrowstyle" key.

            # Adjust the starting point of the arrow relative to the textbox.
            # TODO: Rotation needs to be accounted.
            arrow_begin = bbox.p0 + bbox.size * self._arrow_relpos
            arrow_end = self._get_position_xy(renderer)
            # The arrow (from arrow_begin to arrow_end) will be first clipped
            # by patchA and patchB, then shrunk by shrinkA and shrinkB (in
            # points). If patch A is not set, self.bbox_patch is used.
            self.arrow_patch.set_positions(arrow_begin, arrow_end)

            if "mutation_scale" in self.arrowprops:
                mutation_scale = renderer.points_to_pixels(
                    self.arrowprops["mutation_scale"])
                # Else, use fontsize-based mutation_scale defined above.
            self.arrow_patch.set_mutation_scale(mutation_scale)

            patchA = self.arrowprops.get("patchA", self.patch)
            self.arrow_patch.set_patchA(patchA)

    def draw(self, renderer):
        # docstring inherited
        if not self.get_visible() or not self._check_xy(renderer):
            return
        renderer.open_group(self.__class__.__name__, gid=self.get_gid())
        self.update_positions(renderer)
        if self.arrow_patch is not None:
            # Lazily adopt our figure for the arrow patch before drawing.
            if (self.arrow_patch.get_figure(root=False) is None and
                    (fig := self.get_figure(root=False)) is not None):
                self.arrow_patch.set_figure(fig)
            self.arrow_patch.draw(renderer)
        self.patch.draw(renderer)
        self.offsetbox.draw(renderer)
        renderer.close_group(self.__class__.__name__)
        self.stale = False
class DraggableBase:
    """
    Helper base class for a draggable artist (legend, offsetbox).

    Derived classes must override the following methods::

        def save_offset(self):
            '''
            Called when the object is picked for dragging; should save the
            reference position of the artist.
            '''

        def update_offset(self, dx, dy):
            '''
            Called during the dragging; (*dx*, *dy*) is the pixel offset from
            the point where the mouse drag started.
            '''

    Optionally, you may override the following method::

        def finalize_offset(self):
            '''Called when the mouse is released.'''

    In the current implementation of `.DraggableLegend` and
    `DraggableAnnotation`, `update_offset` places the artists in display
    coordinates, and `finalize_offset` recalculates their position in axes
    coordinate and set a relevant attribute.
    """

    def __init__(self, ref_artist, use_blit=False):
        self.ref_artist = ref_artist
        if not ref_artist.pickable():
            ref_artist.set_picker(True)
        self.got_artist = False
        self._use_blit = use_blit and self.canvas.supports_blit
        callbacks = self.canvas.callbacks
        # Store one disconnect-partial per connection so that disconnect()
        # can undo all of them; the cids are recoverable via *cids* below.
        self._disconnectors = [
            functools.partial(
                callbacks.disconnect, callbacks._connect_picklable(name, func))
            for name, func in [
                ("pick_event", self.on_pick),
                ("button_release_event", self.on_release),
                ("motion_notify_event", self.on_motion),
            ]
        ]

    # A property, not an attribute, to maintain picklability.
    canvas = property(lambda self: self.ref_artist.get_figure(root=True).canvas)

    cids = property(lambda self: [
        disconnect.args[0] for disconnect in self._disconnectors[:2]])

    def on_motion(self, evt):
        """Move the artist by the pixel delta from the pick position."""
        if self._check_still_parented() and self.got_artist:
            dx = evt.x - self.mouse_x
            dy = evt.y - self.mouse_y
            self.update_offset(dx, dy)
            if self._use_blit:
                self.canvas.restore_region(self.background)
                self.ref_artist.draw(
                    self.ref_artist.get_figure(root=True)._get_renderer())
                self.canvas.blit()
            else:
                self.canvas.draw()

    def on_pick(self, evt):
        """Start a drag: record the mouse position and cache the background."""
        if self._check_still_parented():
            if evt.artist == self.ref_artist:
                self.mouse_x = evt.mouseevent.x
                self.mouse_y = evt.mouseevent.y
                self.save_offset()
                self.got_artist = True
            if self.got_artist and self._use_blit:
                # Animate the artist and snapshot the unobstructed canvas so
                # on_motion can blit the artist over it cheaply.
                self.ref_artist.set_animated(True)
                self.canvas.draw()
                fig = self.ref_artist.get_figure(root=False)
                self.background = self.canvas.copy_from_bbox(fig.bbox)
                self.ref_artist.draw(fig._get_renderer())
                self.canvas.blit()

    def on_release(self, event):
        """Finish the drag and restore the non-animated drawing state."""
        if self._check_still_parented() and self.got_artist:
            self.finalize_offset()
            self.got_artist = False
            if self._use_blit:
                self.canvas.restore_region(self.background)
                # Fetch the renderer from the *root* figure, consistently
                # with on_motion; ``self.ref_artist.figure`` may resolve to
                # a SubFigure, which does not own the renderer.
                self.ref_artist.draw(
                    self.ref_artist.get_figure(root=True)._get_renderer())
                self.canvas.blit()
                self.ref_artist.set_animated(False)

    def _check_still_parented(self):
        # The artist may have been removed from its figure since the
        # callbacks were registered; if so, tear everything down.
        if self.ref_artist.get_figure(root=False) is None:
            self.disconnect()
            return False
        else:
            return True

    def disconnect(self):
        """Disconnect the callbacks."""
        for disconnector in self._disconnectors:
            disconnector()

    def save_offset(self):
        pass

    def update_offset(self, dx, dy):
        pass

    def finalize_offset(self):
        pass
class DraggableOffsetBox(DraggableBase):
    """Helper making an `.OffsetBox` draggable via its *ref_artist*."""

    def __init__(self, ref_artist, offsetbox, use_blit=False):
        super().__init__(ref_artist, use_blit=use_blit)
        self.offsetbox = offsetbox

    def save_offset(self):
        # Record the box's current pixel offset as the drag reference.
        box = self.offsetbox
        renderer = box.get_figure(root=True)._get_renderer()
        ox, oy = box.get_offset(box.get_bbox(renderer), renderer)
        self.offsetbox_x = ox
        self.offsetbox_y = oy
        box.set_offset((ox, oy))

    def update_offset(self, dx, dy):
        # Shift relative to the saved reference position.
        self.offsetbox.set_offset(
            (self.offsetbox_x + dx, self.offsetbox_y + dy))

    def get_loc_in_canvas(self):
        box = self.offsetbox
        renderer = box.get_figure(root=True)._get_renderer()
        bbox = box.get_bbox(renderer)
        ox, oy = box._offset
        return (ox + bbox.x0, oy + bbox.y0)
class DraggableAnnotation(DraggableBase):
    """Helper making an `.Annotation` draggable with the mouse."""

    def __init__(self, annotation, use_blit=False):
        super().__init__(annotation, use_blit=use_blit)
        self.annotation = annotation

    def save_offset(self):
        # Remember the text position in display coordinates.
        annotation = self.annotation
        self.ox, self.oy = annotation.get_transform().transform(
            annotation.xyann)

    def update_offset(self, dx, dy):
        # Shift by (dx, dy) pixels, then map back into the annotation's
        # own coordinate system.
        annotation = self.annotation
        display = (self.ox + dx, self.oy + dy)
        annotation.xyann = (
            annotation.get_transform().inverted().transform(display))
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@offsetbox.py@.PATH_END.py
|
{
"filename": "_z.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/isosurface/slices/_z.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
# Auto-generated compound-attribute validator for ``isosurface.slices.z``.
# ``data_docs`` is user-facing help text kept in sync with the plotly schema;
# do not edit it by hand.
class ZValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(self, plotly_name="z", parent_name="isosurface.slices", **kwargs):
        super(ZValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Z"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            fill
                Sets the fill ratio of the `slices`. The
                default fill value of the `slices` is 1 meaning
                that they are entirely shaded. On the other
                hand Applying a `fill` ratio less than one
                would allow the creation of openings parallel
                to the edges.
            locations
                Specifies the location(s) of slices on the
                axis. When not specified slices would be
                created for all points of the axis z except
                start and end.
            locationssrc
                Sets the source reference on Chart Studio Cloud
                for `locations`.
            show
                Determines whether or not slice planes about
                the z dimension are drawn.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@isosurface@slices@_z.py@.PATH_END.py
|
{
"filename": "ext_signature.py",
"repo_name": "quatrope/feets",
"repo_path": "feets_extracted/feets-master/feets/extractors/ext_signature.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2018 Bruno Sanchez
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# =============================================================================
# DOC
# =============================================================================
""""""
# =============================================================================
# IMPORTS
# =============================================================================
import numpy as np
import seaborn as sns
from .core import Extractor
# =============================================================================
# EXTRACTOR CLASS
# =============================================================================
class Signature(Extractor):
    """Phase-magnitude "signature" of a light curve.

    Folds the light curve on its dominant period (``PeriodLS``), normalizes
    magnitudes by ``Amplitude``, and returns a 2-D density histogram over
    (phase, normalized magnitude).
    """

    data = ["magnitude", "time"]
    dependencies = ["PeriodLS", "Amplitude"]
    params = {"phase_bins": 18, "mag_bins": 12}
    features = ["SignaturePhMag"]

    def plot_feature(
        self, feature, value, ax, plot_kws, phase_bins, mag_bins, **kwargs
    ):
        # Render the 2-D signature as a heatmap on the provided axes.
        ax.set_title(f"SignaturePhMag - {phase_bins}x{mag_bins}")
        ax.set_xlabel("Phase")
        ax.set_ylabel("Magnitude")
        sns.heatmap(value, ax=ax, **plot_kws)

    def fit(self, magnitude, time, PeriodLS, Amplitude, phase_bins, mag_bins):
        """Compute the normalized (phase, magnitude) histogram.

        Returns
        -------
        dict
            ``{"SignaturePhMag": ndarray}`` of shape
            ``(phase_bins, mag_bins)``.
        """
        first_period = PeriodLS[0]

        # ``np.float`` was removed in NumPy 1.24 (deprecated since 1.20);
        # the builtin ``float`` is the documented replacement.
        lc_yaxis = (magnitude - np.min(magnitude)) / float(Amplitude)

        # SHIFT TO BEGIN AT MINIMUM
        loc = np.argmin(lc_yaxis)
        lc_phase = np.remainder(time - time[loc], first_period) / first_period

        bins = (phase_bins, mag_bins)
        # ``normed`` was removed from np.histogram2d in NumPy 1.24;
        # ``density=True`` is the supported equivalent normalization.
        signature = np.histogram2d(
            lc_phase, lc_yaxis, bins=bins, density=True
        )[0]

        return {"SignaturePhMag": signature}
|
quatropeREPO_NAMEfeetsPATH_START.@feets_extracted@feets-master@feets@extractors@ext_signature.py@.PATH_END.py
|
{
"filename": "test_spectral_utils.py",
"repo_name": "gwpy/gwpy",
"repo_path": "gwpy_extracted/gwpy-main/gwpy/signal/tests/test_spectral_utils.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (C) Duncan Macleod (2013-2020)
#
# This file is part of GWpy.
#
# GWpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GWpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GWpy. If not, see <http://www.gnu.org/licenses/>.
"""Unit test for signal module
"""
import pytest
from astropy import units
from ..spectral import _utils as fft_utils
def test_scale_timeseries_unit():
    """Test :func:`gwpy.signal.spectral.utils.scale_timeseries_units`
    """
    scale = fft_utils.scale_timeseries_unit
    meters = units.Unit('m')
    # the default scaling behaves like 'density'
    assert scale(meters) == units.Unit('m^2/Hz')
    assert scale(meters, scaling='density') == units.Unit('m^2/Hz')
    # 'spectrum' scaling drops the per-Hz normalisation
    assert scale(meters, scaling='spectrum') == units.Unit('m^2')
    # unknown scalings are rejected
    with pytest.raises(ValueError):
        scale(meters, scaling='other')
    # a null unit still picks up the density normalisation
    assert scale(None) == units.Unit('Hz^-1')
|
gwpyREPO_NAMEgwpyPATH_START.@gwpy_extracted@gwpy-main@gwpy@signal@tests@test_spectral_utils.py@.PATH_END.py
|
{
"filename": "test_icrs_observed_transformations.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/coordinates/tests/test_icrs_observed_transformations.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Accuracy tests for ICRS transformations, primarily to/from AltAz."""
import numpy as np
from astropy import units as u
from astropy.coordinates import (
CIRS,
ICRS,
AltAz,
EarthLocation,
HADec,
SkyCoord,
frame_transform_graph,
golden_spiral_grid,
)
from astropy.tests.helper import assert_quantity_allclose as assert_allclose
from astropy.time import Time
def test_icrs_altaz_consistency():
    """
    Check ICRS<->AltAz for consistency with ICRS<->CIRS<->AltAz

    The latter is extensively tested in test_intermediate_transformations.py
    """
    grid = golden_spiral_grid(200)
    distances = np.linspace(0.5, 1, len(grid)) * u.km * 1e5
    coords = SkyCoord(ra=grid.lon, dec=grid.lat, distance=distances)

    location = EarthLocation(28 * u.deg, 23 * u.deg, height=2000.0 * u.km)
    frame = AltAz(obstime=Time("J2010"), location=location)

    # the direct ICRS->AltAz route must be a single registered transform
    direct_path = frame_transform_graph.get_transform(ICRS, AltAz).transforms
    assert len(direct_path) == 1

    # the direct route agrees with the ICRS->CIRS->AltAz detour
    direct = coords.transform_to(frame)
    via_cirs = coords.transform_to(CIRS()).transform_to(frame)
    assert_allclose(direct.separation_3d(via_cirs), 0 * u.mm, atol=1 * u.mm)

    # a direct round trip returns to the starting coordinates
    back = coords.transform_to(frame).transform_to(coords)
    assert_allclose(back.separation_3d(coords), 0 * u.mm, atol=1 * u.mm)

    # ... and so does a round trip routed through CIRS
    back = coords.transform_to(frame).transform_to(CIRS()).transform_to(coords)
    assert_allclose(back.separation_3d(coords), 0 * u.mm, atol=1 * u.mm)
def test_icrs_hadec_consistency():
    """
    Check ICRS<->HADec for consistency with ICRS<->CIRS<->HADec
    """
    grid = golden_spiral_grid(200)
    distances = np.linspace(0.5, 1, len(grid)) * u.km * 1e5
    coords = SkyCoord(ra=grid.lon, dec=grid.lat, distance=distances)

    location = EarthLocation(28 * u.deg, 23 * u.deg, height=2000.0 * u.km)
    frame = HADec(obstime=Time("J2010"), location=location)

    # the direct ICRS->HADec route must be a single registered transform
    direct_path = frame_transform_graph.get_transform(ICRS, HADec).transforms
    assert len(direct_path) == 1

    # the direct route agrees with the ICRS->CIRS->HADec detour
    direct = coords.transform_to(frame)
    via_cirs = coords.transform_to(CIRS()).transform_to(frame)
    assert_allclose(direct.separation_3d(via_cirs), 0 * u.mm, atol=1 * u.mm)

    # a direct round trip returns to the starting coordinates
    back = coords.transform_to(frame).transform_to(coords)
    assert_allclose(back.separation_3d(coords), 0 * u.mm, atol=1 * u.mm)

    # ... and so does a round trip routed through CIRS
    back = coords.transform_to(frame).transform_to(CIRS()).transform_to(coords)
    assert_allclose(back.separation_3d(coords), 0 * u.mm, atol=1 * u.mm)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@coordinates@tests@test_icrs_observed_transformations.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.