| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
gwsky
|
gwsky-master/gwsky/para_sampler/__init__.py
|
from .para_sampler import ParameterSampler
from .prior_dict import PriorDictSampler
from .mass import O3aMassSampler
from .sky import uniform_sky_sampler, SHSkySampler, DipoleSkySampler
| 185
| 45.5
| 68
|
py
|
gwsky
|
gwsky-master/gwsky/para_sampler/sky.py
|
import numpy as np
from scipy.special import lpmv, eval_legendre
from healpy.rotator import rotateDirection, dir2vec
from bilby.core.prior import Prior, Uniform, Sine, Cosine, Interped, PriorDict
from .para_sampler import ParameterSampler
from .prior_dict import PriorDictSampler
from ..utils import sh_normal_coeff, rotation_matrix_from_vec
from typing import Tuple
from ..typing import SHModes, ParameterVector, ParaNameList
def uniform_sky_sampler(ra_dec: bool = True) -> PriorDictSampler:
if ra_dec:
priors = {
'ra': Uniform(name='ra', minimum=0, maximum=2*np.pi, boundary='periodic'),
'dec': Cosine(name='dec')
}
else:
priors = {
'phi': Uniform(name='phi', minimum=0, maximum=2*np.pi, boundary='periodic'),
'theta': Sine(name='theta')
}
priors = PriorDict(priors)
return PriorDictSampler(priors)
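# Usage sketch (not part of the original module): draw points uniformly on
# the sphere; assumes PriorDictSampler exposes a sample() method like the
# other samplers in this module.
def _example_uniform_sky(n=100):
    sampler = uniform_sky_sampler(ra_dec=True)
    return [sampler.sample() for _ in range(n)]  # list of (ra, dec) draws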
class SHPhiPrior(Prior):
def __init__(self, alms: SHModes) -> None:
self.alms: SHModes = {}
for (l, m), alm in alms.items():
if m >= 0:
self.alms[(l, m)] = alm
else:
al_m = -alm.conjugate() # expected value of a_{l,-m}
if alms.get((l, -m), al_m) != al_m:
raise ValueError('Wrong alm for real field')
else:
self.alms[(l, -m)] = al_m
# self.alms only contains m >= 0 modes
# (for a real field, the corresponding -m mode is minus the conjugate)
self._cos_theta = None
self.cosm_coeff = {}
self.sinm_coeff = {}
super().__init__(minimum=0, maximum=2*np.pi)
@property
def cos_theta(self):
return self._cos_theta
@cos_theta.setter
def cos_theta(self, value):
self._cos_theta = value
cosm_coeff = {}
sinm_coeff = {}
for (l, m), alm in self.alms.items():
theta_item = sh_normal_coeff(l, m) * lpmv(m, l, self._cos_theta)
# 2 for both m and -m mode
cosm_coeff[m] = cosm_coeff.get(m, 0) + 2*alm.real*theta_item
sinm_coeff[m] = sinm_coeff.get(m, 0) - 2*alm.imag*theta_item
normalization = cosm_coeff[0] * 2*np.pi
self.cosm_coeff = {
m: value/normalization for m, value in cosm_coeff.items()}
self.sinm_coeff = {
m: value/normalization for m, value in sinm_coeff.items()}
def prob(self, val):
cos = sum([coefficient*np.cos(m*val)
for m, coefficient in self.cosm_coeff.items()])
sin = sum([coefficient*np.sin(m*val)
for m, coefficient in self.sinm_coeff.items()])
return cos+sin
def rescale(self, val):
# an upper bound on the maximum value of the PDF
max_bound = sum(map(np.abs, self.cosm_coeff.values())) + \
sum(map(np.abs, self.sinm_coeff.values()))
while True:
phi = np.random.uniform(0, 2*np.pi)
if val <= self.prob(phi) / max_bound:
return phi
val = np.random.random()
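# The rescale method above is a rejection sampler: it proposes phi uniformly
# on [0, 2*pi) and accepts with probability prob(phi)/max_bound. A
# self-contained sketch of the same idea (the dipole-like pdf below is
# illustrative, not part of the original module):
def _example_rejection_sample():
    pdf = lambda phi: (1 + 0.5*np.cos(phi)) / (2*np.pi)  # normalized on [0, 2*pi)
    pdf_max = 1.5 / (2*np.pi)  # exact upper bound of this pdf
    while True:
        phi = np.random.uniform(0, 2*np.pi)
        if np.random.random() <= pdf(phi) / pdf_max:
            return phi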
class SHSkySampler(ParameterSampler):
def __init__(self, alms: SHModes, ra_dec: bool = True):
UNIFORM_A00 = 2 * np.pi**0.5
a00 = alms.get((0, 0), UNIFORM_A00)
self.alms = {lm: value/a00 * UNIFORM_A00
for lm, value in alms.items()} # normalize
self.alms[(0, 0)] = UNIFORM_A00
self.cos_theta_prior = self.get_cos_theta_prior(self.alms)
self.phi_prior, self.conditional_phi = self.get_phi_prior(self.alms)
self.ra_dec = ra_dec
def _check_positive_prior(self, prior):
if np.any(prior < 0):
raise ValueError(
'Invalid spherical harmonic coefficient values: '
'the summed prior function must be non-negative')
def get_cos_theta_prior(self, alms: SHModes, interp_len: int = 1000) -> Prior:
cos_theta = np.linspace(-1, 1, interp_len)
prior = np.zeros(cos_theta.shape)
for (l, m), alm in alms.items():
# distribution of theta is Y(theta,phi) marginalized by phi
if m == 0:
prior += alm*sh_normal_coeff(l, m)*eval_legendre(l, cos_theta)
self._check_positive_prior(prior)
return Interped(xx=cos_theta, yy=prior, minimum=-1, maximum=1)
def get_phi_prior(self, alms: SHModes) -> Tuple[Prior, bool]:
if all(map(lambda lm: lm[1]==0, alms.keys())):
# alms contains no m != 0 modes, so phi is uniformly distributed
# (faster to compute)
return Uniform(
name='phi', minimum=0, maximum=2*np.pi, boundary='periodic'), False
else:
return SHPhiPrior(alms), True
@property
def keys(self) -> ParaNameList:
if self.ra_dec:
return ['ra', 'dec']
else:
return ['theta', 'phi']
def sample_theta_phi(self) -> Tuple[float, float]:
cos_theta = self.cos_theta_prior.sample()
if self.conditional_phi:
self.phi_prior.cos_theta = cos_theta
phi = self.phi_prior.sample()
return np.arccos(cos_theta), phi
def theta_phi_to_para(self, theta, phi) -> ParameterVector:
if self.ra_dec:
return [phi, np.pi/2-theta]
else:
return [theta, phi]
def sample(self) -> ParameterVector:
theta, phi = self.sample_theta_phi()
return self.theta_phi_to_para(theta, phi)
class DipoleSkySampler(SHSkySampler):
def __init__(self, amplitude: float, dipole_theta: float, dipole_phi: float, ra_dec: bool = True) -> None:
self.amplitude = amplitude
self.dipole_theta = dipole_theta
self.dipole_phi = dipole_phi
if amplitude < 0 or amplitude > 1:
raise ValueError('Dipole amplitude should be >=0 and <=1.')
self.rot_mat = rotation_matrix_from_vec(
orig_vec=np.array([0, 0, 1]),
dest_vec=dir2vec(self.dipole_theta, self.dipole_phi))
alms = {(1, 0): amplitude/sh_normal_coeff(1, 0)}
super().__init__(alms=alms, ra_dec=ra_dec)
def sample_theta_phi(self) -> Tuple[float, float]:
theta, phi = super().sample_theta_phi()
r_theta, r_phi = rotateDirection(self.rot_mat, theta, phi)
return r_theta, r_phi
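# Usage sketch (illustrative parameter values, not part of the original
# module): a 30% dipole pointing towards theta=pi/4, phi=0.
def _example_dipole_sky():
    sampler = DipoleSkySampler(amplitude=0.3, dipole_theta=np.pi/4,
                               dipole_phi=0.0, ra_dec=True)
    return sampler.sample()  # one [ra, dec] draw modulated by the dipole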
| 6,291
| 34.954286
| 110
|
py
|
wgenpatex
|
wgenpatex-main/model.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Generator's convolutional blocks 2D
class Conv_block2D(nn.Module):
def __init__(self, n_ch_in, n_ch_out, m=0.1):
super(Conv_block2D, self).__init__()
self.conv1 = nn.Conv2d(n_ch_in, n_ch_out, 3, padding=0, bias=True)
self.bn1 = nn.BatchNorm2d(n_ch_out, momentum=m)
self.conv2 = nn.Conv2d(n_ch_out, n_ch_out, 3, padding=0, bias=True)
self.bn2 = nn.BatchNorm2d(n_ch_out, momentum=m)
self.conv3 = nn.Conv2d(n_ch_out, n_ch_out, 1, padding=0, bias=True)
self.bn3 = nn.BatchNorm2d(n_ch_out, momentum=m)
def forward(self, x):
x = F.leaky_relu(self.bn1(self.conv1(x)))
x = F.leaky_relu(self.bn2(self.conv2(x)))
x = F.leaky_relu(self.bn3(self.conv3(x)))
return x
# Up-sampling block
class Upsample(nn.Module):
""" nn.Upsample is deprecated """
def __init__(self, scale_factor, mode="nearest"):
super(Upsample, self).__init__()
self.scale_factor = scale_factor
self.mode = mode
def forward(self, x):
x = F.interpolate(x, scale_factor=self.scale_factor, mode=self.mode)
return x
# Up-sampling + batch normalization block
class Up_Bn2D(nn.Module):
def __init__(self, n_ch):
super(Up_Bn2D, self).__init__()
self.up = Upsample(scale_factor=2, mode='nearest')
self.bn = nn.BatchNorm2d(n_ch)
def forward(self, x):
x = self.bn(self.up(x))
return x
# The whole network
class generator(nn.Module):
def __init__(self, nlayers=5, ch_in=3, ch_step=2, device=DEVICE):
super(generator, self).__init__()
self.ch_in = ch_in
self.nlayers = nlayers
self.first_conv = Conv_block2D(ch_in,ch_step).to(device)
self.cb1 = nn.ModuleList()
self.cb2 = nn.ModuleList()
self.up = nn.ModuleList()
for n in range(0, nlayers):
self.up.append(Up_Bn2D((n+1)*ch_step).to(device))
self.cb1.append(Conv_block2D(ch_in,ch_step).to(device))
self.cb2.append(Conv_block2D((n+2)*ch_step,(n+2)*ch_step).to(device))
self.last_conv = nn.Conv2d((nlayers+1)*ch_step, 3, 1, padding=0, bias=False).to(device)
def forward(self, z):
nlayers=self.nlayers
y = self.first_conv(z[0])
for n in range(0,nlayers):
y = self.up[n](y)
y = torch.cat((y, self.cb1[n](z[n+1])), 1)
y = self.cb2[n](y)
y = self.last_conv(y)
return y
# Function to generate an output sample
def sample_fake_img(G, size, n_samples=1):
# dimension of the first input noise
strow = int(np.ceil(size[2]/2**G.nlayers))
stcol = int(np.ceil(size[3]/2**G.nlayers))
# input noise and forward pass
ztab = [torch.rand(n_samples, G.ch_in, 8+2**k*strow+4*int(k!=0), 8+2**k*stcol+4*int(k!=0), device=DEVICE, dtype=torch.float) for k in range(0, G.nlayers+1)]
Z = [Variable(z) for z in ztab]
return G(Z)
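# Usage sketch (not part of the original module): build a generator and
# sample one RGB texture; the output is roughly 256x256 (the border noise
# added in sample_fake_img compensates for the valid convolutions).
def _example_sample():
    G = generator(nlayers=5)
    return sample_fake_img(G, size=[1, 3, 256, 256], n_samples=1)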
| 3,143
| 33.173913
| 160
|
py
|
wgenpatex
|
wgenpatex-main/run_optim_synthesis.py
|
import argparse
import wgenpatex
parser = argparse.ArgumentParser()
parser.add_argument('target_image_path', help='path of the target texture image')
parser.add_argument('-s', '--size', default=None, help="size of the synthesized texture [nrow, ncol] (default: target texture size)")
parser.add_argument('-w', '--patch_size', type=int, default=4, help="patch size (default: 4)")
parser.add_argument('-nmax', '--n_iter_max', type=int, default=500, help="max iterations of the algorithm (default: 500)")
parser.add_argument('-npsi', '--n_iter_psi', type=int, default=10, help="max iterations for psi (default: 10)")
parser.add_argument('-nin', '--n_patches_in', type=int, default=-1, help="number of patches of the synthesized texture used at each iteration, -1 corresponds to all patches (default: -1)")
parser.add_argument('-nout', '--n_patches_out', type=int, default=2000, help="maximum number of patches of the target texture used, -1 corresponds to all patches (default: 2000)")
parser.add_argument('-sc', '--scales', type=int, default=4, help="number of scales used (default: 4)")
parser.add_argument('--visu', action='store_true', help='show intermediate results')
parser.add_argument('--save', action='store_true', help='save temp results in the tmp/ folder')
parser.add_argument('--keops', action='store_true', help='use keops package')
args = parser.parse_args()
synth_img = wgenpatex.optim_synthesis(args)
# plot and save the synthesized texture
wgenpatex.imshow(synth_img)
wgenpatex.imsave('synthesized.png', synth_img)
| 1,523
| 68.272727
| 188
|
py
|
wgenpatex
|
wgenpatex-main/wgenpatex.py
|
import torch
from torch import nn
from torch.autograd.variable import Variable
import matplotlib.pyplot as plt
import numpy as np
import math
import time
import model
from os import mkdir
from os.path import isdir
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(DEVICE)
def imread(img_name):
"""
loads an image as torch.tensor on the selected device
"""
np_img = plt.imread(img_name)
tens_img = torch.tensor(np_img, dtype=torch.float, device=DEVICE)
if torch.max(tens_img) > 1:
tens_img/=255
if len(tens_img.shape) < 3:
tens_img = tens_img.unsqueeze(2)
if tens_img.shape[2] > 3:
tens_img = tens_img[:,:,:3]
tens_img = tens_img.permute(2,0,1)
return tens_img.unsqueeze(0)
def imshow(tens_img):
"""
shows a tensor image
"""
np_img = np.clip(tens_img.squeeze(0).permute(1,2,0).data.cpu().numpy(), 0,1)
if np_img.shape[2] < 3:
np_img = np_img[:,:,0]
ax = plt.imshow(np_img)
ax.set_cmap('gray')
else:
ax = plt.imshow(np_img)
plt.axis('off')
return plt.show()
def imsave(save_name, tens_img):
"""
saves a tensor image
"""
np_img = np.clip(tens_img.squeeze(0).permute(1,2,0).data.cpu().numpy(), 0,1)
if np_img.shape[2] < 3:
np_img = np_img[:,:,0]
plt.imsave(save_name, np_img)
return
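# Usage sketch (not part of the original module); 'texture.png' is a
# placeholder path.
def _example_io_roundtrip():
    img = imread('texture.png')   # (1, C, H, W) float tensor in [0, 1]
    imshow(img)
    imsave('texture_copy.png', img)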
class gaussian_downsample(nn.Module):
"""
Downsampling module with Gaussian filtering
"""
def __init__(self, kernel_size, sigma, stride, pad=False):
super(gaussian_downsample, self).__init__()
self.gauss = nn.Conv2d(3, 3, kernel_size, stride=stride, groups=3, bias=False)
gaussian_weights = self.init_weights(kernel_size, sigma)
self.gauss.weight.data = gaussian_weights.to(DEVICE)
self.gauss.weight.requires_grad_(False)
self.pad = pad
self.padsize = kernel_size-1
def forward(self, x):
if self.pad:
x = torch.cat((x, x[:,:,:self.padsize,:]), 2)
x = torch.cat((x, x[:,:,:,:self.padsize]), 3)
return self.gauss(x)
def init_weights(self, kernel_size, sigma):
x_cord = torch.arange(kernel_size)
x_grid = x_cord.repeat(kernel_size).view(kernel_size, kernel_size)
y_grid = x_grid.t()
xy_grid = torch.stack([x_grid, y_grid], dim=-1)
mean = (kernel_size - 1)/2.
variance = sigma**2.
gaussian_kernel = (1./(2.*math.pi*variance))*torch.exp(-torch.sum((xy_grid - mean)**2., dim=-1)/(2*variance))
gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)
return gaussian_kernel.view(1, 1, kernel_size, kernel_size).repeat(3, 1, 1, 1)
class semidual(nn.Module):
"""
Computes the semi-dual loss between inputy and inputx for the dual variable psi
"""
def __init__(self, inputy, device=DEVICE, usekeops=False):
super(semidual, self).__init__()
self.psi = nn.Parameter(torch.zeros(inputy.shape[0], device=device))
self.yt = inputy.transpose(1,0)
self.usekeops = usekeops
self.y2 = torch.sum(self.yt **2,0,keepdim=True)
def forward(self, inputx):
if self.usekeops:
from pykeops.torch import LazyTensor
y = self.yt.transpose(1,0)
x_i = LazyTensor(inputx.unsqueeze(1).contiguous())
y_j = LazyTensor(y.unsqueeze(0).contiguous())
v_j = LazyTensor(self.psi.unsqueeze(0).unsqueeze(2).contiguous())
sx2_i = LazyTensor(torch.sum(inputx**2,1,keepdim=True).unsqueeze(2).contiguous())
sy2_j = LazyTensor(self.y2.unsqueeze(2).contiguous())
rmv = sx2_i + sy2_j - 2*(x_i*y_j).sum(-1) - v_j
amin = rmv.argmin(dim=1).view(-1)
loss = torch.mean(torch.sum((inputx-y[amin,:])**2,1)-self.psi[amin]) + torch.mean(self.psi)
else:
cxy = torch.sum(inputx**2,1,keepdim=True) + self.y2 - 2*torch.matmul(inputx,self.yt)
loss = torch.mean(torch.min(cxy - self.psi.unsqueeze(0),1)[0]) + torch.mean(self.psi)
return loss
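# Usage sketch (illustrative sizes, not part of the original module):
# semi-dual OT loss between two random patch clouds; maximize over psi,
# minimize over the input patches.
def _example_semidual_loss():
    y = torch.randn(2000, 48, device=DEVICE)  # target patches (n x patch_dim)
    x = torch.randn(1000, 48, device=DEVICE)  # input patches
    loss_fn = semidual(y, device=DEVICE, usekeops=False)
    return loss_fn(x)  # scalar tensor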
class gaussian_layer(nn.Module):
"""
Gaussian layer for the downsampling pyramid
"""
def __init__(self, gaussian_kernel_size, gaussian_std, stride = 2, pad=False):
super(gaussian_layer, self).__init__()
self.downsample = gaussian_downsample(gaussian_kernel_size, gaussian_std, stride, pad=pad)
def forward(self, input):
self.down_img = self.downsample(input)
return self.down_img
class identity(nn.Module):
"""
Identity layer for the downsampling pyramid
"""
def __init__(self):
super(identity, self).__init__()
def forward(self, input):
self.down_img = input
return input
def create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride = 2, pad=False):
"""
Create a downsampling Gaussian pyramid
"""
layer = identity()
gaussian_pyramid = nn.Sequential(layer)
for i in range(n_scales-1):
layer = gaussian_layer(gaussian_kernel_size, gaussian_std, stride, pad=pad)
gaussian_pyramid.add_module("Gaussian_downsampling_{}".format(i+1), layer)
return gaussian_pyramid
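# Usage sketch (not part of the original module): each layer of the pyramid
# caches its output in .down_img after a forward pass.
def _example_pyramid():
    pyramid = create_gaussian_pyramid(gaussian_kernel_size=4, gaussian_std=1, n_scales=3)
    img = torch.rand(1, 3, 128, 128, device=DEVICE)
    pyramid(img)
    return [pyramid[s].down_img.shape for s in range(3)]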
class patch_extractor(nn.Module):
"""
Module for creating custom patch extractor
"""
def __init__(self, patch_size, pad=False):
super(patch_extractor, self).__init__()
self.im2pat = nn.Unfold(kernel_size=patch_size)
self.pad = pad
self.padsize = patch_size-1
def forward(self, input, batch_size=0):
if self.pad:
input = torch.cat((input, input[:,:,:self.padsize,:]), 2)
input = torch.cat((input, input[:,:,:,:self.padsize]), 3)
patches = self.im2pat(input).squeeze(0).transpose(1,0)
if batch_size > 0:
idx = torch.randperm(patches.size(0))[:batch_size]
patches = patches[idx,:]
return patches
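# Usage sketch (not part of the original module): extract all 4x4 patches
# from an image tensor, or a random subset of them.
def _example_patches():
    im2pat = patch_extractor(patch_size=4, pad=False)
    img = torch.rand(1, 3, 64, 64, device=DEVICE)
    all_patches = im2pat(img)             # shape (3721, 48) for a 64x64 image
    subset = im2pat(img, batch_size=500)  # 500 randomly chosen patches
    return all_patches, subset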
def optim_synthesis(args):
"""
Perform the texture synthesis of an exemplar image
"""
target_img_name = args.target_image_path
patch_size = args.patch_size
n_iter_max = args.n_iter_max
n_iter_psi = args.n_iter_psi
n_patches_in = args.n_patches_in
n_patches_out = args.n_patches_out
n_scales = args.scales
usekeops = args.keops
visu = args.visu
save = args.save
# fixed parameters
monitoring_step=50
saving_folder='tmp/'
# parameters for Gaussian downsampling
gaussian_kernel_size = 4
gaussian_std = 1
stride = 2
# load image
target_img = imread(target_img_name)
# synthesized size
if args.size is None:
nrow = target_img.shape[2]
ncol = target_img.shape[3]
else:
nrow = args.size[0]
ncol = args.size[1]
if save:
if not isdir(saving_folder):
mkdir(saving_folder)
imsave(saving_folder+'original.png', target_img)
# Create Gaussian Pyramid downsamplers
target_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=False)
input_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=True)
target_downsampler(target_img) # evaluate on the target image
# create patch extractors
target_im2pat = patch_extractor(patch_size, pad=False)
input_im2pat = patch_extractor(patch_size, pad=True)
# create semi-dual module at each scale
semidual_loss = []
for s in range(n_scales):
real_data = target_im2pat(target_downsampler[s].down_img, n_patches_out) # extract at most n_patches_out patches from the downsampled target image
layer = semidual(real_data, device=DEVICE, usekeops=usekeops)
semidual_loss.append(layer)
if visu:
imshow(target_downsampler[s].down_img)
# Weights on scales
prop = torch.ones(n_scales, device=DEVICE)/n_scales # all scales with same weight
# initialize the generated image
fake_img = torch.rand(1, 3, nrow,ncol, device=DEVICE, requires_grad=True)
# initialize optimizer for the image
optim_img = torch.optim.Adam([fake_img], lr=0.01)
# initialize the loss vector
total_loss = np.zeros(n_iter_max)
# Main loop
t = time.time()
for it in range(n_iter_max):
# 1. update psi
input_downsampler(fake_img.detach()) # evaluate on the current fake image
for s in range(n_scales):
optim_psi = torch.optim.ASGD([semidual_loss[s].psi], lr=1, alpha=0.5, t0=1)
for i in range(n_iter_psi):
fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
optim_psi.zero_grad()
loss = -semidual_loss[s](fake_data)
loss.backward()
optim_psi.step()
semidual_loss[s].psi.data = optim_psi.state[semidual_loss[s].psi]['ax']
# 2. perform gradient step on the image
optim_img.zero_grad()
tloss = 0
for s in range(n_scales):
input_downsampler(fake_img)
fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
loss = prop[s]*semidual_loss[s](fake_data)
loss.backward()
tloss += loss.item()
optim_img.step()
# save loss
total_loss[it] = tloss
# save some results
if it % monitoring_step == 0:
print('iteration '+str(it)+' - elapsed '+str(int(time.time()-t))+'s - loss = '+str(tloss))
if visu:
imshow(fake_img)
if save:
imsave(saving_folder+'it'+str(it)+'.png', fake_img)
print('DONE - total time is '+str(int(time.time()-t))+'s')
if visu:
plt.plot(total_loss)
plt.show()
if save:
plt.savefig(saving_folder+'loss_multiscale.png')
plt.close()
if save:
np.save(saving_folder+'loss.npy', total_loss)
return fake_img
def learn_model(args):
target_img_name = args.target_image_path
patch_size = args.patch_size
n_iter_max = args.n_iter_max
n_iter_psi = args.n_iter_psi
n_patches_in = args.n_patches_in
n_patches_out = args.n_patches_out
n_scales = args.scales
usekeops = args.keops
visu = args.visu
save = args.save
# fixed parameters
monitoring_step=100
saving_folder='tmp/'
# parameters for Gaussian downsampling
gaussian_kernel_size = 4
gaussian_std = 1
stride = 2
# load image
target_img = imread(target_img_name)
if save:
if not isdir(saving_folder):
mkdir(saving_folder)
imsave(saving_folder+'original.png', target_img)
# Create Gaussian Pyramid downsamplers
target_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=False)
input_downsampler = create_gaussian_pyramid(gaussian_kernel_size, gaussian_std, n_scales, stride, pad=False)
target_downsampler(target_img) # evaluate on the target image
# create patch extractors
target_im2pat = patch_extractor(patch_size, pad=False)
input_im2pat = patch_extractor(patch_size, pad=False)
# create semi-dual module at each scale
semidual_loss = []
for s in range(n_scales):
real_data = target_im2pat(target_downsampler[s].down_img, n_patches_out) # extract at most n_patches_out patches from the downsampled target image
layer = semidual(real_data, device=DEVICE, usekeops=usekeops)
semidual_loss.append(layer)
if visu:
imshow(target_downsampler[s].down_img)
#plt.pause(0.01)
# Weights on scales
prop = torch.ones(n_scales, device=DEVICE)/n_scales # all scales with same weight
# initialize generator
G = model.generator(n_scales)
fake_img = model.sample_fake_img(G, target_img.shape, n_samples=1)
# initialize optimizer for the generator
optim_G = torch.optim.Adam(G.parameters(), lr=0.01)
# initialize the loss vector
total_loss = np.zeros(n_iter_max)
# Main loop
t = time.time()
for it in range(n_iter_max):
# 1. update psi
fake_img = model.sample_fake_img(G, target_img.shape, n_samples=1)
input_downsampler(fake_img.detach())
for s in range(n_scales):
optim_psi = torch.optim.ASGD([semidual_loss[s].psi], lr=1, alpha=0.5, t0=1)
for i in range(n_iter_psi):
# evaluate on the current fake image
fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
optim_psi.zero_grad()
loss = -semidual_loss[s](fake_data)
loss.backward()
optim_psi.step()
semidual_loss[s].psi.data = optim_psi.state[semidual_loss[s].psi]['ax']
# 2. perform gradient step on the generator
optim_G.zero_grad()
tloss = 0
input_downsampler(fake_img)
for s in range(n_scales):
fake_data = input_im2pat(input_downsampler[s].down_img, n_patches_in)
loss = prop[s]*semidual_loss[s](fake_data)
tloss += loss
tloss.backward()
optim_G.step()
# save loss
total_loss[it] = tloss.item()
# save some results
if it % monitoring_step == 0:
print('iteration '+str(it)+' - elapsed '+str(int(time.time()-t))+'s - loss = '+str(tloss.item()))
if visu:
imshow(fake_img)
if save:
imsave(saving_folder+'it'+str(it)+'.png', fake_img)
print('DONE - total time is '+str(int(time.time()-t))+'s')
if visu:
plt.plot(total_loss)
plt.show()
plt.pause(0.01)
if save:
plt.savefig(saving_folder+'loss.png')
plt.close()
if save:
np.save(saving_folder+'loss.npy', total_loss)
return G
| 14,073
| 34.185
| 157
|
py
|
wgenpatex
|
wgenpatex-main/run_cnn_synthesis.py
|
import argparse
import wgenpatex
import model
import torch
parser = argparse.ArgumentParser()
parser.add_argument('target_image_path', help='path of the target texture image')
parser.add_argument('-w', '--patch_size', type=int, default=4, help="patch size (default: 4)")
parser.add_argument('-nmax', '--n_iter_max', type=int, default=5000, help="max iterations of the algorithm (default: 5000)")
parser.add_argument('-npsi', '--n_iter_psi', type=int, default=10, help="max iterations for psi (default: 10)")
parser.add_argument('-nin', '--n_patches_in', type=int, default=-1, help="number of patches of the synthesized texture used at each iteration, -1 corresponds to all patches (default: -1)")
parser.add_argument('-nout', '--n_patches_out', type=int, default=2000, help="maximum number of patches of the target texture used, -1 corresponds to all patches (default: 2000)")
parser.add_argument('-sc', '--scales', type=int, default=5, help="number of scales used (default: 5)")
parser.add_argument('--visu', action='store_true', help='show intermediate results')
parser.add_argument('--save', action='store_true', help='save temp results in the tmp/ folder')
parser.add_argument('--keops', action='store_true', help='use keops package')
args = parser.parse_args()
generator = wgenpatex.learn_model(args)
# save the texture generator
torch.save(generator.state_dict(), 'generator.pt')
# sample an image and save it
synth_img = model.sample_fake_img(generator, [1, 3, 512, 512], n_samples=1)
wgenpatex.imshow(synth_img)
wgenpatex.imsave('synthesized.png', synth_img)
| 1,562
| 54.821429
| 188
|
py
|
descqa
|
descqa-master/setup.py
|
#!/usr/bin/env python
"""
DESCQA: LSST DESC QA Framework for mock galaxy catalogs
Copyright (c) 2018 LSST DESC
http://opensource.org/licenses/MIT
"""
import os
from setuptools import setup
with open(os.path.join(os.path.dirname(__file__), 'descqa', 'version.py')) as f:
exec(f.read()) #pylint: disable=W0122
setup(
name='descqa',
version=__version__, #pylint: disable=E0602
description='DESCQA: LSST DESC QA Framework for mock galaxy catalogs',
url='https://github.com/LSSTDESC/descqa',
author='LSST DESC',
maintainer='Yao-Yuan Mao',
maintainer_email='yymao.astro@gmail.com',
license='MIT',
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
keywords='DESCQA',
packages=['descqa'],
install_requires=['future', 'pyyaml', 'jinja2'],
extras_require={
'full': ['numpy', 'scipy', 'matplotlib', 'GCR>=0.8.7', 'healpy', 'treecorr', 'camb', 'scikit-learn', 'pandas', 'astropy', 'POT', 'numba',
'pyccl', 'CatalogMatcher @ https://github.com/LSSTDESC/CatalogMatcher/archive/master.zip'],
},
package_data={'descqa': ['configs/*.yaml', 'data/*']},
)
| 1,291
| 33
| 145
|
py
|
descqa
|
descqa-master/descqa/base.py
|
from __future__ import division, unicode_literals, absolute_import
import os
__all__ = ['BaseValidationTest', 'TestResult']
class TestResult(object):
"""
class for passing back test result
"""
def __init__(self, score=None, summary=None, passed=False, skipped=False, inspect_only=False):
"""
Parameters
----------
score : float
a float number to represent the test score
summary : str
short summary string
passed : bool
if the test is passed
skipped : bool
if the test is skipped, overwrites all other arguments
inspect_only : bool
if the test is only for inspection (i.e., no passing criteria)
"""
self.passed = bool(passed)
self.skipped = bool(skipped)
self.inspect_only = bool(inspect_only)
self.summary = summary or ''
if sum((self.passed, self.skipped, self.inspect_only)) > 1:
raise ValueError('Only *one* of `passed`, `skipped`, and `inspect_only` can be set to True.')
# set score
if not (self.skipped or self.inspect_only):
try:
self.score = float(score)
except (TypeError, ValueError):
raise ValueError('Must set a float value for `score`')
@property
def status_code(self):
"""
get status code (e.g. VALIDATION_TEST_PASSED)
"""
if self.passed:
return 'VALIDATION_TEST_PASSED'
if self.skipped:
return 'VALIDATION_TEST_SKIPPED'
if self.inspect_only:
return 'VALIDATION_TEST_INSPECT'
return 'VALIDATION_TEST_FAILED'
@property
def status_full(self):
"""
get full status (3 lines of string: status code, summary, score)
"""
output = [self.status_code, self.summary]
if getattr(self, 'score', None):
output.append('{:.3g}'.format(self.score))
return '\n'.join(output)
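# Usage sketch (not part of the original module): a passing result with a
# score.
def _example_test_result():
    result = TestResult(score=0.92, passed=True, summary='slope within tolerance')
    return result.status_code, result.status_full  # ('VALIDATION_TEST_PASSED', ...)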
class BaseValidationTest(object):
"""
abstract base class for validation tests
"""
data_dir = os.path.join(os.path.dirname(__file__), 'data')
external_data_dir = "/global/cfs/cdirs/lsst/groups/CS/descqa/data"
def __init__(self, **kwargs):
pass
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
"""
Run the validation test on a single catalog.
Return an instance of TestResult.
This method will be called once for each catalog.
Parameters
----------
catalog_instance : instance of BaseGenericCatalog
instance of the galaxy catalog
catalog_name : str
name of the galaxy catalog
output_dir : str
output directory (all output must be under this directory)
Returns
-------
test_result : instance of TestResult
use the TestResult object to return test result
"""
raise NotImplementedError
def conclude_test(self, output_dir):
"""
Conclude the test.
One can make summary plots for all catalogs here.
Return None.
This method will be called once when all catalogs are done.
Parameters
----------
output_dir: str
output directory (all output must be under this directory)
"""
| 3,403
| 27.366667
| 105
|
py
|
descqa
|
descqa-master/descqa/readiness_test.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import re
import fnmatch
from itertools import cycle
from collections import defaultdict, OrderedDict
import numpy as np
import numexpr as ne
from scipy.stats import norm
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['CheckQuantities']
def check_uniqueness(x, mask=None):
""" Return True if the elements of the input x are unique, else False.
Optionally only evaluate uniqueness on a subset defined by the input mask.
Examples
--------
>>> x = np.random.randint(0, 10, 100)
>>> assert check_uniqueness(x) == False
>>> assert check_uniqueness(np.arange(5)) == True
"""
x = np.asarray(x)
if mask is None:
return x.size == np.unique(x).size
else:
return check_uniqueness(x[mask])
def find_outlier(x):
"""
return a bool array indicating outliers or not in *x*
"""
l, m, h = np.percentile(x, norm.cdf([-1, 0, 1])*100)
d = (h-l) * 0.5
return (x > (m + d*3)) | (x < (m - d*3))
def calc_frac(x, func, total=None):
"""
calculate the fraction of entries in *x* that satisfy *func*
"""
total = total or len(x)
return np.count_nonzero(func(x)) / total
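# Example (not part of the original module): fraction of NaN entries.
def _example_calc_frac():
    x = np.array([1.0, np.nan, np.inf, 2.0])
    return calc_frac(x, np.isnan)  # 0.25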
def split_for_natural_sort(s):
"""
split a string *s* for natural sort.
"""
return tuple((int(y) if y.isdigit() else y for y in re.split(r'(\d+)', s)))
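# Example (not part of the original module): natural ordering of numbered
# quantity names.
def _example_natural_sort():
    names = ['mag_10', 'mag_2', 'mag_1']
    return sorted(names, key=split_for_natural_sort)  # ['mag_1', 'mag_2', 'mag_10']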
def evaluate_expression(expression, catalog_instance):
"""
evaluate a numexpr expression on a GCR catalog
"""
quantities_needed = set(ne.necompiler.precompile(expression)[-1])
if not catalog_instance.has_quantities(quantities_needed):
raise KeyError("Not all quantities needed exist")
return ne.evaluate(expression,
local_dict=catalog_instance.get_quantities(quantities_needed),
global_dict={})
def check_relation(relation, catalog_instance):
"""
check if *relation* is true in *catalog_instance*
"""
expr1, simeq, expr2 = relation.partition('~==')
if simeq:
expr1 = expr1.strip()
expr2 = expr2.strip()
return np.allclose(
evaluate_expression(expr1, catalog_instance),
evaluate_expression(expr2, catalog_instance),
equal_nan=True,
)
return evaluate_expression(relation, catalog_instance).all()
class CheckQuantities(BaseValidationTest):
"""
Readiness test to check catalog quantities before image simulations
"""
stats = OrderedDict((
('min', np.min),
('max', np.max),
('median', np.median),
('mean', np.mean),
('std', np.std),
('f_inf', np.isinf),
('f_nan', np.isnan),
('f_zero', np.logical_not),
('f_outlier', find_outlier),
))
def __init__(self, **kwargs):
self.quantities_to_check = kwargs.get('quantities_to_check', [])
self.relations_to_check = kwargs.get('relations_to_check', [])
self.uniqueness_to_check = kwargs.get('uniqueness_to_check', [])
self.catalog_filters = kwargs.get('catalog_filters', [])
self.lgndtitle_fontsize = kwargs.get('lgndtitle_fontsize', 12)
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.no_version = kwargs.get('no_version', False)
self.title_size = kwargs.get('title_size', 'small')
self.font_size = kwargs.get('font_size', 12)
self.legend_size = kwargs.get('legend_size', 'x-small')
if not any((
self.quantities_to_check,
self.relations_to_check,
self.uniqueness_to_check,
)):
raise ValueError('must specify quantities_to_check, relations_to_check, or uniqueness_to_check')
if not all(d.get('quantities') for d in self.quantities_to_check):
raise ValueError('yaml file error: `quantities` must exist for each item in `quantities_to_check`')
if not all(isinstance(d, str) for d in self.relations_to_check):
raise ValueError('yaml file error: each item in `relations_to_check` must be a string')
if not all(d.get('quantity') for d in self.uniqueness_to_check):
raise ValueError('yaml file error: `quantity` must exist for each item in `uniqueness_to_check`')
if self.catalog_filters:
if not all(d.get('quantity') for d in self.catalog_filters):
raise ValueError('yaml file error: `quantity` must exist for each item in `catalog_filters`')
if not all(d.get('min') is not None or d.get('max') is not None for d in self.catalog_filters):
raise ValueError('yaml file error: `min` or `max` must exist for each item in `catalog_filters`')
self.enable_individual_summary = bool(kwargs.get('enable_individual_summary', True))
self.enable_aggregated_summary = bool(kwargs.get('enable_aggregated_summary', False))
self.always_show_plot = bool(kwargs.get('always_show_plot', True))
self.nbins = int(kwargs.get('nbins', 50))
self.prop_cycle = None
self.current_catalog_name = None
self.current_failed_count = None
self._aggregated_header = list()
self._aggregated_table = list()
self._individual_header = list()
self._individual_table = list()
super(CheckQuantities, self).__init__(**kwargs)
def record_result(self, results, quantity_name=None, more_info=None, failed=None, individual_only=False):
if isinstance(results, dict):
self.current_failed_count += sum(1 for v in results.values() if v[1] == 'fail')
elif failed:
self.current_failed_count += 1
if self.enable_individual_summary:
if quantity_name is None:
self._individual_header.append(self.format_result_header(results, failed))
else:
self._individual_table.append(self.format_result_row(results, quantity_name, more_info))
if self.enable_aggregated_summary and not individual_only:
if quantity_name is None:
results = '{} {}'.format(self.current_catalog_name, results) if self.current_catalog_name else results
self._aggregated_header.append(self.format_result_header(results, failed))
else:
quantity_name = '{} {}'.format(self.current_catalog_name, quantity_name) if self.current_catalog_name else quantity_name
self._aggregated_table.append(self.format_result_row(results, quantity_name, more_info))
def format_result_row(self, results, quantity_name, more_info):
more_info = 'title="{}"'.format(more_info) if more_info else ''
output = ['<tr>', '<td {1}>{0}</td>'.format(quantity_name, more_info)]
for s in self.stats:
output.append('<td class="{1}" title="{2}">{0:.4g}</td>'.format(*results[s]))
output.append('</tr>')
return ''.join(output)
@staticmethod
def format_result_header(results, failed=False):
return '<span {1}>{0}</span>'.format(results, 'class="fail"' if failed else '')
def generate_summary(self, output_dir, aggregated=False):
if aggregated:
if not self.enable_aggregated_summary:
return
header = self._aggregated_header
table = self._aggregated_table
else:
if not self.enable_individual_summary:
return
header = self._individual_header
table = self._individual_table
with open(os.path.join(output_dir, 'SUMMARY.html'), 'w') as f:
f.write('<html><head><style>html{font-family: monospace;} table{border-spacing: 0;} thead,tr:nth-child(even){background: #ddd;} thead{font-weight: bold;} td{padding: 2px 8px;} .fail{color: #F00;} .none{color: #444;}</style></head><body>\n')
f.write('<ul>\n')
for line in header:
f.write('<li>')
f.write(line)
f.write('</li>\n')
f.write('</ul><br>\n')
f.write('<table><thead><tr><td>Quantity</td>\n')
for s in self.stats:
f.write('<td>{}</td>'.format(s))
f.write('</tr></thead><tbody>\n')
for line in table:
f.write(line)
f.write('\n')
f.write('</tbody></table></body></html>\n')
if not aggregated:
self._individual_header.clear()
self._individual_table.clear()
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
all_quantities = sorted(map(str, catalog_instance.list_all_quantities(True)))
self.prop_cycle = cycle(iter(plt.rcParams['axes.prop_cycle']))
self.current_catalog_name = catalog_name
self.current_failed_count = 0
galaxy_count = None
quantity_hashes = defaultdict(set)
self.record_result(
'Running readiness test on {} {}'.format(
catalog_name,
getattr(catalog_instance, 'version', ''),
),
individual_only=True,
)
if self.truncate_cat_name:
catalog_name = catalog_name.partition("_")[0]
version = getattr(catalog_instance, 'version', '') if not self.no_version else ''
# check filters
filters = []
filter_labels = ''
for d in self.catalog_filters:
fq = d.get('quantity')
if fq in all_quantities:
filter_label=''
qlabel = d.get('label') if d.get('label') else fq
if d.get('min') is not None:
filters.append('{} >= {}'.format(fq, d.get('min')))
filter_label = '{} <= {}'.format(d.get('min'), qlabel)
if d.get('max') is not None:
filters.append('{} < {}'.format(fq, d.get('max')))
flabel = '{} <= {}'.format(qlabel, d.get('max'))
filter_label = flabel if len(filter_label)==0 else re.sub(str(d.get('label')), flabel, filter_label)
filter_labels = '$'+filter_label+'$' if len(filter_labels)==0 else ', '.join([filter_labels,
'$'+filter_label+'$'])
else:
self.record_result('Found no matching quantity for filtering on {}'.format(fq), failed=True)
continue
print(filters, filter_labels)
lgnd_loc_dflt ='best'
for i, checks in enumerate(self.quantities_to_check):
quantity_patterns = checks['quantities'] if isinstance(checks['quantities'], (tuple, list)) else [checks['quantities']]
quantities_this = set()
quantity_pattern = None
for quantity_pattern in quantity_patterns:
quantities_this.update(fnmatch.filter(all_quantities, quantity_pattern))
if not quantities_this:
self.record_result('Found no matching quantities for {}'.format(quantity_pattern), failed=True)
continue
quantities_this = sorted(quantities_this, key=split_for_natural_sort)
if 'label' in checks:
quantity_group_label = checks['label']
else:
quantity_group_label = re.sub('_+', '_', re.sub(r'\W+', '_', quantity_pattern)).strip('_')
plot_filename = 'p{:02d}_{}.png'.format(i, quantity_group_label)
fig, ax = plt.subplots()
has_plot = False
for quantity in quantities_this:
if len(filters) > 0:
catalog_data = catalog_instance.get_quantities([quantity], filters=filters)
value = catalog_data[quantity]
else:
value = catalog_instance[quantity]
need_plot = False
if galaxy_count is None:
galaxy_count = len(value)
self.record_result('Found {} entries in this catalog.'.format(galaxy_count))
elif galaxy_count != len(value):
self.record_result('"{}" has {} entries (different from {})'.format(quantity, len(value), galaxy_count), failed=True)
need_plot = True
if checks.get('log'):
value = np.log10(value)
value_finite = value[np.isfinite(value)]
result_this_quantity = {}
for s, func in self.stats.items():
if s == 'f_outlier':
s_value = calc_frac(value_finite, func, len(value))
elif s.startswith('f_'):
s_value = calc_frac(value, func)
else:
s_value = func(value_finite)
flag = False
if s in checks:
try:
min_value, max_value = checks[s]
except (TypeError, ValueError):
flag |= (s_value != checks[s])
else:
if min_value is not None:
flag |= (s_value < min_value)
if max_value is not None:
flag |= (s_value > max_value)
else:
flag = None
result_this_quantity[s] = (
s_value,
'none' if flag is None else ('fail' if flag else 'pass'),
checks.get(s, ''),
)
if flag:
need_plot = True
quantity_hashes[tuple(result_this_quantity[s][0] for s in self.stats)].add(quantity)
self.record_result(
result_this_quantity,
quantity + (' [log]' if checks.get('log') else ''),
plot_filename
)
if need_plot or self.always_show_plot:
ax.hist(value_finite, self.nbins, histtype='step', fill=False, label=quantity, **next(self.prop_cycle))
has_plot = True
if has_plot:
ax.set_xlabel(('log ' if checks.get('log') else '') + quantity_group_label, size=self.font_size)
ax.yaxis.set_ticklabels([])
if checks.get('plot_min') is not None: #zero values fail otherwise
ax.set_xlim(left=checks.get('plot_min'))
if checks.get('plot_max') is not None:
ax.set_xlim(right=checks.get('plot_max'))
ax.set_title('{} {}'.format(catalog_name, version), fontsize=self.title_size)
fig.tight_layout()
if len(quantities_this) <= 9:
#check for special legend location
lgnd_loc = lgnd_loc_dflt
if checks.get('lgnd_loc') is not None:
lgnd_loc = checks.get('lgnd_loc')
leg = ax.legend(loc=lgnd_loc, fontsize=self.legend_size, ncol=3, frameon=True, facecolor='white',
title=filter_labels, title_fontsize=self.lgndtitle_fontsize)
leg.get_frame().set_alpha(0.5)
fig.savefig(os.path.join(output_dir, plot_filename))
plt.close(fig)
for same_quantities in quantity_hashes.values():
if len(same_quantities) > 1:
self.record_result('{} seem to be identical!'.format(', '.join(same_quantities)), failed=True)
for relation in self.relations_to_check:
try:
result = check_relation(relation, catalog_instance)
except Exception as e: # pylint: disable=broad-except
self.record_result('Not able to evaluate `{}`! {}'.format(relation, e), failed=True)
continue
if result:
self.record_result('It is true that `{}`'.format(relation))
else:
self.record_result('It is NOT true that `{}`'.format(relation), failed=True)
for d in self.uniqueness_to_check:
quantity = label = d.get('quantity')
mask = d.get('mask')
quantities_needed = [quantity]
if mask is not None:
quantities_needed.append(mask)
label += '[{}]'.format(mask)
if not catalog_instance.has_quantities(quantities_needed):
self.record_result('{} does not exist'.format(' or '.join(quantities_needed)), failed=True)
continue
data = catalog_instance.get_quantities(quantities_needed)
if check_uniqueness(data[quantity], data.get(mask)):
self.record_result('{} is all unique'.format(label))
else:
self.record_result('{} has repeated entries!'.format(label), failed=True)
self.generate_summary(output_dir)
return TestResult(passed=(self.current_failed_count == 0), score=self.current_failed_count)
def conclude_test(self, output_dir):
self.generate_summary(output_dir, aggregated=True)
| 17,354
| 40.92029
| 252
|
py
|
descqa
|
descqa-master/descqa/DensityVersusSkyPosition.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import numpy as np
from scipy.stats import binned_statistic
from .base import BaseValidationTest, TestResult
from .plotting import plt
import healpy as hp
__all__ = ['DensityVersusSkyPosition']
def create_hp_map(ra, dec, nside):
"""
Auxiliary function to generate HEALPix maps from catalogs.
It reads the ra and dec in degrees and returns a HEALPix map
"""
pixnums = hp.ang2pix(nside, ra, dec, lonlat=True)
return np.bincount(pixnums, minlength=hp.nside2npix(nside)).astype(float) # pylint: disable=no-member
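# Usage sketch (not part of the original module): counts-in-pixels map from
# random positions drawn uniformly on the sphere (ra/dec in degrees).
def _example_hp_map(nside=32, n=10000):
    rng = np.random.default_rng(0)
    ra = rng.uniform(0, 360, n)
    dec = np.degrees(np.arcsin(rng.uniform(-1, 1, n)))
    return create_hp_map(ra, dec, nside)  # length hp.nside2npix(nside)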
class DensityVersusSkyPosition(BaseValidationTest):
"""
This test checks the object density as a function
of another map (e.g: extinction, airmass, etc)
Parameters
----------
validation_map_filename (str): Path to extinction, airmass or other map
nside (int): Healpix nside parameter at which to compute the density.
xlabel (str): Name of the quantity read from validation_map_filename; it
serves as the x-axis label of the validation plots
"""
def __init__(self,**kwargs): # pylint: disable=W0231
self.kwargs = kwargs
self.test_name = kwargs['test_name']
self.validation_path = os.path.join(self.external_data_dir, kwargs['validation_map_filename'])
self.nside = kwargs['nside']
self.validation_data = hp.ud_grade(hp.read_map(self.validation_path), nside_out=self.nside)
self.xlabel = kwargs['xlabel']
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
if not catalog_instance.has_quantities(['ra', 'dec', 'extendedness']):
return TestResult(skipped=True, summary='catalog does not have needed quantities')
catalog_data = catalog_instance.get_quantities(['ra', 'dec', 'extendedness'], filters=['extendedness == 1'])
data_map = create_hp_map(catalog_data['ra'], catalog_data['dec'], self.nside)
mask = data_map>0 # This is a good approximation if the pixels are big enough
xmin, xmax = np.percentile(self.validation_data[mask], [5,95])
data_map /= (3600*hp.nside2pixarea(self.nside, degrees=True)) # To get the density in arcmin^-2
mean_dens, be, _ = binned_statistic(self.validation_data[mask], data_map[mask], statistic='mean', range=(xmin, xmax))
std_dens, be, _ = binned_statistic(self.validation_data[mask], data_map[mask], statistic='std', range=(xmin, xmax))
counts, be, _ = binned_statistic(self.validation_data[mask], data_map[mask], statistic='count', range=(xmin, xmax))
bin_centers = 0.5*be[1:]+0.5*be[:-1]
fig, ax = plt.subplots(1,1)
ax.errorbar(bin_centers, mean_dens, std_dens/np.sqrt(counts), fmt='o')
ax.set_xlabel(self.xlabel)
ax.set_ylabel('Mean density [arcmin$^{-2}$]')
fig.savefig(os.path.join(output_dir, '%s_density_vs_extinction.png' % catalog_name))
return TestResult(inspect_only=True)
| 2,990
| 46.47619
| 125
|
py
|
descqa
|
descqa-master/descqa/SizeDistribution.py
|
import os
import numpy as np
from itertools import count
import re
from .base import BaseValidationTest, TestResult
from .plotting import plt
from .utils import get_opt_binpoints
__all__ = ['SizeDistribution']
class SizeDistribution(BaseValidationTest):
"""
validation test to check the slope of the size distribution at small sizes.
"""
#plotting constants
validation_color = 'black'
validation_marker = 'o'
default_markers = ['v', 's', 'd', 'H', '^', 'D', 'h', '<', '>', '.']
msize = 4 #marker-size
yaxis_xoffset = 0.02
yaxis_yoffset = 0.5
def __init__(self, **kwargs):
#pylint: disable=W0231
#validation data
validation_filepath = os.path.join(self.data_dir, kwargs['data_filename'])
self.validation_data = np.loadtxt(validation_filepath)
self.acceptable_keys = kwargs['possible_size_fields']
self.acceptable_mag_keys = kwargs['possible_mag_fields']
self.fontsize = kwargs.get('fontsize', 15)
self.lgnd_fontsize = kwargs.get('lgnd_fontsize', 12)
self.truncate_cat_name = kwargs.get('truncate_cat_name', True)
self.truncate_key_name = kwargs.get('truncate_key_name', True)
self._color_iterator = ('C{}'.format(i) for i in count())
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
# update color and marker to preserve catalog colors and markers across tests
catalog_color = next(self._color_iterator)
# check catalog data for required quantities
key = catalog_instance.first_available(*self.acceptable_keys)
if not key:
summary = 'Missing required quantity ' + ' or '.join(['{}']*len(self.acceptable_keys))
return TestResult(skipped=True, summary=summary.format(*self.acceptable_keys))
mag_key = catalog_instance.first_available(*self.acceptable_mag_keys)
if not mag_key:
summary = 'Missing required quantity ' + ' or '.join(['{}']*len(self.acceptable_mag_keys))
return TestResult(skipped=True, summary=summary.format(*self.acceptable_mag_keys))
# get data
catalog_data = catalog_instance.get_quantities([key, mag_key])
sizes = catalog_data[key][catalog_data[mag_key]<25.2]
good_data_mask = np.logical_not(np.logical_or(np.isinf(sizes), np.isnan(sizes)))
sizes = sizes[good_data_mask]
non_neg_mask = sizes > 0
if not np.all(non_neg_mask):
print('Warning: some sizes were negative or zero; these are being masked')
sizes = sizes[non_neg_mask]
min_sizes = np.min(sizes)
max_sizes = np.max(sizes)
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
# Compute N(size) and its slope at the small end.
# Things seem to be roughly linear where N(size)>0.5*Ntot so use those points.
# Get ~20 points for the line fit, but compute the whole graph
median = np.median(sizes)
n_bins = int(20*(max_sizes-min_sizes)/(median-min_sizes))
N, bin_edges = np.histogram(sizes, bins=n_bins)
sumM = np.histogram(sizes, weights=sizes, bins=bin_edges)[0]
sumM2 = np.histogram(sizes, weights=sizes**2, bins=bin_edges)[0]
size_pts = get_opt_binpoints(N, sumM, sumM2, bin_edges)
diff = size_pts[1:] - size_pts[:-1]
if not np.all(diff >= 0):
# Sparsely populated bins sometimes cause problems for
# get_opt_binpoints; replace with the dumb solution
size_pts = 0.5*(bin_edges[:-1]+bin_edges[1:])
mask = size_pts < median
# Normalize so we can compare datasets of different sizes
cumul_N_norm = np.array(
[1.0*np.sum(N[-i-1:]) for i in range(len(N))], dtype=float
)[::-1]/np.sum(N)
data_slope, data_intercept = np.polyfit(size_pts[mask], cumul_N_norm[mask], 1)
# Compute the slope for the validation dataset in this size range.
# Copy the validation dataset so we can play with it
validation_data = self.validation_data.copy()
validation_mask = (validation_data[:, 0] > min_sizes) & (validation_data[:, 0] < median)
validation_data[:, 1] /= validation_data[validation_mask, 1][0]
validation_slope, _ = np.polyfit(validation_data[validation_mask, 0],
validation_data[validation_mask, 1], 1)
# plot a histogram of sizes. This is easier to see as log(sizes) so do that.
fig, (hist_ax, cumul_ax) = plt.subplots(1, 2)
fig.subplots_adjust(wspace=0.4)
xname = key if not self.truncate_key_name else re.split('_', key)[0]
hist_ax.hist(np.log10(sizes), color=catalog_color, edgecolor='black', alpha=0.75,
density=True, bins=20)
hist_ax.set_xlabel("$\\log_{{10}}(\\rm{{{}/arcsec}})$".format(xname), fontsize=self.fontsize)
hist_ax.set_ylabel("$dN/d\\log_{{10}}(\\rm{{{}/arcsec}})$".format(xname), fontsize=self.fontsize)
# plot the CDF and the line fit
cumul_ax.plot(np.log10(size_pts), cumul_N_norm,
color=catalog_color, label='{}: ${:.2f}$'.format(catalog_name, data_slope))
cumul_ax.plot(np.log10(size_pts[mask]), (data_intercept+data_slope*size_pts[mask]), color='gray',
label='COSMOS: ${:.2f}$'.format(validation_slope))
#cumul_ax.set_xscale('log')
#cumul_ax.text(0.95, 0.96,
# 'COSMOS: ${:.2f}$\n{}: ${:.2f}$'.format(validation_slope, catalog_name, data_slope),
# horizontalalignment='right', verticalalignment='top',
# transform=cumul_ax.transAxes)
cumul_ax.set_xlabel("$\\log_{{10}}(\\rm{{{}/arcsec}})$".format(xname), fontsize=self.fontsize)
cumul_ax.set_ylabel("$N(\\rm{{{}}}$)".format(xname), fontsize=self.fontsize)
cumul_ax.legend(loc='upper right', title='Slopes', fontsize=self.lgnd_fontsize)
cumul_ax.set_ylim(-0.05, 1.4) #force room for legend
with open(os.path.join(output_dir, 'size_distribution_{}.txt'.format(catalog_name)), 'w'
) as f:
f.write("# Slope, intercept\n")
f.write("%7f %9f\n"%(data_slope, data_intercept))
fig.savefig(os.path.join(output_dir, 'size_distribution_{}.png'.format(catalog_name)))
plt.close(fig)
return TestResult(score=data_slope/validation_slope,
passed=(0.5<=(data_slope/validation_slope)<=2.0))
| 6,607
| 48.313433
| 107
|
py
|
descqa
|
descqa-master/descqa/basic_test.py
|
from __future__ import division, unicode_literals, absolute_import
import os
from builtins import str #pylint: disable=W0622
import yaml
import numpy as np
import healpy as hp
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['ListAvailableQuantities', 'SkyArea']
class ListAvailableQuantities(BaseValidationTest):
"""
validation test to list all available quantities
"""
def __init__(self, **kwargs): #pylint: disable=W0231
self.kwargs = kwargs
self.calc_min_max = kwargs.get('calc_min_max', False)
def _save_quantities(self, catalog_name, quantities, filename):
is_dict = isinstance(quantities, dict)
maxlen = max((len(q) for q in quantities))
with open(filename, 'w') as f:
if is_dict:
f.write('{} {} {} \n'.format('# ' + catalog_name.ljust(maxlen-2), 'Minimum'.rjust(13), 'Maximum'.rjust(13)))
else:
f.write('# ' + catalog_name + '\n')
for q in sorted(quantities):
if is_dict:
f.write('{0} {1[0]:13.4g} {1[1]:13.4g} '.format(q.ljust(maxlen), quantities[q]))
else:
f.write(str(q))
f.write('\n')
def _get_data_ranges(self, catalog_instance, native=False):
quantities = catalog_instance.list_all_native_quantities() if native else catalog_instance.list_all_quantities()
if not self.calc_min_max:
return quantities
if native:
#check for name collisions and add native quantity
quantities_needed = []
gcr_quantities = catalog_instance.list_all_quantities()
for q in quantities:
if q in gcr_quantities:
catalog_instance.add_quantity_modifier(q + '_native', q)
quantities_needed.append(q + '_native')
else:
quantities_needed.append(q)
else:
quantities_needed = quantities
d_min = {}
d_max = {}
for data in catalog_instance.get_quantities(quantities_needed, return_iterator=True):
for qx in quantities_needed:
q = qx.replace('_native','') if qx.endswith('_native') else qx
if data[qx].dtype.char in 'bBiulfd':
d_min[q] = min(np.nanmin(data[qx]), d_min.get(q, np.inf))
d_max[q] = max(np.nanmax(data[qx]), d_max.get(q, -np.inf))
#clean_up q_native added quantities
for qx in quantities_needed:
if qx.endswith('_native'):
catalog_instance.del_quantity_modifier(qx)
return {q: (d_min[q], d_max[q]) for q in d_min}
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
self._save_quantities(catalog_name, self._get_data_ranges(catalog_instance, native=False), os.path.join(output_dir, 'quantities.txt'))
self._save_quantities(catalog_name, self._get_data_ranges(catalog_instance, native=True), os.path.join(output_dir, 'native_quantities.txt'))
with open(os.path.join(output_dir, 'config.yaml'), 'w') as f:
f.write(yaml.dump(catalog_instance.get_catalog_info(), default_flow_style=False))
f.write('\n')
return TestResult(inspect_only=True)
class SkyArea(BaseValidationTest):
"""
validation test to show sky area
"""
def __init__(self, **kwargs): #pylint: disable=W0231
self.nside = kwargs.get('nside', 64)
assert hp.isnsideok(self.nside), '`nside` value {} not correct'.format(self.nside)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
if not catalog_instance.has_quantities(['ra_true', 'dec_true']):
return TestResult(skipped=True)
pixels = set()
for d in catalog_instance.get_quantities(['ra_true', 'dec_true'], return_iterator=True):
pixels.update(hp.ang2pix(self.nside, d['ra_true'], d['dec_true'], lonlat=True))
frac = len(pixels) / hp.nside2npix(self.nside)
skyarea = frac * np.rad2deg(np.rad2deg(4.0*np.pi))
hp_map = np.empty(hp.nside2npix(self.nside))
hp_map.fill(hp.UNSEEN)
hp_map[list(pixels)] = 0
hp.mollview(hp_map, title=catalog_name, coord='C', cbar=None)
plt.savefig(os.path.join(output_dir, 'skymap.png'))
plt.close()
return TestResult(inspect_only=True, summary='approx. {:.7g} sq. deg.'.format(skyarea))
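# Note (not part of the original module): the double rad2deg above converts
# steradians to square degrees, i.e. multiplies by (180/pi)**2.
def _example_full_sky_area():
    return np.rad2deg(np.rad2deg(4.0 * np.pi))  # ~41252.96 sq. deg. (full sky)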
| 4,516
| 39.693694
| 148
|
py
|
descqa
|
descqa-master/descqa/plotting.py
|
import matplotlib
mpl = matplotlib
mpl.use('Agg') # Must be before importing matplotlib.pyplot
mpl.rcParams['font.size'] = 13.0
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['legend.frameon'] = False
mpl.rcParams['legend.fontsize'] = 'small'
mpl.rcParams['figure.dpi'] = 200.0
mpl.rcParams['lines.markersize'] = 4.0
mpl.rcParams['xtick.minor.visible'] = True
mpl.rcParams['ytick.minor.visible'] = True
mpl.rcParams['xtick.major.size'] = 5.0
mpl.rcParams['xtick.minor.size'] = 3.0
mpl.rcParams['ytick.major.size'] = 5.0
mpl.rcParams['ytick.minor.size'] = 3.0
import matplotlib.pyplot
plt = matplotlib.pyplot
__all__ = ['matplotlib', 'mpl', 'plt']
| 696
| 29.304348
| 59
|
py
|
descqa
|
descqa-master/descqa/truth_galaxy_verification.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import re
import numpy as np
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['TruthGalaxyVerification']
class TruthGalaxyVerification(BaseValidationTest):
"""
Verify the galaxy components of the truth catalog.
Works on a composite catalog that joins the truth and extragalactic catalogs.
Parameters
----------
to_verify: list of dict
each dict should have keys `truth` and `extragalactic` that specify the column names
and also optional keys `atol` and `rtol` that specify tolerance
check_missing_galaxy_quantities : list of str
column names in extragalactic catalog to plot the properties of missing galaxies
"""
def __init__(self, **kwargs):
to_verify = kwargs.get('to_verify')
if not to_verify:
raise ValueError('Nothing to verify!')
if not all(isinstance(d, dict) for d in to_verify):
raise ValueError('`to_verify` must be a list of dictionaries')
if not all('truth' in d and 'extragalactic' in d for d in to_verify):
raise ValueError('each dict in `to_verify` must have `truth` and `extragalactic`')
self.to_verify = tuple(to_verify)
self.check_missing_galaxy_quantities = tuple(kwargs.get('check_missing_galaxy_quantities', []))
self.bins = int(kwargs.get('bins', 50))
super(TruthGalaxyVerification, self).__init__(**kwargs)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
passed = []
failed = []
masked = None
for i, to_verify in enumerate(self.to_verify):
quantities = [
('extragalactic', to_verify['extragalactic']),
('truth', to_verify['truth']),
]
if not catalog_instance.has_quantities(quantities):
failed.append(quantities)
continue
data = catalog_instance.get_quantities(quantities)
q1 = data[quantities[0]]
q2 = data[quantities[1]]
del data
if masked is None and np.ma.is_masked(q2):
masked = q2.mask.copy()
if to_verify.get('atol') or to_verify.get('rtol'):
passed_this = np.allclose(q1, q2, **{k: float(to_verify.get(k, 0)) for k in ('atol', 'rtol')})
else:
passed_this = (q1 == q2).all()
if passed_this:
passed.append(quantities)
else:
failed.append(quantities)
if not passed_this or to_verify.get('always_show_plot'):
diff = (q1 - q2)
if np.ma.is_masked(diff):
diff = diff.compressed()
self.plot_hist(diff, '{0[0]}:{0[1]} - {1[0]}:{1[1]}'.format(*quantities), 'diff_{:02d}'.format(i), output_dir, log=True, title='Difference')
if masked is not None and masked.any():
print('[Warning] Truth catalog has {} galaxies fewer than the extragalactic catalog'.format(np.count_nonzero(masked)))
if self.check_missing_galaxy_quantities:
data = catalog_instance.get_quantities([('extragalactic', q) for q in self.check_missing_galaxy_quantities])
for i, q in enumerate(self.check_missing_galaxy_quantities):
self.plot_hist(data[('extragalactic', q)][masked], q, 'missing_{:02d}'.format(i), output_dir, log=True, color='C1', title='Missing galaxies')
if passed:
with open(os.path.join(output_dir, 'results_passed.txt'), 'w') as f:
for q in passed:
f.write(str(q) + '\n')
if failed:
with open(os.path.join(output_dir, 'results_failed.txt'), 'w') as f:
for q in failed:
f.write(str(q) + '\n')
return TestResult(score=len(failed), passed=(not failed))
def plot_hist(self, data, xlabel, filename_prefix, output_dir, title=None, **kwargs):
filename = '{}_{}.png'.format(filename_prefix, re.sub('_+', '_', re.sub(r'\W+', '_', xlabel)).strip('_')).strip('_')
fig, ax = plt.subplots()
data = data[np.isfinite(data)]
if data.size:
ax.hist(data, self.bins, **kwargs)
ax.set_xlabel(xlabel)
ax.set_ylabel('Count')
if title:
ax.set_title(title)
fig.tight_layout()
fig.savefig(os.path.join(output_dir, filename))
plt.close(fig)
| 4,557
| 41.598131
| 161
|
py
|
descqa
|
descqa-master/descqa/clf_test.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import numpy as np
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['ConditionalLuminosityFunction']
class ConditionalLuminosityFunction(BaseValidationTest):
def __init__(self, **kwargs): #pylint: disable=W0231
self.kwargs = kwargs
self.band1 = kwargs.get('band1', 'g')
self.band2 = kwargs.get('band2', 'r')
self.magnitude_bins = np.linspace(*kwargs.get('magnitude_bins', (-27, -18, 29)))
self.mass_bins = 10**np.array((kwargs.get('mass_bins', [13.5, 14.0, 14.5, 15.0, 15.5])))
self.z_bins = np.linspace(*kwargs.get('z_bins', (0.2, 1.0, 4)))
self.color_cut_fraction = float(kwargs.get('color_cut_fraction', 0.2))
self.color_cut_redshift = float(kwargs.get('color_cut_redshift', 0.2))
possible_Mag_fields = ('Mag_true_{}_lsst_z0',
'Mag_true_{}_lsst_z01',
'Mag_true_{}_des_z0',
'Mag_true_{}_des_z01',
'Mag_true_{}_sdss_z0',
'Mag_true_{}_sdss_z01',
)
self.possible_Mag1_fields = [f.format(self.band1) for f in possible_Mag_fields]
self.possible_Mag2_fields = [f.format(self.band2) for f in possible_Mag_fields]
self.n_magnitude_bins = len(self.magnitude_bins) - 1
self.n_mass_bins = len(self.mass_bins) - 1
self.n_z_bins = len(self.z_bins) - 1
self.dmag = self.magnitude_bins[1:] - self.magnitude_bins[:-1]
self.mag_center = (self.magnitude_bins[1:] + self.magnitude_bins[:-1])*0.5
def prepare_galaxy_catalog(self, gc):
quantities_needed = {'redshift_true', 'is_central', 'halo_mass'}
if gc.has_quantities(['truth/RHALO', 'truth/R200']):
gc.add_quantity_modifier('r_host', 'truth/RHALO', overwrite=True)
gc.add_quantity_modifier('r_vir', 'truth/R200', overwrite=True)
quantities_needed.add('r_host')
quantities_needed.add('r_vir')
absolute_magnitude1_field = gc.first_available(*self.possible_Mag1_fields)
absolute_magnitude2_field = gc.first_available(*self.possible_Mag2_fields)
quantities_needed.add(absolute_magnitude1_field)
quantities_needed.add(absolute_magnitude2_field)
if not (absolute_magnitude1_field and absolute_magnitude2_field and gc.has_quantities(quantities_needed)):
return
return absolute_magnitude1_field, absolute_magnitude2_field, quantities_needed
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
prepared = self.prepare_galaxy_catalog(catalog_instance)
if prepared is None:
return TestResult(skipped=True)
absolute_magnitude1_field, absolute_magnitude2_field, quantities_needed = prepared
# find out color cut threshold
color = []
for data in catalog_instance.get_quantities(
[absolute_magnitude1_field, absolute_magnitude2_field, 'redshift_true'],
            filters=['redshift_true < {}'.format(self.color_cut_redshift)],
return_iterator=True,
):
color.append(data[absolute_magnitude1_field] - data[absolute_magnitude2_field])
color_cut_percentile_at = 100.0 * (1 - self.color_cut_fraction)
color_cut_thres = np.percentile(np.concatenate(color), color_cut_percentile_at)
del color
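        # e.g. with color_cut_fraction = 0.2 the threshold is the 80th percentile
        # of the low-redshift color distribution, so the filter below keeps the
        # reddest 20% of galaxies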
colnames = [absolute_magnitude2_field, 'halo_mass', 'redshift_true']
bins = (self.magnitude_bins, self.mass_bins, self.z_bins)
hist_cen = np.zeros((self.n_magnitude_bins, self.n_mass_bins, self.n_z_bins))
hist_sat = np.zeros_like(hist_cen)
cen_query = GCRQuery('is_central')
sat_query = ~GCRQuery('is_central')
if 'r_host' in quantities_needed and 'r_vir' in quantities_needed:
sat_query &= GCRQuery('r_host < r_vir')
for data in catalog_instance.get_quantities(
quantities_needed,
filters=['{} - {} > {}'.format(absolute_magnitude1_field, absolute_magnitude2_field, color_cut_thres)],
return_iterator=True,
):
cen_mask = cen_query.mask(data)
sat_mask = sat_query.mask(data)
            data = np.stack([data[k] for k in colnames]).T  # np.stack requires a sequence, not a generator
hist_cen += np.histogramdd(data[cen_mask], bins)[0]
hist_sat += np.histogramdd(data[sat_mask], bins)[0]
data = cen_mask = sat_mask = None
halo_counts = hist_cen.sum(axis=0)
clf = dict()
clf['sat'] = hist_sat / halo_counts
clf['cen'] = hist_cen / halo_counts
clf['tot'] = clf['sat'] + clf['cen']
self.make_plot(clf, catalog_name, os.path.join(output_dir, 'clf.png'))
return TestResult(inspect_only=True)
def make_plot(self, clf, name, save_to):
fig, ax = plt.subplots(self.n_mass_bins, self.n_z_bins, sharex=True, sharey=True, figsize=(12,10), dpi=100)
for i in range(self.n_z_bins):
for j in range(self.n_mass_bins):
ax_this = ax[j,i]
for k, ls in zip(('total', 'satellites', 'centrals'), ('-', ':', '--')):
ax_this.semilogy(self.mag_center, clf[k[:3]][:,j,i]/self.dmag, label=k, ls=ls)
ax_this.set_ylim(0.05, 50)
bins = self.mass_bins[j], self.mass_bins[j+1], self.z_bins[i], self.z_bins[i+1]
ax_this.text(-25, 10, '${:.1E}\\leq M <{:.1E}$\n${:g}\\leq z<{:g}$'.format(*bins))
ax_this.legend(loc='lower right', frameon=False, fontsize='medium')
ax = fig.add_subplot(111, frameon=False)
        ax.tick_params(labelcolor='none', top=False, bottom=False, left=False, right=False)
ax.grid(False)
ax.set_ylabel(r'$\phi(M_{{{}}}\,|\,M_{{\rm vir}},z)\quad[{{\rm Mag}}^{{-1}}]$'.format(self.band2))
ax.set_xlabel(r'$M_{{{}}}\quad[{{\rm Mag}}]$'.format(self.band2))
ax.set_title(name)
fig.tight_layout()
fig.savefig(save_to)
plt.close(fig)
| 6,179
| 42.216783
| 115
|
py
|
descqa
|
descqa-master/descqa/stats.py
|
from __future__ import division
from builtins import range # pylint: disable=W0622
import numpy as np
from scipy.stats import chi2
def get_subvolume_indices(x, y, z, box_size, n_side):
side_size = box_size/n_side
return np.ravel_multi_index(np.floor(np.vstack((x, y, z))/side_size).astype(int), (n_side,)*3, 'wrap')
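# get_subvolume_indices example (illustrative): with box_size=100.0 and
# n_side=2, each point is labelled with one of 2**3 = 8 octant indices;
# mode 'wrap' applies periodic boundary conditions to points outside the box.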
def jackknife(data, jack_indices, n_jack, func, full_args=(), full_kwargs={}, jack_args=(), jack_kwargs={}): # pylint: disable=W0102
if len(data) != len(jack_indices):
raise ValueError('`data` and `jack_indices` must have the same length')
if not np.in1d(jack_indices, np.arange(n_jack)).all():
raise ValueError('`jack_indices` must be an array of int between 0 to n_jack-1')
    full = np.array(func(data, *full_args, **full_kwargs), dtype=float)  # np.float was removed in NumPy 1.24
jack = []
for i in range(n_jack):
jack.append(func(data[jack_indices != i], *jack_args, **jack_kwargs))
    jack = np.array(jack, dtype=float)
bias = (jack.mean(axis=0) - full)*(n_jack-1)
return full-bias, bias, np.cov(jack, rowvar=False, bias=True)*float(n_jack-1)
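# Minimal usage sketch (hypothetical inputs, for illustration only):
#   idx = get_subvolume_indices(x, y, z, box_size=256.0, n_side=2)
#   est, bias, cov = jackknife(counts, idx, n_jack=2**3, func=np.mean)
# giving the bias-corrected estimate, the jackknife bias, and the covariance.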
def chisq(difference, covariance, dof):
d = np.asarray(difference)
cov = np.asarray(covariance)
if cov.ndim == 1:
cov = np.diag(cov)
chisq_value = np.dot(d, np.dot(np.linalg.inv(cov), d))
return chisq_value, chi2.cdf(chisq_value, dof)
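# note: chisq's second return value is the chi^2 CDF evaluated at chisq_value
# (i.e. 1 - p-value), not the p-value itself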
def Lp_norm(difference, p=2.0):
d = np.asarray(difference)
d **= p
return d.sum() ** (1.0/p)
def AD_statistic(n1, n2, y1, y2, threshold):
'''
Calculate the two-sample Anderson-Darling statistic from two CDFs;
n1, n2: number of objects in the two samples;
    y1, y2: CDF y-values of the two distributions; they must be evaluated
    on the same x-axis grid.
'''
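    # Discretized two-sample A-D statistic computed below:
    #   A^2 = (n1*n2/n) * sum_j (y2_j - y1_j)^2 * dH_j / (H_j * (1 - H_j))
    # where H is the weighted mixture CDF of the two samples.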
n = n1+n2
h = (n1*y1+n2*y2)/n
# compute Anderson-Darling statistic
inv_weight = (h*(1-h))[:-1]
# remove infinities in the weight function
mask = (inv_weight<1e-5)
inv_weight[mask] = 1
ads = n1*n2/n * np.sum(((y2 - y1)[:-1])**2*(h[1:]-h[:-1])/inv_weight)
if ads<threshold:
success = True
else:
success = False
return ads, success
def CvM_statistic(n1, n2, x1, y1, x2, y2):
'''
Calculate the two-sample Cramer-von Mises statistic from two CDFs;
n1, n2: number of objects in the two samples;
    x1, y1 and x2, y2: x- and y-values of the two CDFs; the CDFs are
    interpolated onto a common grid internally, so their x-axes may differ.
'''
n = n1+n2
x_interp = np.linspace(-2, 5, 10000)
y1_interp = np.interp(x_interp, x1, y1)
y2_interp = np.interp(x_interp, x2, y2)
h = (n1*y1_interp+n2*y2_interp)/n
cvm_omega = np.sqrt(np.trapz((y2_interp-y1_interp)**2, x=h))
return cvm_omega
| 2,681
| 30.928571
| 132
|
py
|
descqa
|
descqa-master/descqa/SizeStellarMassLuminosity.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import numpy as np
import re
from GCR import GCRQuery
from scipy import interpolate
from scipy.stats import binned_statistic
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['SizeStellarMassLuminosity']
def redshift2dist(cosmology):
z = np.arange(0, 5.1, 0.5)
comov_d = cosmology.comoving_distance(z).to('kpc').value
spl = interpolate.splrep(z, comov_d)
return spl
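# redshift2dist usage sketch (assumes an astropy-style cosmology object):
#   spl = redshift2dist(catalog_instance.cosmology)
#   comoving_kpc = interpolate.splev(z_array, spl)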
class SizeStellarMassLuminosity(BaseValidationTest):
"""
    Validation test of galaxy size vs. stellar mass and luminosity
"""
_ARCSEC_TO_RADIAN = np.pi / 180. / 3600.
def __init__(self, **kwargs):
#pylint: disable=W0231
self.kwargs = kwargs
self.observation = kwargs['observation']
self.possible_mag_fields = kwargs['possible_mag_fields']
self.possible_native_luminosities = kwargs['possible_native_luminosities']
self.use_mag = kwargs.get('use_mag', False)
self.test_name = kwargs['test_name']
self.data_label = kwargs['data_label']
self.z_bins = kwargs['z_bins']
self.output_filename_template = kwargs['output_filename_template']
self.label_template = kwargs['label_template']
self.fig_xlabel = kwargs['fig_xlabel']
self.fig_ylabel = kwargs['fig_ylabel']
self.chisq_max = kwargs['chisq_max']
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.title_in_legend = kwargs.get('title_in_legend', False)
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 11)
self.legend_location = kwargs.get('legend_location', 'best')
self.survey_label = kwargs.get('survey_label', '')
self.no_title = kwargs.get('no_title', False)
self.ncolumns = kwargs.get('ncolumns', 3)
self.nrows = kwargs.get('nrows', 2)
self.fig_xlim = kwargs.get('fig_xlim', None)
self.fig_ylim = kwargs.get('fig_ylim', None)
if self.fig_xlim is not None:
self.fig_xlim = [float(x) for x in self.fig_xlim]
if self.fig_ylim is not None:
self.fig_ylim = [float(y) for y in self.fig_ylim]
validation_filepath = os.path.join(self.data_dir, kwargs['data_filename'])
self.validation_data = np.genfromtxt(validation_filepath)
if len(self.survey_label) == 0 and self.no_title:
self.survey_label = self.data_label
self.fig_y = 3*self.nrows + 1
self.fig_x = 3*self.ncolumns + 1
@staticmethod
def ConvertAbsMagLuminosity(AbsM, band):
'''AbsM: absolute magnitude, band: filter (V, CFHT_I, 'i' in AB, rest in Vega)'''
AbsM = np.asarray(AbsM)
bands = {'U':5.61, 'B':5.48, 'V':4.80, 'R':4.42, 'I':4.08,
'J':3.64, 'H':3.32, 'K':3.28, 'g':5.33, 'r':4.67,
'i':4.52, 'z':4.42, 'F300W':6.09, 'F450W':5.32, 'F555W':4.85,
'F606W':4.66, 'F702W':4.32, 'F814W':4.15, 'CFHT_U':5.57,
'CFHT_B':5.49, 'CFHT_V':4.81, 'CFHT_R':4.44, 'CFHT_I':4.51,
'NIRI_J':3.64, 'NIRI_H':3.33, 'NIRI_K':3.29}
if band in bands.keys():
AbsSun = bands[band]
else:
raise ValueError('Filter not implemented')
logL = (AbsSun - AbsM) / 2.5 #unit of sun
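        # worked example: AbsM = -20.0 in the 'r' band (AbsSun = 4.67) gives
        # logL = (4.67 + 20.0) / 2.5 = 9.868, i.e. L ~ 7.4e9 L_sun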
return logL
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
'''
Loop over magnitude cuts and make plots
'''
# load catalog data
spl = redshift2dist(catalog_instance.cosmology)
colnames = dict()
colnames['z'] = catalog_instance.first_available('redshift', 'redshift_true')
xname = 'mag' if self.use_mag else 'lum'
if self.use_mag:
colnames[xname] = catalog_instance.first_available(*self.possible_mag_fields)
else:
colnames[xname] = catalog_instance.first_available(*self.possible_native_luminosities)
if self.observation == 'onecomp':
colnames['size'] = catalog_instance.first_available('size', 'size_true')
elif self.observation == 'twocomp':
colnames['size_bulge'] = catalog_instance.first_available('size_bulge', 'size_bulge_true')
colnames['size_disk'] = catalog_instance.first_available('size_disk', 'size_disk_true')
# parse filter names
if self.use_mag:
filtername = colnames['mag'].split('_')[(-1 if colnames['mag'].startswith('m') else -2)].upper()
band = colnames['mag'].split('_')[(2 if 'true' in colnames['mag'] else 1)]
else:
filtername = ''
band = colnames[xname].split(':')[(-3 if colnames[xname].endswith('s') else -2)].upper()
if colnames[xname].startswith('m'):
filter_id = '{} {}'.format(filtername, band) if filtername != band else band
elif colnames[xname].startswith('M'):
filter_id = '{} $M_{}$'.format(filtername, band) if filtername != band else band
else:
filter_id = ''
if not all(v for v in colnames.values()):
return TestResult(skipped=True, summary='Missing requested quantities')
#Check whether the columns are finite or not
filters = [(np.isfinite, c) for c in colnames.values()]
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
#Select objects within maximum and minimum redshift of all the bins
filters.extend((
'{} < {}'.format(colnames['z'], max(z_bin['z_max'] for z_bin in self.z_bins)),
'{} >= {}'.format(colnames['z'], min(z_bin['z_min'] for z_bin in self.z_bins)),
))
catalog_data = catalog_instance.get_quantities(list(colnames.values()), filters=filters)
catalog_data = {k: catalog_data[v] for k, v in colnames.items()}
fig, axes = plt.subplots(self.nrows, self.ncolumns, figsize=(self.fig_x, self.fig_y), sharex=True, sharey=True)
list_of_validation_values = []
try:
for z_bin, ax in zip_longest(self.z_bins, axes.flat):
# filter catalog data for this bin
if z_bin is None:
ax.set_visible(False)
continue
filters = [
'z < {}'.format(z_bin['z_max']),
'z >= {}'.format(z_bin['z_min']),
]
z_mean = (z_bin['z_max'] + z_bin['z_min']) / 2.
z_width = (z_bin['z_max'] - z_bin['z_min']) / 2.
legend_title = self.label_template.format(z_bin['z_min'], z_bin['z_max'])
catalog_data_this = GCRQuery(*filters).filter(catalog_data)
maskz = (self.validation_data[:,0] < z_mean + z_width) & (self.validation_data[:,0] > z_mean - z_width)
maskL = (self.validation_data[:,1] > 0.)
validation_this = self.validation_data[(maskz) & (maskL)]
if len(catalog_data_this['z']) == 0 or len(validation_this) == 0:
ax.set_visible(False)
continue
output_filepath = os.path.join(output_dir, self.output_filename_template.format(catalog_name, z_bin['z_min'], z_bin['z_max']))
colors = ['r', 'b']
default_L_bin_edges = np.array([9, 9.5, 10, 10.5, 11, 11.5])
default_L_bins = (default_L_bin_edges[1:] + default_L_bin_edges[:-1]) / 2.
if self.observation == 'onecomp':
if self.use_mag:
logL_G = self.ConvertAbsMagLuminosity(catalog_data_this['mag'], band)
else:
logL_G = self.ConvertAbsMagLuminosity(-2.5*np.log10(catalog_data_this['lum']), band)
size_kpc = catalog_data_this['size'] * self._ARCSEC_TO_RADIAN * interpolate.splev(catalog_data_this['z'], spl) / (1 + catalog_data_this['z'])
binned_size_kpc = binned_statistic(logL_G, size_kpc, bins=default_L_bin_edges, statistic='mean')[0]
binned_size_kpc_err = binned_statistic(logL_G, size_kpc, bins=default_L_bin_edges, statistic='std')[0]
heading = 'Luminosity Size (kpc), Size Error (kpc)'
np.savetxt(output_filepath, np.transpose((default_L_bins, binned_size_kpc, binned_size_kpc_err)), fmt='%11.4e', header=heading)
ax.semilogy(validation_this[:,1], 10**validation_this[:,2], label=self.survey_label)
ax.fill_between(validation_this[:,1], 10**validation_this[:,3], 10**validation_this[:,4], lw=0, alpha=0.2)
ax.errorbar(default_L_bins, binned_size_kpc, binned_size_kpc_err, marker='o', ls='', label=' '.join([catalog_name, filter_id]))
validation = self.compute_chisq(default_L_bins, binned_size_kpc, binned_size_kpc_err,
validation_this[:,1], 10**validation_this[:,2])
list_of_validation_values.append(validation)
elif self.observation == 'twocomp':
logL_I = self.ConvertAbsMagLuminosity(catalog_data_this['mag'], band)
arcsec_to_kpc = self._ARCSEC_TO_RADIAN * interpolate.splev(catalog_data_this['z'], spl) / (1 + catalog_data_this['z'])
binned_bulgesize_kpc = binned_statistic(logL_I, catalog_data_this['size_bulge'] * arcsec_to_kpc, bins=default_L_bin_edges, statistic='mean')[0]
binned_bulgesize_kpc_err = binned_statistic(logL_I, catalog_data_this['size_bulge'] * arcsec_to_kpc, bins=default_L_bin_edges, statistic='std')[0]
binned_disksize_kpc = binned_statistic(logL_I, catalog_data_this['size_disk'] * arcsec_to_kpc, bins=default_L_bin_edges, statistic='mean')[0]
binned_disksize_kpc_err = binned_statistic(logL_I, catalog_data_this['size_disk'] * arcsec_to_kpc, bins=default_L_bin_edges, statistic='std')[0]
binned_bulgesize_kpc = np.nan_to_num(binned_bulgesize_kpc)
binned_bulgesize_kpc_err = np.nan_to_num(binned_bulgesize_kpc_err)
binned_disksize_kpc = np.nan_to_num(binned_disksize_kpc)
binned_disksize_kpc_err = np.nan_to_num(binned_disksize_kpc_err)
heading = 'Luminosity Bulge-Size (kpc), Bulge Error (kpc) Disk-Size (kpc), Disk Error (kpc)'
np.savetxt(output_filepath, np.transpose((default_L_bins, binned_bulgesize_kpc, binned_bulgesize_kpc_err,
binned_disksize_kpc, binned_disksize_kpc_err)), fmt='%11.4e', header=heading)
ax.semilogy(validation_this[:,1], validation_this[:,2], label=' '.join([self.survey_label, 'Bulge']), color=colors[0])
ax.fill_between(validation_this[:,1], validation_this[:,2] + validation_this[:,4],
validation_this[:,2] - validation_this[:,4], lw=0, alpha=0.2, facecolor=colors[0])
ax.semilogy(validation_this[:,1] + 0.2, validation_this[:,3], label=' '.join([self.survey_label, 'Disk']), color=colors[1])
ax.fill_between(validation_this[:,1] + 0.2, validation_this[:,3] + validation_this[:,5],
validation_this[:,3] - validation_this[:,5], lw=0, alpha=0.2, facecolor=colors[1])
ax.errorbar(default_L_bins, binned_bulgesize_kpc, binned_bulgesize_kpc_err, marker='o', ls='',
c=colors[0], label=' '.join([catalog_name, filter_id, 'Bulge']))
ax.errorbar(default_L_bins+0.2, binned_disksize_kpc, binned_disksize_kpc_err, marker='o', ls='',
c=colors[1], label=' '.join([catalog_name, filter_id, 'Disk']))
                    ax.set_yscale('log', nonpositive='clip')  # 'nonposy' was renamed 'nonpositive' in matplotlib 3.3
validation_bulge = self.compute_chisq(default_L_bins, binned_bulgesize_kpc, binned_bulgesize_kpc_err,
validation_this[:,1], validation_this[:,2])
validation_disk = self.compute_chisq(default_L_bins, binned_disksize_kpc, binned_disksize_kpc_err,
validation_this[:,1]+0.2, validation_this[:,3])
list_of_validation_values.append([validation_bulge, validation_disk])
del catalog_data_this
if self.fig_xlim is not None:
ax.set_xlim(self.fig_xlim)
                if self.fig_ylim is not None:
                    ax.set_ylim(self.fig_ylim)
ax.legend(loc=self.legend_location, title=legend_title, fontsize=self.legend_size)
ax.tick_params(labelbottom=True, direction='in', which='both')
for axlabel in ax.get_xticklabels():
axlabel.set_visible(True)
ax.set_xlabel(self.fig_xlabel, size=self.font_size)
# center axis labels
fig.text(0.05, 0.5, self.fig_ylabel, fontsize=self.font_size, ha="center", va="center", rotation=90)
#fig.text(0.5, 0.05, self.fig_xlabel, fontsize=self.font_size, ha="center", va="center")
fig.subplots_adjust(hspace=0, wspace=0)
if not self.no_title:
fig.suptitle('{} vs. {}'.format(catalog_name, self.data_label), fontsize='medium', y=0.93)
finally:
fig.savefig(os.path.join(output_dir, '{:s}.png'.format(self.test_name)), bbox_inches='tight')
plt.close(fig)
allpass = True
for validation_val, zbin in zip(list_of_validation_values, self.z_bins):
if hasattr(validation_val, '__iter__'):
print("Redshift bin {}-{}: bulge chi-square/dof: {}, disk chi-square/dof: {}.".format(
zbin['z_min'], zbin['z_max'], validation_val[0], validation_val[1]))
if validation_val[0] > self.chisq_max:
print("Chi-square/dof with respect to validation data is too large for bulges in redshift bin {}-{}".format(
zbin['z_min'], zbin['z_max']))
allpass = False
if validation_val[1] > self.chisq_max:
print("Chi-square/dof with respect to validation data is too large for disks in redshift bin {}-{}".format(
zbin['z_min'], zbin['z_max']))
allpass = False
else:
print("Redshift bin {}-{}: chi-square/dof: {}.".format(
zbin['z_min'], zbin['z_max'], validation_val))
if validation_val > self.chisq_max:
print("Chi-square/dof with respect to validation data is too large for redshift bin {}-{}".format(
zbin['z_min'], zbin['z_max']))
allpass = False
#TODO: calculate summary statistics
return TestResult(score=np.mean(list_of_validation_values), passed=allpass)
def compute_chisq(self, bins, binned_data, binned_err, validation_points, validation_data):
if np.any(validation_data==0):
mask = validation_data!=0
validation_points = validation_points[mask]
validation_data = validation_data[mask]
if len(validation_points)>0 and validation_points[-1]<validation_points[0]:
validation_points = validation_points[::-1]
validation_data = validation_data[::-1]
if len(validation_points)>1:
validation_at_binpoints = interpolate.CubicSpline(validation_points, validation_data)(bins)
else:
validation_at_binpoints = binned_data #force chi-sq to zero if no validation data
weights = 1./binned_err**2
return np.sum(weights*(validation_at_binpoints-binned_data)**2)/len(weights)
| 16,283
| 56.744681
| 166
|
py
|
descqa
|
descqa-master/descqa/shear_test.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import time
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import quad
from sklearn.cluster import k_means
import treecorr
import pyccl as ccl
import camb
import camb.correlations
import astropy.units as u
import astropy.constants as const
from astropy.cosmology import WMAP7 # pylint: disable=no-name-in-module
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
pars = camb.CAMBparams()
__all__ = ['ShearTest']
class ShearTest(BaseValidationTest):
"""
Validation test for shear and convergence quantities
"""
def __init__(self,
z='redshift_true',
ra='ra',
dec='dec',
e1='shear_1',
e2='shear_2_phosim',
mag='Mag_true_r_sdss_z0',
maglim=-19.0,
kappa='convergence',
nbins=20,
min_sep=2.5,
max_sep=250,
sep_units='arcmin',
bin_slop=0.1,
zlo=0.5,
zhi=0.6,
ntomo=2,
z_range=0.05,
do_jackknife=False,
N_clust=10,
**kwargs):
#pylint: disable=W0231
self.axsize = kwargs.get('axsize', 17)
self.title_size = kwargs.get('title_size', 18)
self.legend_size = kwargs.get('legend_size', 15)
self.truncate_cat_name = kwargs.get('truncate_cat_name', True)
self.z = z
#sep-bounds and binning
self.min_sep = min_sep
self.max_sep = max_sep
self.nbins = nbins
self.sep_bins = np.linspace(min_sep, max_sep, nbins + 1)
self.sep_units = sep_units
self.bin_slop = bin_slop
self.ra = ra
self.dec = dec
self.mag = mag
self.maglim = maglim
self.e1 = e1
self.e2 = e2
self.kappa = kappa
self.N_clust = N_clust
self.do_jackknife = do_jackknife
# cut in redshift
self.summary_fig, self.summary_ax = plt.subplots(nrows=2, ncols=ntomo, sharex=True, squeeze=False, figsize=(ntomo*5, 5))
self.ntomo = ntomo
self.z_range = z_range
self.zlo = zlo
self.zhi = zhi
self.zmeans = np.linspace(self.zlo, self.zhi, self.ntomo+2)[1:-1]
def compute_nz(self, n_z):
'''create interpolated n(z) distribution'''
z_bins = np.linspace(self.zlo, self.zhi, 301)
n = np.histogram(n_z, bins=z_bins)[0]
z = (z_bins[1:] - z_bins[:-1]) / 2. + z_bins[:-1]
n2 = interp1d(z, n, bounds_error=False, fill_value=0.0, kind='cubic')
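        # renormalize so the interpolated n(z) integrates to unity over [zlo, zhi]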
n2_sum = quad(n2, self.zlo, self.zhi)[0]
n2 = interp1d(z, n / n2_sum, bounds_error=False, fill_value=0.0, kind='cubic')
return n2
def theory_corr(self, n_z2, xvals, lmax2, chi_max, zlo2, zhi2, cosmo_cat):
'''compute the correlation function from limber integration over the CAMB power spectrum'''
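        # pipeline: catalog n(z) -> CCL weak-lensing C_ell -> CAMB's cl2corr
        # transform, whose output columns 1 and 2 hold xi_+(theta) and
        # xi_-(theta) as used here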
nz_int = self.compute_nz(n_z2)
z_vals = np.linspace(zlo2,zhi2,1000)
n_vals = nz_int(z_vals)
ns = getattr(cosmo_cat, 'n_s', 0.963)
s8 = getattr(cosmo_cat, 'sigma8', 0.8)
Omega_c = (cosmo_cat.Om0 - cosmo_cat.Ob0)
Omega_b = cosmo_cat.Ob0
h = cosmo_cat.H0.value/100.
cosmo_ccl = ccl.Cosmology(Omega_c=Omega_c, Omega_b=Omega_b, h=h, sigma8 = s8, n_s = ns)#, transfer_function='boltzmann_class', matter_power_spectrum='emu')
ll = np.arange(0, 15000)
lens1 = ccl.WeakLensingTracer(cosmo_ccl, dndz=(z_vals, n_vals))
pp = ccl.angular_cl(cosmo_ccl, lens1, lens1, ll)
pp3_2 = np.zeros((lmax2, 4))
pp3_2[:, 1] = pp[:] * (ll * (ll + 1.)) / (2. * np.pi)
cxvals = np.cos(xvals / (60.) / (180. / np.pi))
vals = camb.correlations.cl2corr(pp3_2, cxvals)
return xvals, vals[:, 1], vals[:, 2]
def get_score(self, measured, theory, cov, opt='diagonal'):
        if opt == 'cov':
            # full covariance: chi^2 = d^T C^{-1} d (avoids deprecated np.matrix)
            cov_inv = np.linalg.inv(np.asarray(cov))
            print("inverse covariance matrix")
            print(cov_inv)
            diff_vec = np.asarray(measured) - np.asarray(theory)
            chi2 = float(np.dot(diff_vec, np.dot(cov_inv, diff_vec)))
elif opt == 'diagonal':
chi2 = np.sum([(measured[i] - theory[i])**2 / cov[i][i] for i in range(len(measured))])
else:
chi2 = np.sum([(measured[i] - theory[i])**2 / theory[i]**2 for i in range(len(measured))])
diff = chi2 / float(len(measured))
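        # `diff` is a chi^2 per data point; opt='cov' used the full inverse
        # covariance, opt='diagonal' only the diagonal errors, and any other
        # value (e.g. 'nojack') weighted residuals by the theory itself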
return diff
def jackknife(self, catalog_data, xip, xim, mask):
" computing jack-knife covariance matrix using K-means clustering"
#k-means clustering to define areas
#NOTE: This is somewhat deprecated, the jack-knifing takes too much effort to find appropriately accurate covariance matrices.
# If you want to use this, do a quick convergence check and some timing tests on small N_clust values (~5 to start) first.
# note also that this is comparing against the (low) variance in the catalog which might not be a great comparison -no shape noise
N_clust = self.N_clust
nn = np.stack((catalog_data[self.ra][mask], catalog_data[self.dec][mask]), axis=1)
_, labs, _ = k_means(n_clusters=N_clust, random_state=0, X=nn) # check random state
print("computing jack-knife errors")
time_jack = time.time()
# jack-knife code
xip_jack = []
xim_jack = []
gg = treecorr.GGCorrelation(
nbins=self.nbins,
min_sep=self.min_sep,
max_sep=self.max_sep,
sep_units='arcmin',
bin_slop=self.bin_slop,
verbose=True)
for i in range(N_clust):
##### shear computation excluding each jack-knife region
mask_jack = (labs != i)
cat_s = treecorr.Catalog(
ra=catalog_data[self.ra][mask][mask_jack],
dec=catalog_data[self.dec][mask][mask_jack],
g1=catalog_data[self.e1][mask][mask_jack] - np.mean(catalog_data[self.e1][mask][mask_jack]),
g2=-(catalog_data[self.e2][mask][mask_jack] - np.mean(catalog_data[self.e2][mask][mask_jack])),
ra_units='deg',
dec_units='deg')
gg.process(cat_s)
xip_jack.append(gg.xip)
xim_jack.append(gg.xim)
## debugging outputs
print("xip_jack")
print(i)
print(gg.xip)
print("time = " + str(time.time() - time_jack))
### assign covariance matrix - loop is poor python syntax but compared to the time taken for the rest of the test doesn't really matter
cp_xip = np.zeros((self.nbins, self.nbins))
for i in range(self.nbins):
for j in range(self.nbins):
for k in range(N_clust):
cp_xip[i][j] += N_clust/(N_clust - 1.) * (xip[i] - xip_jack[k][i] * 1.e6) * (
xip[j] - xip_jack[k][j] * 1.e6)
cp_xim = np.zeros((self.nbins, self.nbins))
for i in range(self.nbins):
for j in range(self.nbins):
for k in range(N_clust):
cp_xim[i][j] += N_clust/(N_clust - 1.) * (xim[i] - xim_jack[k][i] * 1.e6) * (
xim[j] - xim_jack[k][j] * 1.e6)
return cp_xip, cp_xim
@staticmethod
def get_catalog_data(gc, quantities, filters=None):
'''
Get quantities from catalog
'''
data = {}
if not gc.has_quantities(quantities):
return TestResult(skipped=True, summary='Missing requested quantities')
data = gc.get_quantities(quantities, filters=filters)
#make sure data entries are all finite
data = GCRQuery(*((np.isfinite, col) for col in data)).filter(data)
return data
# define theory from within this class
def post_process_plot(self, ax, fig):
'''
Post-processing routines on plot
'''
# vmin and vmax are very rough DES-like limits (maximum and minimum scales)
for i in range(self.ntomo):
for ax_this, vmin, vmax in zip(ax[:, i], (2.5, 35), (200, 200)):
ax_this.set_xscale('log')
ax_this.axvline(vmin, ls='--', c='k')
ax_this.axvline(vmax, ls='--', c='k')
ax[-1][i].set_xlabel(r'$\theta \; {\rm (arcmin)}$', size=self.axsize)
ax[0][i].set_title('z = {:.2f}'.format(self.zmeans[i]), size=self.title_size)
ax[0][i].legend(fontsize=self.legend_size, frameon=True)
ax[-1][i].legend(fontsize=self.legend_size, frameon=True)
ax[0][0].set_ylabel(r'$\xi_{{{}}} \; (10^{{-6}})$'.format('+'), size=self.axsize)
ax[-1][0].set_ylabel(r'$\xi_{{{}}} \; (10^{{-6}})$'.format('-'), size=self.axsize)
fig.subplots_adjust(hspace=0)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
'''
run test on a single catalog
'''
# check if needed quantities exist
if not catalog_instance.has_quantities([self.z, self.ra, self.dec]):
return TestResult(skipped=True, summary='do not have needed location quantities')
if not catalog_instance.has_quantities([self.e1, self.e2, self.kappa]):
return TestResult(skipped=True, summary='do not have needed shear quantities')
if not catalog_instance.has_quantities([self.mag]):
return TestResult(skipped=True, summary='do not have required magnitude quantities for cuts')
cosmo = getattr(catalog_instance, 'cosmology', WMAP7)
ntomo = self.ntomo
fig, ax = plt.subplots(nrows=2, ncols=ntomo, sharex=True, squeeze=False, figsize=(ntomo*5, 5))
zmeans = np.linspace(self.zlo, self.zhi, ntomo+2)[1:-1]
#zmeans = np.linspace(self.zlo, zhi, ntomo+2)[1:-1]
if self.truncate_cat_name:
catalog_name = catalog_name.partition('_')[0]
for ii in range(ntomo):
z_mean = zmeans[ii]
zlo2 = z_mean - self.z_range
zhi2 = z_mean + self.z_range
print(zlo2, zhi2)
filter_tomo = [(lambda z: (z > zlo2) & (z < zhi2), self.z)]
catalog_data = self.get_catalog_data(
catalog_instance, [self.z, self.ra, self.dec, self.e1, self.e2, self.kappa, self.mag], filters=filter_tomo)
# before this made sense since it was for the full catalog but now it doesnt for each tomo bin.
#z_max = np.max(catalog_data[self.z])
#if self.zhi>z_max:
# print("updating zhi to "+ str(z_max)+ " from "+ str(self.zhi))
# self.zhi = z_max
# zhi = z_max
#else:
# zhi = self.zhi
chi_max = cosmo.comoving_distance(self.zhi+1.0).value
mask = (catalog_data[self.mag][:]<self.maglim)
# read in shear values and check limits
max_e1 = np.max(catalog_data[self.e1])
min_e1 = np.min(catalog_data[self.e1])
max_e2 = np.max(catalog_data[self.e2])
min_e2 = np.min(catalog_data[self.e2])
if ((min_e1 < (-1.)) or (max_e1 > 1.0)):
return TestResult(skipped=True, summary='e1 values out of range [-1,+1]')
if ((min_e2 < (-1.)) or (max_e2 > 1.0)):
return TestResult(skipped=True, summary='e2 values out of range [-1,+1]')
# compute shear auto-correlation
cat_s = treecorr.Catalog(
ra=catalog_data[self.ra][mask],
dec=catalog_data[self.dec][mask],
g1=catalog_data[self.e1][mask] - np.mean(catalog_data[self.e1][mask]),
g2=-(catalog_data[self.e2][mask] - np.mean(catalog_data[self.e2][mask])),
ra_units='deg',
dec_units='deg')
gg = treecorr.GGCorrelation(
nbins=self.nbins,
min_sep=self.min_sep,
max_sep=self.max_sep,
sep_units='arcmin',
bin_slop=self.bin_slop,
verbose=True)
gg.process(cat_s)
r = np.exp(gg.meanlogr)
#NOTE: We are computing 10^6 x correlation function for easier comparison
xip = gg.xip * 1.e6
xim = gg.xim * 1.e6
print("npairs = ")
print(gg.npairs)
do_jackknife = self.do_jackknife
# Diagonal covariances for error bars on the plots. Use full covariance matrix for chi2 testing.
if do_jackknife:
cp_xip, cp_xim = self.jackknife(catalog_data, xip, xim, mask)
print(cp_xip)
sig_jack = np.zeros((self.nbins))
sigm_jack = np.zeros((self.nbins))
for i in range(self.nbins):
sig_jack[i] = np.sqrt(cp_xip[i][i])
sigm_jack[i] = np.sqrt(cp_xim[i][i])
else:
sig_jack = np.zeros((self.nbins))
sigm_jack = np.zeros((self.nbins))
for i in range(self.nbins):
sig_jack[i] = np.sqrt(gg.varxip[i])*1.e6
                    sigm_jack[i] = np.sqrt(gg.varxim[i])*1.e6  # use the xi_minus variance here
n_z = catalog_data[self.z][mask]
cosmo_cat = getattr(catalog_instance, 'cosmology', WMAP7)
xvals, theory_plus, theory_minus = self.theory_corr(n_z, r, 15000, chi_max,zlo2,zhi2, cosmo_cat)
theory_plus = theory_plus * 1.e6
theory_minus = theory_minus * 1.e6
if do_jackknife:
chi2_dof_1 = self.get_score(xip, theory_plus, cp_xip, opt='diagonal') #NOTE: correct this to opt=cov if you want full covariance matrix
else:
chi2_dof_1 = self.get_score(xip, theory_plus, 0, opt='nojack') # correct this
print(theory_plus)
print(theory_minus)
print(xip)
print(xim)
print(r)
print(xvals)
#The following are further treecorr correlation functions that could be added in later to extend the test
#treecorr.NNCorrelation(nbins=20, min_sep=2.5, max_sep=250, sep_units='arcmin')
#treecorr.NGCorrelation(nbins=20, min_sep=2.5, max_sep=250, sep_units='arcmin') # count-shear (i.e. <gamma_t>(R))
#treecorr.NKCorrelation(nbins=20, min_sep=2.5, max_sep=250, sep_units='arcmin') # count-kappa (i.e. <kappa>(R))
            #treecorr.KKCorrelation(nbins=20, min_sep=2.5, max_sep=250, sep_units='arcmin') # kappa-kappa auto-correlation (i.e. <kappa kappa>(R))
for ax_this in (ax, self.summary_ax):
ax_this[0, ii].errorbar(r, xip, sig_jack, lw=0.6, marker='o', ls='', color="#3f9b0b", label=r'$\xi_{+}$ ' + catalog_name)
ax_this[0, ii].plot(xvals, theory_plus, 'o', color="#9a0eea", label=r'$\xi_{+}$' + " theory")
ax_this[1, ii].errorbar(r, xim, sigm_jack, lw=0.6, marker='o', ls='', color="#3f9b0b", label=r'$\xi_{-}$ ' + catalog_name)
ax_this[1, ii].plot(xvals, theory_minus, 'o', color="#9a0eea", label=r'$\xi_{-}$' + " theory")
            results = {'theta': r, 'xip': xip, 'xim': xim, 'theta_theory': xvals, 'xip_theory': theory_plus, 'xim_theory': theory_minus, 'npairs': gg.npairs}
if do_jackknife:
results['xip_err'] = sig_jack
results['xim_err'] = sigm_jack
#save results for catalog and validation data in txt files
filelabel = 'z_{:.2f}'.format(self.zmeans[ii])
theory_keys = [k for k in results.keys() if 'theory' in k]
keys = ['theta'] + [k for k in results.keys() if 'xi' in k and 'theory' not in k] + theory_keys + ['npairs']
with open(os.path.join(output_dir, 'Shear_vs_theta_' + filelabel + '.txt'), 'ab') as f_handle: #open file in append binary mode
self.save_quantities(keys, results, f_handle, comment='z = {:.2f}'.format(self.zmeans[ii]))
self.post_process_plot(ax, fig)
fig.savefig(os.path.join(output_dir, 'plot.png'))
plt.close(fig)
score = chi2_dof_1 #calculate your summary statistics
#TODO: This criteria for the score is effectively a placeholder if jackknifing isn't used and assumes a diagonal covariance if it is
# Proper validation criteria need to be assigned to this test
if score < 2:
return TestResult(score, inspect_only=True)
else:
return TestResult(score, passed=False)
@staticmethod
def save_quantities(keys, results, filename, comment=''):
header = 'Data columns for {} are:\n {}'.format(comment, ' '.join(keys))
        np.savetxt(filename, np.vstack([results[k] for k in keys]).T, fmt='%12.4e', header=header)  # np.vstack requires a sequence, not a generator
def conclude_test(self, output_dir):
self.post_process_plot(self.summary_ax, self.summary_fig)
self.summary_fig.savefig(os.path.join(output_dir, 'summary.png'))
plt.close(self.summary_fig)
| 17,211
| 42.574684
| 163
|
py
|
descqa
|
descqa-master/descqa/ColorDistribution.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import re
import numpy as np
import numexpr as ne
from astropy.table import Table
from scipy.ndimage.filters import uniform_filter1d
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
from .stats import CvM_statistic
find_first_true = np.argmax
__all__ = ['ColorDistribution']
# Transformations of DES -> SDSS and DES -> CFHT are derived from Equations A9-12 and
# A19-22 the paper: arxiv.org/abs/1708.01531
# Transformations of SDSS -> CFHT are from:
# www1.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/community/CFHTLS-SG/docs/extra/filters.html
color_transformation = {'des2sdss': {}, 'des2cfht': {}, 'sdss2cfht': {}, 'lsst2cfht': {}, 'lsst2sdss':{}}
color_transformation['des2sdss']['g'] = '1.10421 * g - 0.104208 * r'
color_transformation['des2sdss']['r'] = '0.102204 * g + 0.897796 * r'
color_transformation['des2sdss']['i'] = '1.30843 * i - 0.308434 * z'
color_transformation['des2sdss']['z'] = '0.103614 * i + 0.896386 * z'
color_transformation['des2cfht']['g'] = '0.945614 * g + 0.054386 * r'
color_transformation['des2cfht']['r'] = '0.0684211 * g + 0.931579 * r'
color_transformation['des2cfht']['i'] = '1.18646 * i - 0.186458 * z'
color_transformation['des2cfht']['z'] = '0.144792 * i + 0.855208 * z'
color_transformation['sdss2cfht']['u'] = 'u - 0.241 * (u - g)'
color_transformation['sdss2cfht']['g'] = 'g - 0.153 * (g - r)'
color_transformation['sdss2cfht']['r'] = 'r - 0.024 * (g - r)'
color_transformation['sdss2cfht']['i'] = 'i - 0.085 * (r - i)'
color_transformation['sdss2cfht']['z'] = 'z + 0.074 * (i - z)'
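# The transformations above are arithmetic expression strings evaluated with
# numexpr, e.g. (illustrative, with magnitude arrays stored in a dict `data`):
#   g_sdss = ne.evaluate(color_transformation['des2sdss']['g'],
#                        local_dict=data, global_dict={})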
class ColorDistribution(BaseValidationTest):
"""
Compare the mock galaxy color distribution with a validation catalog
"""
colors = ['u-g', 'g-r', 'r-i', 'i-z']
summary_output_file = 'summary.txt'
plot_pdf_file = 'plot_pdf.png'
plot_cdf_file = 'plot_cdf.png'
summary_pdf = 'summary.png'
summary_cdf = 'summary_cdf.png'
default_colors = ['orange', 'g', 'm', 'r', 'navy', 'y', 'purple', 'gray', 'c',\
'blue', 'violet', 'coral', 'gold', 'orchid', 'maroon', 'tomato', \
'sienna', 'chartreuse', 'firebrick', 'SteelBlue']
validation_color = 'black'
def __init__(self, **kwargs): # pylint: disable=W0231
# load test config options
self.kwargs = kwargs
self.obs_r_mag_limit = kwargs.get('obs_r_mag_limit', None)
self.lightcone = kwargs.get('lightcone', True)
if self.lightcone:
self.zlo = kwargs['zlo']
self.zhi = kwargs['zhi']
self.validation_catalog = kwargs.get('validation_catalog', None)
self.plot_pdf_q = kwargs.get('plot_pdf_q', True)
self.plot_cdf_q = kwargs.get('plot_cdf_q', True)
self.color_transformation_q = kwargs.get('color_transformation_q', True)
self.Mag_r_limit = kwargs.get('Mag_r_limit', None)
self.rest_frame = kwargs.get('rest_frame', bool(self.Mag_r_limit and not self.obs_r_mag_limit))
self.use_lsst = kwargs.get('use_lsst', False)
self.exclude_agn = kwargs.get('exclude_agn', False)
self.plot_shift = kwargs.get('plot_shift', True)
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.replace_cat_name = kwargs.get('replace_cat_name', {})
self.title_in_legend = kwargs.get('title_in_legend', False)
self.legend_location = kwargs.get('legend_location', 'upper left')
self.skip_statistic = kwargs.get('skip_statistic', False)
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 10)
self.shorten_cat_name = kwargs.get('shorten_cat_name', True)
self.adjust_ylim = kwargs.get('adjust_ylim', 1.25)
self.suptitle_y = kwargs.get('suptitle_y', 1.0)
self.title_size = kwargs.get('title_size', 16)
self.add_suptitle = kwargs.get('add_suptitle', False)
# bins of color distribution
self.bins = np.linspace(-1, 4, 2000)
self.binsize = self.bins[1] - self.bins[0]
# Load validation catalog and define catalog-specific properties
self.sdss_path = os.path.join(self.external_data_dir, 'rongpu', 'SpecPhoto_sdss_mgs_extinction_corrected.fits')
self.deep2_path = os.path.join(self.external_data_dir, 'rongpu', 'DEEP2_uniq_Terapix_Subaru_trimmed_wights_added.fits')
if self.validation_catalog == 'SDSS':
obs_path = self.sdss_path
obscat = Table.read(obs_path)
obs_translate = {'u':'modelMag_u', 'g':'modelMag_g', 'r':'modelMag_r', 'i':'modelMag_i', 'z':'modelMag_z'}
obs_zcol = 'z'
weights = None
elif self.validation_catalog == 'DEEP2':
obs_path = self.deep2_path
obscat = Table.read(obs_path)
obs_translate = {'u':'u_apercor', 'g':'g_apercor', 'r':'r_apercor', 'i':'i_apercor', 'z':'z_apercor'}
obs_zcol = 'zhelio'
weights = 1/np.array(obscat['p_onmask'])
elif self.validation_catalog is not None:
raise ValueError('Validation catalog not recognized')
# Magnitude and redshift cut
if self.validation_catalog is not None:
mask = obscat[obs_translate['r']] < self.obs_r_mag_limit
mask &= (obscat[obs_zcol] > self.zlo) & (obscat[obs_zcol] < self.zhi)
obscat = obscat[mask]
if self.validation_catalog == 'DEEP2':
# Remove unsecured redshifts
mask = obscat['zquality'] >= 3
# Remove CFHTLS-Wide objects
mask &= obscat['cfhtls_source'] == 0
obscat = obscat[mask]
# Selection weights
if self.validation_catalog == 'SDSS':
weights = None
elif self.validation_catalog == 'DEEP2':
weights = 1/np.array(obscat['p_onmask'])
# Compute color distribution (PDF, CDF etc.)
self.obs_color_dist = {}
if self.validation_catalog is not None:
self.obs_color_dist = self.get_color_dist(obscat, obs_translate, weights)
#setup a check for the one-time creation of summary plots and addition of validation data
self.first_pass = True
#init colors
self.plot_colors = iter(self.default_colors)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
bands = set(sum((c.split('-') for c in self.colors), []))
if self.rest_frame:
possible_names = ('Mag_{}_lsst', 'Mag_{}_sdss', 'Mag_true_{}_lsst_z0', 'Mag_true_{}_sdss_z0')
else:
possible_lsst_names = (('mag_{}_noagn_lsst', 'mag_true_{}_noagn_lsst')
if self.exclude_agn else ('mag_{}_cModel', 'mag_{}_lsst', 'mag_true_{}_lsst'))
possible_non_lsst_names = ('mag_{}_sdss', 'mag_{}_des', 'mag_true_{}_sdss', 'mag_true_{}_des')
if self.use_lsst:
print('Selecting lsst magnitudes if available')
possible_names = possible_lsst_names + possible_non_lsst_names
else:
possible_names = possible_non_lsst_names + possible_lsst_names
labels = {band: catalog_instance.first_available(*(n.format(band) for n in possible_names)) for band in bands}
labels = {k: v for k, v in labels.items() if v}
if len(labels) < 2:
return TestResult(skipped=True, summary='magnitudes in mock catalog do not have at least two needed bands.')
filters = set((v.split('_')[(-2 if 'z0' in v else -1)] for v in labels.values()))
if len(filters) > 1:
return TestResult(skipped=True, summary='magnitudes in mock catalog have mixed filters.')
filter_this = filters.pop()
if self.lightcone:
labels['redshift'] = catalog_instance.first_available('redshift_true_galaxy', 'redshift_true', 'redshift')
if not labels['redshift']:
return TestResult(skipped=True, summary='mock catalog does not have redshift.')
# Load mock catalog data
filters = ['{} > {}'.format(labels['redshift'], self.zlo),
'{} < {}'.format(labels['redshift'], self.zhi)]
else:
filters = None
redshift = catalog_instance.redshift
data = catalog_instance.get_quantities(list(labels.values()), filters)
# filter catalog data further for matched object catalogs
        if 'redshift' in labels and np.ma.isMaskedArray(data[labels['redshift']]):
            # keep only entries whose galaxy redshift is valid (unmasked)
            galmask = ~np.ma.getmaskarray(data[labels['redshift']])
            data = {k: data[v][galmask] for k, v in labels.items()}
else:
data = {k: data[v] for k, v in labels.items()}
# Color transformation
color_trans = None
if self.color_transformation_q:
color_trans_name = None
if self.validation_catalog == 'DEEP2' and (filter_this == 'sdss' or filter_this == 'des'):
color_trans_name = '{}2cfht'.format(filter_this)
elif self.validation_catalog == 'SDSS' and filter_this == 'des':
color_trans_name = 'des2sdss'
if color_trans_name:
color_trans = color_transformation[color_trans_name]
filter_title = r'\mathrm{{{}}}'.format(filter_this.upper())
if color_trans:
data_transformed = {}
for band in bands:
try:
data_transformed[band] = ne.evaluate(color_trans[band], local_dict=data, global_dict={})
except KeyError:
continue
filter_title = (r'{}\rightarrow\mathrm{{{}}}'.format(filter_title, self.validation_catalog)
if data_transformed else filter_title)
            if 'redshift' in data:
                data_transformed['redshift'] = data['redshift']
data = data_transformed
del data_transformed
if self.obs_r_mag_limit and not self.rest_frame:
data = GCRQuery('r < {}'.format(self.obs_r_mag_limit)).filter(data)
elif self.Mag_r_limit and self.rest_frame:
data = GCRQuery('r < {}'.format(self.Mag_r_limit)).filter(data)
# Compute color distribution (PDF, CDF etc.)
mock_color_dist = self.get_color_dist(data)
# Calculate Cramer-von Mises statistic
color_shift = {}
cvm_omega = {}
cvm_omega_shift = {}
if self.validation_catalog:
for color in self.colors:
if not ((color in self.obs_color_dist) and (color in mock_color_dist)):
continue
color_shift[color] = self.obs_color_dist[color]['median'] - mock_color_dist[color]['median']
cvm_omega[color] = CvM_statistic(
mock_color_dist[color]['nsample'], self.obs_color_dist[color]['nsample'],
mock_color_dist[color]['binctr'], mock_color_dist[color]['cdf'],
self.obs_color_dist[color]['binctr'], self.obs_color_dist[color]['cdf'])
cvm_omega_shift[color] = CvM_statistic(
mock_color_dist[color]['nsample'], self.obs_color_dist[color]['nsample'],
mock_color_dist[color]['binctr'] + color_shift[color], mock_color_dist[color]['cdf'],
self.obs_color_dist[color]['binctr'], self.obs_color_dist[color]['cdf'])
redshift_title = '{:.2f} < z < {:.2f}'.format(self.zlo,
self.zhi) if self.lightcone else 'z = {:.2f}'.format(redshift)
catalog_color = next(self.plot_colors)
self.make_plots(mock_color_dist, color_shift, cvm_omega, cvm_omega_shift, catalog_name,
output_dir, filter_title, redshift_title, catalog_color)
self.make_plots(mock_color_dist, color_shift, cvm_omega, cvm_omega_shift, catalog_name,
output_dir, filter_title, redshift_title, catalog_color, summary=True)
# Write to summary file
fn = os.path.join(output_dir, self.summary_output_file)
with open(fn, 'a') as f:
if color_trans:
f.write('Color transformation: {}\n'.format(color_trans_name))
else:
f.write('No color transformation\n')
f.write('{}\n'.format(redshift_title))
if self.obs_r_mag_limit:
f.write('r_mag < %2.3f\n\n'%(self.obs_r_mag_limit))
elif self.Mag_r_limit:
f.write('Mag_r < %2.3f\n\n'%(self.Mag_r_limit))
if self.validation_catalog:
for color in self.colors:
if self.validation_catalog and not ((color in self.obs_color_dist) and (color in mock_color_dist)):
continue
f.write("Median "+color+" difference (obs - mock) = %2.3f\n"%(color_shift[color]))
f.write(color+": {} = {:2.6f}\n".format('CvM statistic', cvm_omega[color]))
f.write(color+" (shifted): {} = {:2.6f}\n".format('CvM statistic', cvm_omega_shift[color]))
f.write("\n")
return TestResult(inspect_only=True)
@staticmethod
def init_plots(available_colors):
nrows = int(np.ceil(len(available_colors)/2.))
fig_pdf, axes_pdf = plt.subplots(nrows, 2, figsize=(8, 3.5*nrows))
fig_cdf, axes_cdf = plt.subplots(nrows, 2, figsize=(8, 3.5*nrows))
return fig_pdf, axes_pdf, fig_cdf, axes_cdf
def make_plots(self, mock_color_dist, color_shift, cvm_omega, cvm_omega_shift, catalog_name,
output_dir, filter_title, redshift_title, catalog_color, summary=False):
available_colors = [c for c in self.colors if c in mock_color_dist]
if summary:
if self.first_pass:
fig_pdf, axes_pdf, fig_cdf, axes_cdf = self.init_plots(available_colors)
self.summary_fig_pdf, self.summary_ax_pdf = fig_pdf, axes_pdf
self.summary_fig_cdf, self.summary_axes_cdf = fig_cdf, axes_cdf
else:
fig_pdf, axes_pdf = self.summary_fig_pdf, self.summary_ax_pdf
fig_cdf, axes_cdf = self.summary_fig_cdf, self.summary_axes_cdf
filter_title = ''
else:
fig_pdf, axes_pdf, fig_cdf, axes_cdf = self.init_plots(available_colors)
title = ''
if self.obs_r_mag_limit:
title = '$m_r^{{{}}} < {:2.1f}, {}$'.format(filter_title, self.obs_r_mag_limit, redshift_title)
elif self.Mag_r_limit:
title = '$M_r^{{{}}} < {:2.1f}, {}$'.format(filter_title, self.Mag_r_limit, redshift_title)
for ax_cdf, ax_pdf, color in zip(axes_cdf.flat, axes_pdf.flat, available_colors):
if color not in mock_color_dist or (self.validation_catalog and color not in self.obs_color_dist):
continue
mbinctr = mock_color_dist[color]['binctr']
mpdf_smooth = mock_color_dist[color]['pdf_smooth']
mcdf = mock_color_dist[color]['cdf']
if self.validation_catalog:
obinctr = self.obs_color_dist[color]['binctr']
opdf_smooth = self.obs_color_dist[color]['pdf_smooth']
ocdf = self.obs_color_dist[color]['cdf']
xmin = np.min([mbinctr[find_first_true(mcdf > 0.001)],
mbinctr[find_first_true(mcdf > 0.001)] + color_shift[color],
obinctr[find_first_true(ocdf > 0.001)]])
xmax = np.max([mbinctr[find_first_true(mcdf > 0.999)],
mbinctr[find_first_true(mcdf > 0.999)] + color_shift[color],
obinctr[find_first_true(ocdf > 0.999)]])
else:
xmin = np.min(mbinctr[find_first_true(mcdf > 0.001)])
xmax = np.max(mbinctr[find_first_true(mcdf > 0.999)])
# Plot PDF
# mock color distribution
spacing = '\n'
lgnd_title = None
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
spacing = ', '
if self.replace_cat_name:
for k, v in self.replace_cat_name.items():
catalog_name = re.sub(k, v, catalog_name)
if cvm_omega.get(color, None) and not self.skip_statistic:
catalog_label = catalog_name + spacing + r'$\omega={:.3}$'.format(cvm_omega[color])
else:
catalog_label = catalog_name
ax_pdf.step(mbinctr, mpdf_smooth, where="mid", label=catalog_label, color=catalog_color)
if self.validation_catalog:
# validation data
if not summary or (summary and self.first_pass):
ax_pdf.step(obinctr, opdf_smooth, where="mid", label=self.validation_catalog, color=self.validation_color)
# color distribution after constant shift
if self.plot_shift:
ax_pdf.step(mbinctr + color_shift[color], mpdf_smooth,
label=catalog_name+' shifted\n'+r'$\omega={:.3}$'.format(cvm_omega_shift[color]),
linestyle='--', color=catalog_color)
ax_pdf.set_xlabel('${}$'.format(color), size=self.font_size)
ax_pdf.set_xlim(xmin, xmax)
            # adjust y-limit of plot if needed
            peak = max(np.max(mpdf_smooth), np.max(opdf_smooth)) if self.validation_catalog else np.max(mpdf_smooth)
            data_max = self.adjust_ylim * peak
_, plot_max = ax_pdf.get_ylim()
plot_max = plot_max if plot_max > data_max else data_max
ax_pdf.set_ylim(bottom=0., top=plot_max)
if not self.title_in_legend:
if not summary:
ax_pdf.set_title(title)
elif self.first_pass and self.add_suptitle:
fig_pdf.suptitle(title, y=self.suptitle_y, fontsize=self.title_size)
else:
lgnd_title = title
ax_pdf.legend(loc=self.legend_location, title=lgnd_title, fontsize=self.legend_size, frameon=False)
# Plot CDF
# catalog distribution
ax_cdf.step(mbinctr, mcdf, where="mid", label=catalog_label, color=catalog_color)
if self.validation_catalog:
# validation distribution
if not summary or (summary and self.first_pass):
ax_cdf.step(obinctr, ocdf, label=self.validation_catalog, color=self.validation_color)
# color distribution after constant shift
if self.plot_shift:
ax_cdf.step(mbinctr + color_shift[color], mcdf, where="mid",
label=catalog_name+' shifted\n'+r'$\omega={:.3}$'.format(cvm_omega_shift[color]),
linestyle='--', color=catalog_color)
ax_cdf.set_xlabel('${}$'.format(color), size=self.font_size)
if not self.title_in_legend:
if not summary:
ax_cdf.set_title(title)
elif self.first_pass and self.add_suptitle:
fig_cdf.suptitle(title, y=self.suptitle_y, fontsize=self.title_size)
else:
lgnd_title = title
ax_cdf.set_xlim(xmin, xmax)
ax_cdf.set_ylim(0, 1)
ax_cdf.legend(loc=self.legend_location, title=lgnd_title, fontsize=self.legend_size, frameon=False)
if self.plot_pdf_q and not summary:
self.post_process_plot(fig_pdf, output_dir, self.plot_pdf_file)
if self.plot_cdf_q and not summary:
self.post_process_plot(fig_cdf, output_dir, self.plot_cdf_file)
if summary and self.first_pass:
self.first_pass = False
@staticmethod
def post_process_plot(fig, output_dir, filename):
fig.tight_layout()
fig.savefig(os.path.join(output_dir, filename))
print('Saving {} in {}'.format(filename, output_dir))
plt.close(fig)
def get_color_dist(self, cat, translate=None, weights=None):
'''
Return the color distribution information including PDF, smoothed PDF, and CDF.
'''
if translate is None:
translate = {}
color_dist = {}
for color in self.colors:
band1 = translate.get(color[0], color[0])
band2 = translate.get(color[-1], color[-1])
# Remove objects with invalid magnitudes from the analysis
try:
if self.rest_frame:
cat_mask = (cat[band1] < -10) & (cat[band1] > -30) & (cat[band2] < -10) & (cat[band2] > -30)
else:
cat_mask = (cat[band1] > 0) & (cat[band1] < 50) & (cat[band2] > 0) & (cat[band2] < 50)
except KeyError:
continue
pdf, bin_edges = np.histogram((cat[band1]-cat[band2])[cat_mask],
bins=self.bins,
weights=(None if weights is None else weights[cat_mask]))
pdf = pdf/np.sum(pdf)
binctr = (bin_edges[1:] + bin_edges[:-1])/2.
pdf_smooth = uniform_filter1d(pdf, 20)
color_dist[color] = {}
color_dist[color]['nsample'] = np.sum(cat_mask)
color_dist[color]['binctr'] = binctr
color_dist[color]['pdf'] = pdf
color_dist[color]['pdf_smooth'] = pdf_smooth
color_dist[color]['cdf'] = np.cumsum(pdf)
color_dist[color]['median'] = np.median((cat[band1]-cat[band2])[cat_mask])
return color_dist
def conclude_test(self, output_dir):
self.post_process_plot(self.summary_fig_pdf, output_dir, self.summary_pdf)
self.post_process_plot(self.summary_fig_cdf, output_dir, self.summary_cdf)
| 22,014
| 48.250559
| 127
|
py
|
descqa
|
descqa-master/descqa/BiasVersusRedshift.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import numpy as np
import scipy.optimize as op
import re
from GCR import GCRQuery
import pyccl as ccl
from .base import TestResult
from .CorrelationsTwoPoint import CorrelationsAngularTwoPoint
from .plotting import plt
from .stats import chisq
__all__ = ['BiasValidation']
def neglnlike(b, x, y, yerr):
return 0.5*np.sum((b**2*x-y)**2/yerr**2) # We ignore the covariance
def wtheta(x, b):
return b**2*x
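# Fit model: w_model(theta) = b**2 * w_theory(theta) with a single linear bias
# b; neglnlike is the corresponding Gaussian negative log-likelihood (up to a
# constant), ignoring off-diagonal covariance terms.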
class BiasValidation(CorrelationsAngularTwoPoint):
"""
    Validation test of galaxy bias vs. redshift, fit from the angular 2pt correlation function
"""
possible_observations = {'SRD':{
'filename_template': 'galaxy_bias/bias_SRD.txt',
'label': 'SRD ($i<25.3$)',
'colnames': ('z', 'bias'),
'skip':2,
},
'nicola_27':{
'filename_template': 'galaxy_bias/bias_nicola_mlim27.txt',
'label': 'Nicola et al. \n($i<27$)',
'colnames': ('z', 'bias'),
'skip':2,
},
'nicola_25.3':{
'filename_template': 'galaxy_bias/bias_nicola_mlim25.3.txt',
'label': 'Nicola et al. \n($i<25.3$)',
'colnames': ('z', 'bias'),
'skip':2,
},
'nicola_25.3_errors':{
'filename_template': 'galaxy_bias/bias_nicola_mlim25.3_with_errors.txt',
'label': 'Nicola et al. \n($i<25.3$)',
'colnames': ('z', 'b_lo', 'bias', 'b_hi'),
'skip':1,
},
}
def __init__(self, **kwargs): #pylint: disable=W0231
super().__init__(**kwargs)
self.data_label = kwargs['data_label']
self.output_filename_template = kwargs['output_filename_template']
self.label_template = kwargs['label_template']
self.fig_xlabel = kwargs['fig_xlabel']
self.fig_ylabel = kwargs['fig_ylabel']
self.fig_ylim = kwargs['fig_ylim']
self.test_name = kwargs['test_name']
self.fit_range = kwargs['fit_range']
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 12)
self.title_fontsize = kwargs.get('title_fontsize', 14)
        self.ell_max = kwargs.get('ell_max', 20000)
validation_filepath = os.path.join(self.data_dir, kwargs['data_filename'])
self.validation_data = np.loadtxt(validation_filepath, skiprows=2)
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.title_in_legend = kwargs.get('title_in_legend', True)
self.observations = kwargs.get('observations', [])
self.validation_data = self.get_validation_data(self.observations)
def get_validation_data(self, observations):
validation_data = {}
if observations:
for obs in observations:
print(obs)
data_args = self.possible_observations[obs]
fn = os.path.join(self.data_dir, data_args['filename_template'])
validation_data[obs] = dict(zip(data_args['colnames'], np.loadtxt(fn, skiprows=data_args['skip'], unpack=True)))
validation_data[obs]['label'] = data_args['label']
validation_data[obs]['colnames'] = data_args['colnames']
print(validation_data)
return validation_data
def plot_bias_results(self, corr_data, corr_theory, bias, z, catalog_name, output_dir,
err=None, chisq=None, mag_label=''):
fig, ax = plt.subplots(1, 2, gridspec_kw={'width_ratios': [5, 3]})
colors = plt.cm.plasma_r(np.linspace(0.1, 1, len(self.test_samples))) # pylint: disable=no-member
for sample_name, color in zip(self.test_samples, colors):
sample_corr = corr_data[sample_name]
sample_label = self.test_sample_labels.get(sample_name)
sample_th = corr_theory[sample_name]
ax[0].loglog(sample_corr[0], sample_th, c=color)
_, caps, bars = ax[0].errorbar(sample_corr[0], sample_corr[1], sample_corr[2], marker='o', ls='', c=color,
label=sample_label)
# add transparency for error bars
[bar.set_alpha(0.2) for bar in bars]
[cap.set_alpha(0.2) for cap in caps]
#add shaded band for fit range
ax[0].fill_between([self.fit_range[sample_name]['min_theta'], self.fit_range[sample_name]['max_theta']],
[self.fig_ylim[0], self.fig_ylim[0]], [self.fig_ylim[1], self.fig_ylim[1]],
alpha=0.07, color='grey')
if self.title_in_legend:
lgnd_title = '{}\n$({})$'.format(catalog_name, mag_label)
title = self.data_label
else:
lgnd_title = '({})'.format(mag_label)
title = '{} vs. {}'.format(catalog_name, self.data_label)
ax[0].legend(loc='lower left', title=lgnd_title, framealpha=0.5,
fontsize=self.legend_size, title_fontsize=self.title_fontsize)
ax[0].set_xlabel(self.fig_xlabel, size=self.font_size)
ax[0].set_ylim(*self.fig_ylim)
ax[0].set_ylabel(self.fig_ylabel, size=self.font_size)
ax[0].set_title(title, fontsize='medium')
ax[1].errorbar(z, bias, err, marker='o', ls='', label='{}\n$({})$'.format(catalog_name, mag_label))
if not self.observations:
ax[1].plot(z, bias) #plot curve through points
#add validation data
for v in self.validation_data.values():
colz = v['colnames'][0]
colb = 'bias'
zmask = (v[colz] < np.max(z)*1.25)
ax[1].plot(v[colz][zmask], v[colb][zmask], label=v['label'])
print(v['colnames'])
            if 'b_lo' in v['colnames'] and 'b_hi' in v['colnames']:
print('band', v[colz][zmask], v['b_lo'][zmask], v['b_hi'][zmask])
ax[1].fill_between(v[colz][zmask], v['b_lo'][zmask], v['b_hi'][zmask], alpha=.3, color='grey')
ax[1].set_title('Bias vs redshift', fontsize='medium')
ax[1].set_xlabel('$z$', size=self.font_size)
ax[1].set_ylabel('$b(z)$', size=self.font_size)
ax[1].legend(loc='upper right', framealpha=0.5, frameon=True, fontsize=self.legend_size-2)
if chisq:
ax[1].text(0.95, 0.05, r'$\chi^2/\rm{{d.o.f}}={}$'.format(', '.join(['{:.2g}'.format(c) for c in chisq])),
horizontalalignment='right', verticalalignment='bottom',
transform=ax[1].transAxes)
plt.subplots_adjust(wspace=.05)
fig.tight_layout()
fig.savefig(os.path.join(output_dir, '{:s}.png'.format(self.test_name)), bbox_inches='tight')
plt.close(fig)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
'''
Loop over magnitude cuts and make plots
'''
catalog_data = self.load_catalog_data(catalog_instance=catalog_instance,
requested_columns=self.requested_columns,
test_samples=self.test_samples)
if not catalog_data:
return TestResult(skipped=True, summary='Missing requested quantities')
if self.truncate_cat_name:
catalog_name = catalog_name.partition("_")[0]
# Initialize catalog's cosmology
cosmo = ccl.Cosmology(Omega_c=catalog_instance.cosmology.Om0-catalog_instance.cosmology.Ob0,
Omega_b=catalog_instance.cosmology.Ob0,
h=catalog_instance.cosmology.h,
                              sigma8=0.8, # for now, assume a fiducial value of sigma8 = 0.8
                              n_s=0.96 # assumed value for the scalar spectral index
)
rand_cat, rr = self.generate_processed_randoms(catalog_data)
correlation_data = dict()
nz_data = dict()
correlation_theory = dict()
best_fit_bias = []
z_mean = []
best_fit_err = []
for sample_name, sample_conditions in self.test_samples.items():
tmp_catalog_data = self.create_test_sample(
catalog_data, sample_conditions)
with open(os.path.join(output_dir, 'galaxy_count.dat'), 'a') as f:
f.write('{} {}\n'.format(sample_name, len(tmp_catalog_data['ra'])))
if not len(tmp_catalog_data['ra']):
continue
z_mean.append(np.mean(tmp_catalog_data['redshift']))
output_treecorr_filepath = os.path.join(output_dir,
self.output_filename_template.format(sample_name))
xi_rad, xi, xi_sig = self.run_treecorr(
catalog_data=tmp_catalog_data,
treecorr_rand_cat=rand_cat,
rr=rr,
output_file_name=output_treecorr_filepath)
correlation_data[sample_name] = (xi_rad, xi, xi_sig)
nz, be = np.histogram(tmp_catalog_data['redshift'], range=(0, 2), bins=100)
zcent = 0.5*(be[1:]+be[:-1])
nz_data[sample_name] = (zcent, nz*1.0)
# Generate CCL tracer object to compute Cls -> w(theta)
tracer = ccl.NumberCountsTracer(cosmo, has_rsd=False, dndz=(zcent, nz),
bias=(zcent, np.ones_like(zcent)))
ells = np.arange(0, self.ell_max) # Reduce ell_max to speed-up
cls = ccl.angular_cl(cosmo, tracer, tracer, ells)
w_th = ccl.correlation(cosmo, ells, cls, xi_rad)
angles = (xi_rad > self.fit_range[sample_name]['min_theta']) & \
(xi_rad < self.fit_range[sample_name]['max_theta']) # Select the fitting range
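            # fit a single linear galaxy bias b via the model
            # w_model(theta) = b**2 * w_th(theta) over the selected fit range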
result = op.minimize(neglnlike, [1.0],
args=(w_th[angles], xi[angles],
xi_sig[angles]), bounds=[(0.1, 10)])
best_bias = result['x']
#extract covariance matrix
#use curve_fit to get error on fit which has documented normalization for covariance matrix
cfit = op.curve_fit(wtheta, w_th[angles], xi[angles], p0=1.0, sigma=xi_sig[angles], bounds=(0.1, 10))
#best_bias_obj = result.hess_inv*np.identity(1)[0] #unknown relative normalization
best_bias_err = np.sqrt(cfit[1][0][0])
correlation_theory[sample_name] = best_bias**2*w_th
best_fit_bias.append(best_bias[0])
best_fit_err.append(best_bias_err)
print(sample_name, best_fit_bias, w_th[angles], xi[angles], xi_sig[angles])
z_mean = np.array(z_mean)
best_fit_bias = np.array(best_fit_bias)
best_fit_err = np.array(best_fit_err)
chi_2 = []
        # compute chi**2 between the best-fit bias and the validation data
for v in self.validation_data.values():
colz = v['colnames'][0]
colb = 'bias'
validation_data = np.interp(z_mean, v[colz], v[colb])
            if 'b_lo' in v['colnames'] and 'b_hi' in v['colnames']:
val_err_lo = np.abs(np.interp(z_mean, v[colz], v['b_lo']) - validation_data)
val_err_hi = np.abs(np.interp(z_mean, v[colz], v['b_hi']) - validation_data)
print(val_err_lo, val_err_hi)
val_err = (val_err_lo + val_err_hi)/2 # mean of upper and lower errors
error_sq = best_fit_err**2 + val_err**2 # sum in quadrature
print(val_err, best_fit_err, error_sq)
else:
error_sq = best_fit_err**2
            chi2_this = np.sum((best_fit_bias - validation_data)**2/error_sq)/len(best_fit_bias)
            print('\nchi**2/N (linear bias vs. bias data) = {:.3g}'.format(chi2_this))
            chi_2.append(chi2_this)
# get mag_cut for plot
mag_label = ''
if 'mag' in self.requested_columns.keys():
filt = self.requested_columns['mag'][0].split('_')[1]
mag_vals = self.test_samples[list(self.test_samples.keys())[0]]['mag'] # assume all cuts the same
#mag_label = '{:.2g} < {}'.format(mag_vals['min'], filt) if 'min' in mag_vals.keys() else filt
mag_label = filt + ' < {:.3g}'.format(mag_vals['max'])
self.plot_bias_results(corr_data=correlation_data,
catalog_name=catalog_name,
corr_theory=correlation_theory,
bias=best_fit_bias,
z=z_mean, mag_label=mag_label,
output_dir=output_dir,
err=best_fit_err, chisq=chi_2)
        passed = np.all((best_fit_bias[1:]-best_fit_bias[:-1]) > 0) # pass only if bias increases monotonically with redshift
        score = np.count_nonzero((best_fit_bias[:-1]-best_fit_bias[1:]) > 0)*1.0/(len(best_fit_bias)-1.0) # fraction of adjacent bins where bias decreases
return TestResult(score=score, passed=passed,
summary="Resulting linear bias obtained from the 2pcf")
| 13,544
| 48.615385
| 128
|
py
|
descqa
|
descqa-master/descqa/EllipticityDistribution.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from itertools import count
import re
import numpy as np
from scipy.interpolate import interp1d
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['EllipticityDistribution']
class EllipticityDistribution(BaseValidationTest):
"""
validation test to show total ellipticity distributions
"""
#setup dict with parameters needed to read in validation data
possible_observations = {
'COSMOS_2013': {
'label': 'COSMOS 2013',
'band_mag': 'i',
'band_Mag': ['V', 'r', 'g'],
'zlo': 0.0,
'zhi': 2.0,
'definition': 'e_squared',
'morphology': ('LRG', 'early', 'disk', 'late'),
'filename_template': 'ellipticity/COSMOS/joachimi_et_al_2013/{}{}_{}.dat',
'file-info': {
'LRG':{'prefix':'all', 'suffix':'mag24'},
'early':{'prefix':'histo_', 'suffix':'mag24_V17-21'},
'disk':{'prefix':'histo_', 'suffix':'mag24_V17-21'},
'late':{'prefix':'histo_', 'suffix':'mag24_V17-21'},
'irregular':{'prefix':'histo_', 'suffix':'mag24_V17-21'},
'usecols':0,
'skiprows':0,
},
'cuts':{
'LRG':{'B/T_min':0.7, 'B/T_max':1., 'mag_lo':24., 'Mag_hi':-np.inf, 'Mag_lo':-19.},
'early':{'B/T_min':0.7, 'B/T_max':1.0, 'mag_lo':24., 'Mag_hi':-21., 'Mag_lo':-17.},
'disk':{'B/T_min':0., 'B/T_max':0.2, 'mag_lo':24., 'Mag_hi':-21., 'Mag_lo':-17.},
'late':{'B/T_min':0.4, 'B/T_max':0.7, 'mag_lo':24., 'Mag_hi':-21., 'Mag_lo':-17.},
'irregular':{'B/T_min':0.0, 'B/T_max':1.0},
'ancillary_quantities':['bulge_to_total_ratio_i', 'bulge_to_total_ratio_stellar'],
'ancillary_keys':['B/T'],
},
},
}
#define ellipticity functions
@staticmethod
def e_default(e):
return e
@staticmethod
def e_squared(a, b):
q = b/a
return (1-q**2)/(1+q**2)
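    # Quick numeric check (illustrative): for an axis ratio q = b/a = 0.5,
    # the 'e_default' convention (catalog ellipticity, e = (1-q)/(1+q) per the
    # axis label below) gives 1/3, while e_squared gives
    # (1-0.25)/(1+0.25) = 0.6, so the two conventions are not interchangeable
    # and must match the definition used by the validation data.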
#plotting constants
lw2 = 2
fsize = 16
lsize = 10
validation_color = 'black'
validation_marker = 'o'
default_markers = ['v', 's', 'd', 'H', '^', 'D', 'h', '<', '>', '.']
msize = 4 #marker-size
yaxis_xoffset = 0.02
yaxis_yoffset = 0.5
def __init__(self, z='redshift_true', zlo=0., zhi=2., N_ebins=40, observation='', ncolumns=2,
morphology=('all',), band_mag='i', mag_lo=24, band_Mag='r', Mag_hi=-21, Mag_lo=-17, normed=False,
**kwargs):
#pylint: disable=W0231
#catalog quantities
self.filter_quantities = [z]
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.title_in_legend = kwargs.get('title_in_legend', False)
self.legend_location = kwargs.get('legend_location', 'lower left')
self.xfont_size = kwargs.get('xfont_size', 12)
self.yfont_size = kwargs.get('yfont_size', 14)
self.legend_size = kwargs.get('legend_size', 6)
self.legend_title_size = kwargs.get('legend_title_size', 8)
possible_mag_fields = ('mag_{}_lsst',
'mag_{}_sdss',
'mag_{}_des',
'mag_true_{}_lsst',
'mag_true_{}_sdss',
'mag_true_{}_des',
)
possible_Mag_fields = ('Mag_true_{}_z0',
'Mag_true_{}_lsst_z0',
'Mag_true_{}_sdss_z0',
'Mag_true_{}_des_z0',
)
possible_native_luminosities = {'V':'otherLuminosities/totalLuminositiesStellar:V:rest',
}
possible_ellipticity_definitions = {'e_default':{'possible_quantities':[['ellipticity', 'ellipticity_true']],
'function':self.e_default,
'xaxis_label': r'$e = (1-q)/(1+q)$',
'file_label':'e',
},
'e_squared':{'possible_quantities':[['size', 'size_true'], ['size_minor', 'size_minor_true']],
'function':self.e_squared,
'xaxis_label': r'$e = (1-q^2)/(1+q^2)$',
'file_label':'e2',
},
}
#binning
self.N_ebins = N_ebins
self.ebins = np.linspace(0., 1, N_ebins+1)
#validation data
self.validation_data = {}
self.observation = observation
#check for valid observations
if not observation:
print('Warning: no data file supplied, no observation requested; only catalog data will be shown')
elif observation not in self.possible_observations:
raise ValueError('Observation {} not available'.format(observation))
else:
self.validation_data = self.get_validation_data(observation)
#plotting variables
self.ncolumns = int(ncolumns)
self.normed = normed
#morphologies
self.morphology = self.validation_data.get('morphology', morphology)
#cuts
self.zlo = self.validation_data.get('zlo', float(zlo))
self.zhi = self.validation_data.get('zhi', float(zhi))
self.filters = [(lambda z: (z > self.zlo) & (z < self.zhi), z)]
self.band_mag = self.validation_data.get('band_mag', band_mag)
self.possible_mag_fields = [f.format(self.band_mag) for f in possible_mag_fields]
self.band_Mag = self.validation_data.get('band_Mag', [band_Mag])
self.possible_Mag_fields = [f.format(band) for f in possible_Mag_fields for band in self.band_Mag]
self.mag_lo = dict(zip(self.morphology, [self.validation_data.get('cuts', {}).get(m, {}).get('mag_lo', mag_lo) for m in self.morphology]))
self.Mag_lo = dict(zip(self.morphology, [self.validation_data.get('cuts', {}).get(m, {}).get('Mag_lo', Mag_lo) for m in self.morphology]))
self.Mag_hi = dict(zip(self.morphology, [self.validation_data.get('cuts', {}).get(m, {}).get('Mag_hi', Mag_hi) for m in self.morphology]))
#check for ellipticity definitions
self.possible_quantities = possible_ellipticity_definitions[self.validation_data.get('definition', 'e_default')]['possible_quantities']
self.ellipticity_function = possible_ellipticity_definitions[self.validation_data.get('definition', 'e_default')].get('function')
self.xaxis_label = possible_ellipticity_definitions[self.validation_data.get('definition', 'e_default')].get('xaxis_label')
self.file_label = possible_ellipticity_definitions[self.validation_data.get('definition', 'e_default')].get('file_label')
#check for native quantities
self.native_luminosities = dict(zip([band for band in possible_native_luminosities if band in self.band_Mag],\
[possible_native_luminosities[band] for band in possible_native_luminosities if band in self.band_Mag]))
#check for ancillary quantities
self.possible_ancillary_quantities = self.validation_data.get('cuts', {}).get('ancillary_quantities', None)
#setup subplot configuration
self.init_plots()
#setup summary plot
self.summary_fig, self.summary_ax = plt.subplots(self.nrows, self.ncolumns, sharex='col')
#could plot summary validation data here if available but would need to evaluate labels, bin values etc.
#otherwise setup a check so that validation data is plotted only once on summary plot
self.first_pass = True
self.validation_percentiles = {
'percentiles': kwargs['validation_percentile_points'],
'ranges': kwargs['validation_percentile_ranges']}
self._other_kwargs = kwargs
def init_plots(self):
#setup plots and determine number of rows required for subplots
self.nplots = len(self.morphology)
self.nrows = (self.nplots+self.ncolumns-1)//self.ncolumns
self._color_iterator = ('C{}'.format(i) for i in count())
#other plotting variables
self.markers = iter(self.default_markers)
if self.normed:
self.yaxis = '$P(e)$'
else:
self.yaxis = '$N$'
def get_validation_data(self, observation):
data_args = self.possible_observations[observation]
validation_data = {}
for m in data_args['morphology']:
file_info = data_args['file-info'][m]
data_path = os.path.join(self.data_dir, data_args['filename_template'].format(file_info['prefix'], m.lower(), file_info['suffix']))
if not os.path.exists(data_path):
raise ValueError("{} data file {} not found".format(m, data_path))
if not os.path.getsize(data_path):
raise ValueError("{} data file {} is empty".format(m, data_path))
validation_data[m] = np.loadtxt(data_path, unpack=True, usecols=data_args['file-info']['usecols'],\
skiprows=data_args['file-info']['skiprows'])
#collect remaining information
for arg in data_args:
if not 'file' in arg:
validation_data[arg] = data_args[arg]
return validation_data
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
#update color and marker to preserve catalog colors and markers across tests
catalog_color = next(self._color_iterator)
#add quantities to catalog if needed
for band in self.native_luminosities:
if catalog_instance.has_quantity(self.native_luminosities[band]):
catalog_instance.add_quantity_modifier('Mag_true_{}_z0'.format(band), (lambda x: -2.5*np.log10(x), self.native_luminosities[band]))
print('Checking for required quantities')
#check catalog data for required quantities
required_quantities = []
for pgroup in self.possible_quantities:
found_quantity = catalog_instance.first_available(*pgroup)
if found_quantity is not None:
required_quantities.append(found_quantity)
if not catalog_instance.has_quantities(required_quantities + self.filter_quantities):
return TestResult(skipped=True, summary='Missing some required quantities: {}'.format(', '.join(required_quantities)))
ancillary_quantity = None
if self.possible_ancillary_quantities is not None:
ancillary_quantity = catalog_instance.first_available(*self.possible_ancillary_quantities)
if ancillary_quantity is None:
return TestResult(skipped=True, summary='Missing some ancillary quantities: {}'.format(', '.join(self.possible_ancillary_quantities)))
mag_field = catalog_instance.first_available(*self.possible_mag_fields)
if not mag_field:
return TestResult(skipped=True, summary='Missing needed quantities to make magnitude cuts')
Mag_field = catalog_instance.first_available(*self.possible_Mag_fields)
if not Mag_field:
return TestResult(skipped=True, summary='Missing needed quantities to make magnitude cuts')
all_quantities = required_quantities +[mag_field, Mag_field] + self.filter_quantities
if ancillary_quantity is not None:
all_quantities = all_quantities + [ancillary_quantity]
print('Fetching quantities', all_quantities)
mag_filtername = str(mag_field.split('_')[-2])
Mag_filtername = str(Mag_field.split('_')[2])
filelabel = '_'.join(('m', mag_filtername, 'M', Mag_filtername))
#setup plots
fig, ax = plt.subplots(self.nrows, self.ncolumns, sharex='col')
fig.text(self.yaxis_xoffset, self.yaxis_yoffset, self.yaxis, va='center', rotation='vertical',
fontsize=self.yfont_size) #setup a common axis label
#initialize arrays for storing histogram sums
        N_array = np.zeros((self.nrows, self.ncolumns, len(self.ebins)-1), dtype=int)
sume_array = np.zeros((self.nrows, self.ncolumns, len(self.ebins)-1))
sume2_array = np.zeros((self.nrows, self.ncolumns, len(self.ebins)-1))
#initialize boolean values for checking ellipticity endpoints
any_low = False
any_high = False
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
#get catalog data by looping over data iterator (needed for large catalogs) and aggregate histograms
for catalog_data in catalog_instance.get_quantities(all_quantities, filters=self.filters, return_iterator=True):
catalog_data = GCRQuery(*((np.isfinite, col) for col in catalog_data)).filter(catalog_data)
for morphology, N, sume, sume2 in zip_longest(
self.morphology,
N_array.reshape(-1, N_array.shape[-1]), #flatten all but last dimension of array
sume_array.reshape(-1, sume_array.shape[-1]),
sume2_array.reshape(-1, sume2_array.shape[-1]),
):
#make cuts
if morphology is not None:
mask = (catalog_data[mag_field] < self.mag_lo.get(morphology))
mask &= (self.Mag_hi.get(morphology) < catalog_data[Mag_field]) & (catalog_data[Mag_field] < self.Mag_lo.get(morphology))
if ancillary_quantity is not None:
for aq, key in zip_longest([ancillary_quantity], self.validation_data['cuts'].get('ancillary_keys')):
mask &= (self.validation_data['cuts'][morphology].get(key+'_min') < catalog_data[aq]) &\
(catalog_data[aq] < self.validation_data['cuts'][morphology].get(key+'_max'))
print('Number of {} galaxies passing selection cuts for morphology {} = {}'.format(catalog_name, morphology, np.sum(mask)))
#compute ellipticity from definition
e_this = self.ellipticity_function(*[catalog_data[q][mask] for q in required_quantities])
#print('mm', np.min(e_this), np.max(e_this))
del mask
#accumulate histograms
N += np.histogram(e_this, bins=self.ebins)[0]
sume += np.histogram(e_this, bins=self.ebins, weights=e_this)[0]
sume2 += np.histogram(e_this, bins=self.ebins, weights=e_this**2)[0]
#check borders
if len(e_this)>0:
if np.min(e_this)<0:
any_low = True
print('Value<0 found for morphology {} in catalog {}: {}'.format(morphology, catalog_name, np.min(e_this)))
if np.max(e_this)>1:
any_high = True
print('Value>1 found for morphology {} in catalog {}: {}'.format(morphology, catalog_name, np.max(e_this)))
#check that catalog has entries for quantity to be plotted
        if not N_array.sum():
raise ValueError('No data found for quantities {}'.format(', '.join(required_quantities)))
#make plots
results = {}
        n_fails = int(any_low) + int(any_high)
for n, (ax_this, summary_ax_this, morphology, N, sume, sume2) in enumerate(zip_longest(
ax.flat,
self.summary_ax.flat,
self.morphology,
N_array.reshape(-1, N_array.shape[-1]), #flatten all but last dimension of array
sume_array.reshape(-1, sume_array.shape[-1]),
sume2_array.reshape(-1, sume2_array.shape[-1]),
)):
if morphology is not None:
#get labels
cutlabel = '${} < {} < {}$; ${} < {}$'.format(str(self.Mag_hi.get(morphology)), Mag_filtername, str(self.Mag_lo.get(morphology)),\
mag_filtername, str(self.mag_lo.get(morphology)))
                cutlabel = re.sub('-inf < ', '', cutlabel) # remove the uninformative -inf lower bound from the label
ancillary_label = []
if ancillary_quantity is not None:
for key in self.validation_data['cuts'].get('ancillary_keys'):
ancillary_label.append('${}<{}<{}$'.format(str(self.validation_data['cuts'][morphology].get(key+'_min')),\
key, str(self.validation_data['cuts'][morphology].get(key+'_max'))))
ancillary_label = '; '.join(ancillary_label)
catalog_label = '; '.join((catalog_name, ancillary_label))
validation_label = ' '.join((self.validation_data.get('label', ''), morphology))
reskey = cutlabel.replace('$', '')
#get points to be plotted
e_values = sume/N
sumN = N.sum()
total = '(# of galaxies = {})'.format(sumN)
Nerrors = np.sqrt(N)
if self.normed:
binwidths = self.ebins[1:] - self.ebins[:-1]
N = N/sumN/binwidths
Nerrors = Nerrors/sumN/binwidths
results[reskey] = {'catalog':{'e_ave':e_values, 'N':N, 'N+':N+Nerrors, 'N-':N-Nerrors,\
'total':total, 'xtralabel':ancillary_label.replace('$', '')}}
self.catalog_subplot(ax_this, e_values, N, catalog_color, catalog_label)
results[reskey]['validation'] = self.validation_subplot(ax_this, self.validation_data.get(morphology), validation_label)
self.decorate_subplot(ax_this, n, label=cutlabel)
#add curve for this catalog to summary plot
self.catalog_subplot(summary_ax_this, e_values, N, catalog_color, catalog_label, errors=Nerrors)
if self.first_pass: #add validation data if evaluating first catalog
self.validation_subplot(summary_ax_this, self.validation_data.get(morphology), validation_label)
self.decorate_subplot(summary_ax_this, n, label=cutlabel)
#check ellipticity distributions
number_passed, percentiles = self.validate_percentiles(N)
print("Percentiles for morphology {} are: ".format(morphology)+', '.join([" {:.3f} ({})".format(p, v) for p,v in zip(percentiles, self.validation_percentiles['percentiles'])]))
if number_passed>0:
print("Ellipticity percentile check failed for morphology {}".format(morphology))
n_fails += number_passed
else:
#make empty subplots invisible
ax_this.set_visible(False)
summary_ax_this.set_visible(False)
#check overall ellipticity distribution
        global_N = N_array.sum(axis=(0, 1))
number_passed, percentiles = self.validate_percentiles(global_N)
print("Percentiles for global distribution are: "+', '.join([" {:.3f} ({})".format(p, v) for p,v in zip(percentiles, self.validation_percentiles['percentiles'])]))
if number_passed>0:
print("Ellipticity percentile check failed for global distribution")
n_fails += number_passed
#save results for catalog and validation data in txt files
for filename, dkey, dtype, info in zip_longest((catalog_name, self.observation), ('catalog', 'validation'), ('N', 'data'), ('total',)):
if filename:
with open(os.path.join(output_dir, ''.join(['Nvs', self.file_label, '_', filelabel+'.txt'])), 'ab') as f_handle: #open file in append mode
#loop over cuts in results dict
for key, value in results.items():
self.save_quantities(dtype, value[dkey], f_handle, comment=' '.join((key, value[dkey].get('xtralabel', ''), value[dkey].get(info, ''))))
if self.first_pass: #turn off validation data plot in summary for remaining catalogs
self.first_pass = False
#make final adjustments to plots and save figure
self.post_process_plot(fig)
fig.savefig(os.path.join(output_dir, ''.join(['Nvs', self.file_label, '_', filelabel+'.png'])))
plt.close(fig)
return TestResult(score=n_fails, passed=(n_fails==0))
def catalog_subplot(self, ax, e_values, N, catalog_color, catalog_label, errors=None):
ax.plot(e_values, N, label=catalog_label, color=catalog_color)
if errors is not None:
ax.fill_between(e_values, N+errors, N-errors, alpha=0.3, facecolor=catalog_color)
def validation_subplot(self, ax, validation_data, validation_label):
results = dict()
if validation_data is not None:
N, _ = np.histogram(validation_data, bins=self.ebins)
sum_e, _ = np.histogram(validation_data, bins=self.ebins, weights=validation_data)
e_ave = sum_e/N
errors = np.sqrt(N)
if self.normed:
sumN = N.sum()
binwidths = self.ebins[1:] - self.ebins[:-1]
N = N/sumN/binwidths
errors = errors/sumN/binwidths
ax.errorbar(e_ave, N, yerr=errors, color=self.validation_color, label=validation_label, marker=self.validation_marker)
results['e_ave'] = e_ave
results['data'] = N
results['data+'] = N + errors
results['data-'] = N - errors
return results
def decorate_subplot(self, ax, nplot, label=None):
ax.tick_params(labelsize=8)
ax.set_yscale('log')
if label and not self.title_in_legend:
ax.set_title(label, fontsize='x-small')
#add axes and legend
        if nplot+1 <= self.nplots-self.ncolumns: # hide x tick labels everywhere except the bottom row of subplots
for axlabel in ax.get_xticklabels():
axlabel.set_visible(False)
#prevent overlapping yaxis labels
ax.yaxis.get_major_ticks()[0].label1.set_visible(False)
else:
ax.set_xlabel(self.xaxis_label, size=self.xfont_size)
for axlabel in ax.get_xticklabels():
axlabel.set_visible(True)
legend = ax.legend(loc=self.legend_location, fancybox=True, framealpha=0.5, numpoints=1, fontsize=self.legend_size)
if self.title_in_legend:
legend.set_title(label, prop = {'size':self.legend_title_size})
def validate_percentiles(self, data):
        cdf = np.zeros(self.N_ebins+1)
        cdf[1:] = np.cumsum(data)
        cdf /= cdf[-1]
        cdf *= 100 # express the CDF in percent to match the requested percentile points
interpolator = interp1d(cdf, self.ebins)
percentiles = interpolator(self.validation_percentiles['percentiles'])
return np.sum([(p<=pmin) | (p>=pmax) for p, (pmin, pmax) in zip(
percentiles, self.validation_percentiles['ranges'])]), percentiles
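    # Illustrative example (hypothetical config values): with
    # validation_percentile_points = [25, 50, 75] and
    # validation_percentile_ranges = [(0.05, 0.3), (0.2, 0.5), (0.4, 0.8)],
    # the interpolator returns the ellipticities at which the CDF reaches
    # 25%, 50% and 75%, and the first return value counts how many of them
    # fall outside their allowed (pmin, pmax) range.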
def post_process_plot(self, fig):
if self.title_in_legend:
fig.subplots_adjust(hspace=0)
@staticmethod
def save_quantities(keyname, results, filename, comment=''):
if keyname in results:
if keyname+'-' in results and keyname+'+' in results:
fields = ('e_ave', keyname, keyname+'-', keyname+'+')
header = ', '.join(('Data columns are: <e>', keyname, keyname+'-', keyname+'+', ' '))
else:
fields = ('e_ave', keyname)
header = ', '.join(('Data columns are: <e>', keyname, ' '))
        np.savetxt(filename, np.vstack([results[k] for k in fields]).T, fmt='%12.4e', header=header+comment)
def conclude_test(self, output_dir):
self.post_process_plot(self.summary_fig)
self.summary_fig.savefig(os.path.join(output_dir, 'summary.png'))
plt.close(self.summary_fig)
| 24,835
| 49.997947
| 192
|
py
|
descqa
|
descqa-master/descqa/utils.py
|
"""
utility functions for descqa
"""
from __future__ import unicode_literals, division, print_function, absolute_import
import numpy as np
import healpy as hp
__all__ = [
'get_sky_volume',
'get_opt_binpoints',
'get_healpixel_footprint',
'generate_uniform_random_ra_dec',
'generate_uniform_random_ra_dec_footprint',
'first',
'is_string_like',
]
def get_sky_volume(sky_area, zlo, zhi, cosmology):
"""
Parameters
----------
sky_area : float
sky area in sq. deg.
zlo : float
lower redshift
zhi : float
upper redshift
cosmology : astropy.Cosmology
Returns
-------
sky_volume : float
in unit of Mpc**3.0
"""
dhi = cosmology.comoving_distance(zhi).to('Mpc').value if zhi > 0 else 0.0
dlo = cosmology.comoving_distance(zlo).to('Mpc').value if zlo > 0 else 0.0
    sky_area_rad = np.deg2rad(np.deg2rad(sky_area)) # convert sq. deg. to steradians
return (dhi**3.0 - dlo**3.0) * sky_area_rad / 3.0
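# Example usage (illustrative; the exact number depends on the cosmology):
#   from astropy.cosmology import Planck15
#   vol = get_sky_volume(100.0, 0.1, 0.2, Planck15)
# gives the comoving volume in Mpc**3 of a 100 sq. deg. beam between
# z = 0.1 and z = 0.2, computed as (solid angle)/3 * (D_hi**3 - D_lo**3).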
def get_sky_area(catalog_instance, nside=1024):
"""
Parameters
----------
catalog_instance: GCRCatalogs intance
nside: nside parameter for healpy
Returns
-------
sky_area : float
in units of deg**2.0
"""
possible_area_qs = (('ra_true', 'ra'), ('dec_true', 'dec'))
area_qs = [catalog_instance.first_available(*a) for a in possible_area_qs]
pixels = set()
for d in catalog_instance.get_quantities(area_qs, return_iterator=True):
pixels.update(hp.ang2pix(nside, d[area_qs[0]], d[area_qs[1]], lonlat=True))
frac = len(pixels) / hp.nside2npix(nside)
    sky_area = frac * np.rad2deg(np.rad2deg(4.0*np.pi)) # full-sky solid angle in sq. deg.
return sky_area
def get_opt_binpoints(N, sumM, sumM2, bins):
"""
compute optimal values at which to plot bin counts
    the optimal point is the location where the function describing the data
    equals the bin value N_i given by (Taylor expansion of the integral over the bin)/bin-width:
    f(c_i) + offset*fprime(c_i) = f(c_i) + bin-width**2*fdblprime(c_i)/24
uses N (counts per bin)
sumM (first moment of points per bin)
sumM2 (second moment of points per bin)
"""
centers = (bins[1:]+bins[:-1])/2
Delta = -bins[:-1]+bins[1:] #bin widths
moment0 = N/Delta #(integrals over bins)/Delta = f(c_i) + Delta**2*fdblprime(c_i)/24
moment1 = N*(sumM/N - centers)/Delta #(first moments about bin centers)/Delta = Delta**2*fprime(c_i)/12
    moment2 = N*(sumM2/N - 2*centers*sumM/N + centers**2)/Delta #(second moments about bin centers)/Delta = Delta**2*f(c_i)/12 + Delta**4*fdblprime(c_i)/160
fprime = 12.*moment1/Delta**2 #first derivative of function at bin center
fdblprime = 360*(moment2 - moment0*Delta**2/12)/Delta**4 #second derivative of function at bin center
offset = Delta**2*fdblprime/fprime/24 # offset*fprime(c_i) = Delta**2*fdblprime(c_i)/24
return centers + offset
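# Sanity check (illustrative): for counts drawn from a linear f(x) the second
# derivative vanishes, so the offsets are zero and get_opt_binpoints returns
# the bin centers; curvature in f shifts the plot points away from the centers
# in proportion to fdblprime/fprime.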
def get_healpixel_footprint(ra, dec, nside, nest=False, count_threshold=None):
"""
Parameters
----------
ra : ndarray
RA in degrees
dec : ndarray
Dec in degrees
nside : int
number of healpixel nside, must be 2**k
nest : bool, optional
using healpixel nest or ring ordering
count_threshold : None or int (optional)
minimal number of points within a healpixel to count as part of the footprint
Returns
-------
pixels : ndarray
1d array that contains healpixel IDs
"""
pixels = hp.ang2pix(nside, ra, dec, nest=nest, lonlat=True)
if count_threshold and count_threshold > 1:
pixels, counts = np.unique(pixels, return_counts=True)
return pixels[counts >= count_threshold]
return np.unique(pixels)
def generate_uniform_random_ra_dec_min_max(n, ra_min, ra_max, dec_min, dec_max):
"""
Parameters
----------
n : int
number of random points needed
ra_min, ra_max, dec_min, dec_max: float
min and max of ra and dec
Returns
-------
ra : ndarray
1d array of length n that contains RA in degrees
dec : ndarray
1d array of length n that contains Dec in degrees
"""
ra = np.random.uniform(ra_min, ra_max, size=n)
dec = np.random.uniform(np.sin(np.deg2rad(dec_min)), np.sin(np.deg2rad(dec_max)), size=n)
dec = np.arcsin(dec, out=dec)
dec = np.rad2deg(dec, out=dec)
return ra, dec
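# Why sample sin(dec) uniformly (explanatory note, not original code): the
# area element on the sphere is dA = cos(dec) d(dec) d(ra) = d(sin(dec)) d(ra),
# so drawing sin(dec) uniformly between sin(dec_min) and sin(dec_max) yields
# points that are uniform per unit sky area rather than clustered at the poles.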
def generate_uniform_random_ra_dec(n):
"""
Parameters
----------
n : int
number of random points needed
Returns
-------
ra : ndarray
1d array of length n that contains RA in degrees
dec : ndarray
1d array of length n that contains Dec in degrees
"""
return generate_uniform_random_ra_dec_min_max(n, 0, 360.0, -90.0, 90.0)
def generate_uniform_random_ra_dec_healpixel(n, pix, nside, nest=False):
"""
Parameters
----------
n : int
number of random points needed
pix : int
healpixel ID
nside : int
number of healpixel nside, must be 2**k
nest : bool, optional
using healpixel nest or ring ordering
Returns
-------
ra : ndarray
1d array of length n that contains RA in degrees
dec : ndarray
1d array of length n that contains Dec in degrees
"""
ra, dec = hp.vec2ang(hp.boundaries(nside, pix, 1, nest=nest).T, lonlat=True)
ra_dec_min_max = ra.min(), ra.max(), dec.min(), dec.max()
ra = np.empty(n)
dec = np.empty_like(ra)
n_needed = n
while n_needed > 0:
ra_this, dec_this = generate_uniform_random_ra_dec_min_max(n_needed*2, *ra_dec_min_max)
mask = np.where(hp.ang2pix(nside, ra_this, dec_this, nest=nest, lonlat=True) == pix)[0]
count_this = mask.size
if n_needed - count_this < 0:
count_this = n_needed
mask = mask[:n_needed]
        s = slice(-n_needed, -n_needed+count_this if -n_needed+count_this < 0 else None) # fill the next count_this slots, counted from the end
ra[s] = ra_this[mask]
dec[s] = dec_this[mask]
n_needed -= count_this
return ra, dec
def generate_uniform_random_ra_dec_footprint(n, footprint=None, nside=None, nest=False):
"""
Parameters
----------
n : int
number of random points needed
footprint : 1d array, optional
unique healpixel IDs
nside : int, optional
number of healpixel nside as used in footprint, must be 2**k
nest : bool, optional
using healpixel nest or ring ordering
Returns
-------
ra : ndarray
1d array of length n that contains RA in degrees
dec : ndarray
1d array of length n that contains Dec in degrees
"""
if footprint is None or hp.nside2npix(nside) == len(footprint):
return generate_uniform_random_ra_dec(n)
n_per_pix_all = np.histogram(np.random.rand(n), np.linspace(0, 1, len(footprint)+1))[0]
ra = np.empty(n)
dec = np.empty_like(ra)
count = 0
for n_per_pix, pix in zip(n_per_pix_all, footprint):
ra_this, dec_this = generate_uniform_random_ra_dec_healpixel(n_per_pix, pix, nside, nest)
s = slice(count, count+n_per_pix)
ra[s] = ra_this
dec[s] = dec_this
count += n_per_pix
assert count == n
return ra, dec
def generate_uniform_random_dist(n, dlo, dhi):
"""
Parameters
----------
n : int
number of random points needed
dlo : float
lower distance
dhi : float
upper distance
Returns
-------
dist : ndarray
1d array of length n that contains distance
"""
d = np.random.rand(n)
d *= (dhi**3.0 - dlo**3.0)
d += dlo**3.0
d **= 1.0/3.0
return d
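# The three in-place operations above implement inverse-CDF sampling: for a
# uniform density in a spherical shell, P(<d) is proportional to d**3, so
# d = (u*(dhi**3 - dlo**3) + dlo**3)**(1/3) with u ~ Uniform(0, 1).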
def first(iterable, default=None):
"""
returns the first element of `iterable`
"""
return next(iter(iterable), default)
def is_string_like(obj):
"""
test if `obj` is string like
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
| 7,981
| 27.609319
| 152
|
py
|
descqa
|
descqa-master/descqa/DeltaSigmaTest.py
|
import os
import numpy as np
import treecorr
from scipy.interpolate import interp1d
from astropy import units as u
from astropy.coordinates import SkyCoord, search_around_sky
import astropy.constants as cst
from astropy.cosmology import WMAP7 # pylint: disable=no-name-in-module
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['DeltaSigma']
class DeltaSigma(BaseValidationTest):
"""
This validation test looks at galaxy-shear correlations by comparing DeltaSigma.
"""
def __init__(self, **kwargs):
# pylint: disable=super-init-not-called
# validation data
validation_filepath = os.path.join(self.data_dir, kwargs['data_filename'])
self.data = kwargs['data']
self.zmin_l = kwargs['zmin_l']
self.zmax_l = kwargs['zmax_l']
self.zmin_s = kwargs['zmin_s']
self.zmax_s = kwargs['zmax_s']
self.max_background_galaxies = int(float(kwargs['max_background_galaxies']))
self.zmax = kwargs['zmax']
self.Rmin = kwargs['Rmin']
self.Rmax = kwargs['Rmax']
self.nR = kwargs['nR']
self.validation_data = np.loadtxt(validation_filepath)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
# pylint: disable=no-member
        # Try to read the cosmology from the catalog; otherwise default to WMAP7
try:
cosmo = catalog_instance.cosmology
except AttributeError:
cosmo = WMAP7
# Create interpolation tables for efficient computation of sigma crit
z = np.linspace(0, self.zmax, int(self.zmax * 100) + 1)
d1 = cosmo.angular_diameter_distance(z) # in Mpc
angular_diameter_distance = interp1d(z, d1, kind='quadratic')
d2 = cosmo.comoving_transverse_distance(z) # in Mpc
comoving_transverse_distance = interp1d(z, d2, kind='quadratic')
# Now figure out the lenses, for the validation data available,
# each have slightly non-trivial cuts, so we do them separately... not totally ideal
if self.data == 'sdss_lowz':
# Singh et al (2015) (http://adsabs.harvard.edu/abs/2015MNRAS.450.2195S) measurements on the SDSS LOWZ sample.
res = catalog_instance.get_quantities(['redshift_true', 'ra', 'dec', 'shear_1', 'shear_2',
'mag_true_i_sdss', 'mag_true_z_sdss','mag_true_g_sdss', 'mag_true_r_sdss'])
# Compute mask for lowz sample
# These cuts are defined in section 3 of https://arxiv.org/pdf/1509.06529.pdf
# and summarised here: http://www.sdss.org/dr14/algorithms/boss_galaxy_ts/#TheBOSSLOWZGalaxySample
# Definition of auxiliary colors:
cperp = (res['mag_true_r_sdss'] - res['mag_true_i_sdss']) - (res['mag_true_g_sdss'] - res['mag_true_r_sdss'])/4.0 - 0.18
cpar = 0.7*(res['mag_true_g_sdss'] - res['mag_true_r_sdss']) + 1.2*((res['mag_true_r_sdss'] - res['mag_true_i_sdss'])-0.18)
# LOWZ selection cuts:
mask_lens = np.abs(cperp) < 0.2 # color boundaries
mask_lens &= res['mag_true_r_sdss'] < (13.5 + cpar/0.3) # sliding magnitude cut
            mask_lens &= (res['mag_true_r_sdss'] > 16) & (res['mag_true_r_sdss'] < 19.6)
# Additional redshift cuts used in Singh et al. (2015)
mask_lens &= (res['redshift_true'] > self.zmin_l) & (res['redshift_true'] < self.zmax_l)
Mask_lens = [mask_lens]
fig = plt.figure()
if self.data == 'cfhtlens':
res = catalog_instance.get_quantities(['redshift_true', 'ra', 'dec', 'shear_1', 'shear_2',
'Mag_true_g_lsst_z0', 'Mag_true_r_lsst_z0'])
Mr_min = np.array([-21.0,-22.0,-23.0,-24.0])
Mr_max = np.array([-20.0,-21.5,-22.5,-23.5])
blue_frac = np.array([0.7,0.32,0.11,0.03])*100
gr = res['Mag_true_g_lsst_z0'] - res['Mag_true_r_lsst_z0'] # larger number means redder
Mask_lens = []
for i in range(4):
mask_lens = (res['redshift_true']>self.zmin_l) & (res['redshift_true']<self.zmax_l) & (res['Mag_true_r_lsst_z0']>Mr_min[i]) & (res['Mag_true_r_lsst_z0']<Mr_max[i])
gr_threshold = np.percentile(gr[mask_lens], blue_frac[i])
Mask_lens.append(mask_lens & (gr>gr_threshold))
Mask_lens.append(mask_lens & (gr<gr_threshold))
fig1 = plt.figure(1, figsize=(12,9))
fig2 = plt.figure(2, figsize=(12,5))
if self.data == 'sdss_main':
res = catalog_instance.get_quantities(['redshift_true', 'ra', 'dec', 'shear_1', 'shear_2',
'mag_true_i_sdss', 'mag_true_z_sdss','mag_true_g_sdss', 'mag_true_r_sdss', 'stellar_mass_bulge', 'stellar_mass_disk','Mag_true_g_sdss_z0','Mag_true_r_sdss_z0'])
gr = res['Mag_true_g_sdss_z0'] - res['Mag_true_r_sdss_z0'] # larger number means redder
sm = res['stellar_mass_bulge'] + res['stellar_mass_disk']
SM_min = np.array([10,10.7,11.2,11.6])
SM_max = np.array([10.4,11.0,11.4,15.0])
Mask_lens = []
for i in range(4):
mask_lens = (res['redshift_true']>self.zmin_l) & (res['redshift_true']<self.zmax_l) & (res['mag_true_r_sdss']< 17.7) & (np.log10(sm)>SM_min[i]) & (np.log10(sm)<SM_max[i])
Mask_lens.append(mask_lens & (gr>0.7)) # for the data, 0.7 is used for k-correct colors at z=0.1
Mask_lens.append(mask_lens & (gr<0.7))
fig1 = plt.figure(1, figsize=(12,9))
fig2 = plt.figure(2, figsize=(12,5))
        # Compute the mask for the source sample; this only serves to keep the number of galaxies manageable
mask_source = (res['redshift_true'] > self.zmin_s) & (res['redshift_true'] < self.zmax_s)
inds = np.where(mask_source)[0]
if len(inds) > int(self.max_background_galaxies):
mask_source[inds[np.random.choice(len(inds),
size=len(inds) - int(self.max_background_galaxies),
replace=False)]] = False
coords = SkyCoord(ra=res['ra']*u.degree, dec=res['dec']*u.degree)
coords_s = coords[mask_source]
# run gammat in thin redshift bins, loop over lens bins of different stellar mass and colors
for i in range(len(Mask_lens)):
            nlens = np.count_nonzero(Mask_lens[i]) / catalog_instance.sky_area # lens density per sq. deg.
with open(os.path.join(output_dir, 'galaxy_density_'+str(self.data)+'.dat'), 'a') as f:
f.write('{} \n'.format(nlens))
# Create astropy coordinate objects
coords_l = coords[Mask_lens[i]]
# Search for neighbours
idx1, idx2, sep2d, _ = search_around_sky(coords_l, coords_s, 3.*u.deg)
# Computing sigma crit for each pair
zl = res['redshift_true'][Mask_lens[i]][idx1]
zs = res['redshift_true'][mask_source][idx2]
# Warning: this assumes a flat universe
# See http://docs.astropy.org/en/v0.3/_modules/astropy/cosmology/core.html#FLRW.angular_diameter_distance_z1z2
dm1 = comoving_transverse_distance(zl)
dm2 = comoving_transverse_distance(zs)
angular_diameter_distance_z1z2 = u.Quantity((dm2 - dm1)/(1. + zs), u.Mpc)
sigcrit = cst.c**2 / (4.*np.pi*cst.G) * angular_diameter_distance(zs) / \
((1. + zl)**2. * angular_diameter_distance_z1z2 * angular_diameter_distance(zl))
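            # i.e. Sigma_crit = c^2 D_A(z_s) / [4 pi G (1+z_l)^2 D_A(z_l) D_A(z_l, z_s)],
            # the critical surface density appropriate for comoving transverse
            # separations (the (1+z_l)^2 factor converts to comoving units).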
# NOTE: the validation data is in comoving coordinates, the next few
# lines take care of proper unit conversions
# Apply unit conversion to obtain sigma crit in h Msol /pc^2 (comoving)
cms = u.Msun / u.pc**2
sigcrit = sigcrit*(u.kg/(u.Mpc* u.m)).to(cms) / cosmo.h
# Computing the projected separation for each pairs, in Mpc/h (comoving)
r = sep2d.rad*angular_diameter_distance(zl)*(1. + zl) * cosmo.h
# Computing the tangential shear
thetac = np.arctan2((coords_s[idx2].dec.rad - coords_l[idx1].dec.rad) / np.cos((coords_s[idx2].dec.rad + coords_l[idx1].dec.rad) / 2.0),coords_s[idx2].ra.rad - coords_l[idx1].ra.rad)
gammat = -(res['shear_1'][mask_source][idx2] * np.cos(2*thetac) - res['shear_2'][mask_source][idx2] * np.sin(2*thetac))
# Binning the tangential shear
bins = np.logspace(np.log10(self.Rmin), np.log10(self.Rmax), self.nR, endpoint=True)
counts = np.histogram(r, bins=bins)[0]
gt, b = np.histogram(r, bins=bins, weights=gammat*sigcrit)
rp = 0.5*(b[1:]+b[:-1])
gt = gt/counts
outfile = os.path.join(output_dir, 'DS_'+str(self.data)+'_'+str(i)+'.dat')
np.savetxt(outfile, np.vstack((rp, gt)).T)
if self.data == 'sdss_lowz':
ax = plt.subplot(111)
plt.errorbar(self.validation_data[:,0], self.validation_data[:,1], yerr=self.validation_data[:,2], label='SDSS LOWZ from Singh et al. (2015)',c='k', lw=1, marker='.', fmt='.', capthick=0.8, capsize=2.2)
plt.loglog(rp, gt, label=catalog_name)
plt.title('Lens number density: '+str(nlens)[:4]+' per sq. deg')
ax.set_xlabel('$r_p$ [Mpc/h]')
ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
ax.set_ylim(0.5, 100)
if self.data == 'cfhtlens':
ii = np.mod(i,2)
iii = int(i/2)
plt.figure(1)
ax = plt.subplot(2,2,iii+1)
if ii==0:
plt.loglog(rp, gt, label=str(Mr_min[int(i/2)])+'< Mr < '+str(Mr_max[int(i/2)])+'; red; '+catalog_name, lw=2, color='r', alpha=0.5)
plt.errorbar(self.validation_data[:,0]/1000*(7./10.), self.validation_data[:,iii*2+1]/(7./10.), color='darkred', lw=2, marker='x', fmt='.', label='Velander et al. (2013)')
plt.text(self.Rmin*0.7*1.5, 1.5,'Red: '+str(nlens)[:4]+' per sq. deg')
else:
plt.loglog(rp, gt, label=str(Mr_min[int(i/2)])+'< Mr < '+str(Mr_max[int(i/2)])+'; blue', lw=2, color='b', alpha=0.5)
plt.errorbar(self.validation_data[:,0]/1000*(7./10.), self.validation_data[:,iii*2+2]/(7./10.), color='darkblue', lw=2, marker='x', fmt='.')
plt.title('Lens number density: '+str(nlens)[:4]+' per sq. deg')
plt.text(self.Rmin*0.7*1.5, 1.0,'Blue: '+str(nlens)[:4]+' per sq. deg')
ax.legend()
ax.set_xlabel('$r_p$ [Mpc/h]')
ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
ax.set_ylim(0.5, 1000)
plt.tight_layout()
plt.figure(2)
ax = plt.subplot(1,2,ii+1)
plt.loglog(rp, gt, label='['+str(Mr_min[int(i/2)])+', '+str(Mr_max[int(i/2)])+']')
if ii==0:
plt.title('red')
else:
plt.title('blue')
if i==(len(Mask_lens)-1):
plt.legend()
ax.set_xlabel('$r_p$ [Mpc/h]')
ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
ax.set_ylim(0.5, 500)
if self.data=='sdss_main':
ii = np.mod(i,2)
iii = int(i/2)
plt.figure(1)
ax = plt.subplot(2,2,iii+1)
if ii==0:
plt.loglog(rp, gt, label=str(SM_min[int(i/2)])+'< log10(M*) < '+str(SM_max[int(i/2)])+'; red; '+catalog_name, lw=2, color='r', alpha=0.5)
plt.errorbar(self.validation_data[:15,0], self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+1], yerr=self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+2], color='darkred', lw=2, marker='x', fmt='.', label='Mandelbaum et al. (2016)')
plt.text(self.Rmin*0.7*1.5, 1.5,'Red: '+str(nlens)[:4]+' per sq. deg')
else:
plt.loglog(rp, gt, label=str(SM_min[int(i/2)])+'< log10(M*) < '+str(SM_max[int(i/2)])+'; blue', lw=2, color='b', alpha=0.5)
plt.errorbar(self.validation_data[:15,0], self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+1], yerr=self.validation_data[ii*15:(ii+1)*15,int(i/2)*4+2], color='darkblue', lw=2, marker='x', fmt='.')
plt.text(self.Rmin*0.7*1.5, 1,'Blue: '+str(nlens)[:4]+' per sq. deg')
ax.legend()
ax.set_xlabel('$r_p$ [Mpc/h]')
ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
ax.set_ylim(0.5, 1000)
plt.tight_layout()
plt.figure(2)
ax = plt.subplot(1,2,ii+1)
plt.loglog(rp, gt, label='['+str(SM_min[int(i/2)])+', '+str(SM_max[int(i/2)])+']')
if ii==0:
plt.title('red')
else:
plt.title('blue')
if i==(len(Mask_lens)-1):
plt.legend()
ax.set_xlabel('$r_p$ [Mpc/h]')
ax.set_ylabel(r'$\Delta \Sigma [h \ M_\odot / pc^2]$')
ax.set_xlim(self.Rmin*0.7, self.Rmax*1.3)
ax.set_ylim(0.5, 500)
plt.tight_layout()
print(self.data)
if self.data=='cfhtlens' or self.data=='sdss_main':
fig1.savefig(os.path.join(output_dir, 'delta_sigma_'+str(catalog_name)+'1.png'))
plt.close(fig1)
fig2.savefig(os.path.join(output_dir, 'delta_sigma_'+str(catalog_name)+'2.png'))
plt.close(fig2)
else:
fig.savefig(os.path.join(output_dir, 'delta_sigma_'+str(catalog_name)+'.png'))
plt.close(fig)
return TestResult(inspect_only=True)
| 14,192
| 47.773196
| 248
|
py
|
descqa
|
descqa-master/descqa/CheckColors.py
|
from __future__ import print_function, unicode_literals, absolute_import, division
import os
import sys
import re
import numpy as np
import numexpr as ne
from .base import BaseValidationTest, TestResult
from .plotting import plt
from astropy.table import Table
from scipy.spatial import distance_matrix
import ot
from numba import jit
import matplotlib as mpl
__all__ = ['CheckColors']
# Transformations of DES -> SDSS and DES -> CFHT are derived from Equations A9-12 and
# A19-22 of the paper: arxiv.org/abs/1708.01531
# Transformations of SDSS -> CFHT are from:
# http://www.cadc-ccda.hia-iha.nrc-cnrc.gc.ca/en/megapipe/docs/filt.html
color_transformation = {'des2sdss': {}, 'des2cfht': {}, 'sdss2cfht': {}, 'lsst2cfht': {}, 'lsst2sdss':{}, 'sdss2lsst':{}, 'cfht2sdss':{}, 'cfht2lsst':{}}
color_transformation['des2sdss']['g'] = '1.10421 * g - 0.104208 * r'
color_transformation['des2sdss']['r'] = '0.102204 * g + 0.897796 * r'
color_transformation['des2sdss']['i'] = '1.30843 * i - 0.308434 * z'
color_transformation['des2sdss']['z'] = '0.103614 * i + 0.896386 * z'
color_transformation['des2cfht']['g'] = '0.945614 * g + 0.054386 * r'
color_transformation['des2cfht']['r'] = '0.0684211 * g + 0.931579 * r'
color_transformation['des2cfht']['i'] = '1.18646 * i - 0.186458 * z'
color_transformation['des2cfht']['z'] = '0.144792 * i + 0.855208 * z'
color_transformation['sdss2cfht']['u'] = 'u - 0.241 * (u - g)'
color_transformation['sdss2cfht']['g'] = 'g - 0.153 * (g - r)'
color_transformation['sdss2cfht']['r'] = 'r - 0.024 * (g - r)'
color_transformation['sdss2cfht']['i'] = 'i - 0.085 * (r - i)'
color_transformation['sdss2cfht']['z'] = 'z + 0.074 * (i - z)'
color_transformation['cfht2sdss']['u'] = 'u + 0.342 * (u - g)'
color_transformation['cfht2sdss']['g'] = 'g + 0.014 + 0.133 * (g - r) + 0.031 * (g - r) * (g - r)'
color_transformation['cfht2sdss']['r'] = 'r + 0.05 * (r - i)'
color_transformation['cfht2sdss']['i'] = 'i + 0.087 * (r - i)'
color_transformation['cfht2sdss']['z'] = 'z - 0.057 * (i - z)'
#these were derived from cosmoDC2 GCRCatalogs version = 0.14.4
color_transformation['lsst2sdss']['u'] = '0.203 * (u - g) + u + 0.04'
color_transformation['lsst2sdss']['g'] = '0.119 * (g - r) + g + 0.001'
color_transformation['lsst2sdss']['r'] = '0.025 * (r - i) + r + 0.001'
color_transformation['lsst2sdss']['i'] = '0.013 * (i - z) + i + 0.001'
color_transformation['lsst2sdss']['z'] = '-0.031 * (z - y) + z + 0.001'
color_transformation['sdss2lsst']['u'] = '0.932 * u + 1.865'
color_transformation['sdss2lsst']['g'] = '-0.11 * (g - r) + g + 0.001'
color_transformation['sdss2lsst']['r'] = '-0.026 * (r - i) + r - 0.001'
color_transformation['sdss2lsst']['i'] = '-0.01 * (i - z) + i'
color_transformation['sdss2lsst']['z'] = '1.001 * z + 0.043'
#for these I combined the transformations above, CFHT actually should be MegaCam
color_transformation['cfht2lsst']['u'] = '1.251 * u - 0.319 * g + 1.865'
color_transformation['cfht2lsst']['g'] = 'g + 0.00837 * (g - r) + 0.028 * (g - r) * (g - r) + 0.0055 * (r - i) + 0.013'
color_transformation['cfht2lsst']['r'] = 'r - 0.02 * (r - i) - 0.001'
color_transformation['cfht2lsst']['i'] = 'i + 0.086 * (r - i) - 0.00943 * (i - z)'
color_transformation['cfht2lsst']['z'] = '1.058 * z - 0.057 * i + 0.043'
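# Illustrative usage (hypothetical magnitudes, not part of the test): the
# expression strings above are evaluated with numexpr against a dict of band
# arrays, mirroring the call made in CheckColors below, e.g.
#   mags = {'g': np.array([22.1]), 'r': np.array([21.5])}
#   g_sdss = ne.evaluate(color_transformation['des2sdss']['g'],
#                        local_dict=mags, global_dict={})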
class kernelCompare:
def __init__(self,D1, D2):
self._D1 = D1
self._D2 = D2
self._XY = np.vstack((D1, D2))
self._scale = self._computeScale(self._XY)
self._n1 = len(D1)
self._n2 = len(D2)
def _computeScale(self,XY):
        '''Determine the kernel scale parameter from the
        median absolute deviation about the mean.
        '''
Z = XY - np.mean(XY,0)
Z = np.abs(Z)
scaleXY = np.median(Z, 0)
return scaleXY
def _rbf(self,z1, z2):
diff = z1 - z2
diff /= self._scale
diffSq = np.sum(diff * diff,1)
res = np.exp(-diffSq)
return res
@staticmethod
@jit(nopython=True)
def _MMD2ufast( X, Y, scale):
        '''Compute the unbiased MMD2u statistic from the paper:
        $$Ek(x,x') + Ek(y,y') - 2Ek(x,y)$$
        This function implements a fast version in linear time.
        '''
n1 = len(X)
n2 = len(Y)
k1 = 0.0
for i in range(n1-1):
diff = (X[i,:] - X[i+1,:])/scale
diffSq = np.sum(diff * diff)
k1 += np.exp(-diffSq)
k1 /= n1 - 1
k2 = 0.0
for i in range(n2-1):
diff = (Y[i,:] - Y[i+1,:])/scale
diffSq = np.sum(diff * diff)
k2 += np.exp(-diffSq)
k2 /= n2 - 1
k3 = 0.0
p = min(n1, n2)
for i in range(p):
diff = (X[i,:] - Y[i,:])/scale
diffSq = np.sum(diff * diff)
k3 += np.exp(-diffSq)
k3 /= p
result = k1 + k2 - 2*k3
return result
def _compute_null_dist(self,iterations=500):
        '''Compute the permutation null distribution of MMD2u.
        '''
mmd2u_null = np.zeros(iterations)
for i in range(iterations):
idx = np.random.permutation(self._n1 + self._n2)
XY_i = self._XY[idx, :]
            mmd2u_null[i] = self._MMD2ufast(XY_i[:self._n1, :], XY_i[self._n1:, :], self._scale)
return mmd2u_null
def compute(self,iterations=500):
'''Compute MMD^2_u, its null distribution and the p-value of the
kernel two-sample test.
'''
mmd2u = self._MMD2ufast(self._D1, self._D2, self._scale)
mmd2u_null = self._compute_null_dist(iterations)
        p_value = max(1.0/iterations,
                      (mmd2u_null > mmd2u).sum() / float(iterations))
return mmd2u, p_value
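    # Illustrative usage (synthetic data, not part of the test):
    #   X = np.random.normal(size=(500, 2))
    #   Y = np.random.normal(loc=0.3, size=(500, 2))
    #   mmd2u, p = kernelCompare(X, Y).compute(iterations=500)
    # A small p-value suggests the two samples were not drawn from the
    # same distribution.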
def plotDiff(self, coord1, coord2):
v0min = np.min(self._XY[:,coord1])
v1min = np.min(self._XY[:,coord2])
v0max = np.max(self._XY[:,coord1])
v1max = np.max(self._XY[:,coord2])
nSeq = 50
xSeq = np.linspace(v0min, v0max, nSeq)
ySeq = np.linspace(v1min, v1max, nSeq)
#xySeq = np.array(np.meshgrid(xSeq, ySeq)).T.reshape(-1,2)
fGrid = np.zeros((nSeq, nSeq))
znew = np.mean(self._XY, 0)
for i in range(nSeq):
for j in range(nSeq):
znew[coord1] = xSeq[i]
znew[coord2] = ySeq[j]
#fGrid[i,j] = xSeq[i] * xSeq[i] ySeq[j] * ySeq[j]
fpart1 = np.mean(self._rbf(znew, self._D1))
fpart2 = np.mean(self._rbf(znew, self._D2))
fGrid[i,j] = fpart1 - fpart2
fig, ax = plt.subplots()
vmax = np.max(np.abs(fGrid))
vmax = max(vmax, 0.0005)
cs = plt.contourf(xSeq, ySeq, fGrid.T,
cmap = plt.get_cmap("RdBu"),
norm = mpl.colors.Normalize(vmin=-vmax, vmax=vmax))
fig.colorbar(cs, ax=ax, shrink=0.9)
def wass1dim(data1, data2, numBins = 200):
''' Compare two one-dimensional arrays by the
Wasserstein metric (https://en.wikipedia.org/wiki/Wasserstein_metric).
The input data should have outliers removed.
Parameters
----------
data1, data2: two one-dimensional arrays to compare.
numBins: the number of bins.
Outputs
-------
result: the computed Wasserstein metric.
'''
upper = np.max( (data1.max(), data2.max() ) )
lower = np.min( (data1.min(), data2.min() ) )
xbins = np.linspace(lower, upper, numBins + 1)
density1, _ = np.histogram(data1, density = False, bins = xbins)
density2, _ = np.histogram(data2, density = False, bins = xbins)
density1 = density1 / np.sum(density1)
density2 = density2 / np.sum(density2)
# pairwise distance matrix between bins
distMat = distance_matrix(xbins[1:].reshape(numBins,1),
xbins[1:].reshape(numBins,1))
M = distMat
T = ot.emd(density1, density2, M) # optimal transport matrix
result = np.sum(T*M) # the objective data
return result
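# Illustrative check (synthetic data): the 1D Wasserstein distance between two
# unit Gaussians offset by 0.5 is 0.5, so
#   wass1dim(np.random.normal(0, 1, 5000), np.random.normal(0.5, 1, 5000))
# should return a value close to 0.5 (up to binning and sampling noise).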
def CompareDensity(data1, data2):
''' Compare two multi-dimensional arrays by the
Wasserstein metric (https://en.wikipedia.org/wiki/Wasserstein_metric).
The input data should have outliers removed before applying this function.
The multidimensional input data is projected onto multiple directions.
The Wasserstein metric is computed on each projected result.
    This function returns the averaged metric and its standard error.
Parameters
----------
data1: the first multi-dimensional dataset. Each row is
an observation. Each column is a covariate.
    data2: the second multi-dimensional dataset.
    (The number of histogram bins and the number K of random projections
    are set internally below.)
Outputs
-------
mu, sigma: the average discrepancy measure and its standard error.
'''
    K = 40 # number of random projections; increase (e.g. to 4000) for a more stable estimate
result = np.zeros(K)
pCovariate = data1.shape[1]
for i in range(K):
# random projection onto one dimension
transMat = np.random.normal(size = (pCovariate, 1))
transMat = transMat / np.linalg.norm(transMat, 'fro')
data1_proj = data1 @ transMat
data2_proj = data2 @ transMat
        # record the discrepancy on the projected dimension
        # between the two datasets.
result[i] = wass1dim(data1_proj, data2_proj)
return result.mean(), result.std()/np.sqrt(K)
class CheckColors(BaseValidationTest):
"""
Inspection test to represent 2D color plots
"""
def __init__(self, **kwargs): # pylint: disable=W0231
self.kwargs = kwargs
self.test_name = kwargs.get('test_name', 'CheckColors')
self.mag_fields_to_check = kwargs['mag_fields_to_check']
self.redshift_cut = kwargs['redshift_cut']
self.validation_catalog = kwargs['validation_catalog']
self.redshift_cut_val = kwargs['redshift_cut_val']
self.mag_fields_val = kwargs['mag_fields_val']
self.path_val = kwargs['path_val']
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.truncate_z_label = kwargs.get('truncate_z_label', False)
self.truncate_color_labels = kwargs.get('truncate_color_labels', False)
if len(kwargs['xcolor']) != 2 or len(kwargs['ycolor']) != 2:
            print('Warning: color strings should be exactly 2 characters; only the first two bands will be used.')
self.xcolor = kwargs['xcolor']
self.ycolor = kwargs['ycolor']
self.bands = set(kwargs['xcolor'] + kwargs['ycolor'])
self.bands_val = kwargs['bands_val']
self.zlo = kwargs['zlo']
self.zhi = kwargs['zhi']
self.zbins = kwargs['zbins']
self.magcut = kwargs['magcut']
self.magcut_band = kwargs['magcut_band']
self.levels = kwargs['levels']
self.kernel_iterations = kwargs['kernel_iterations']
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
has_results = False
redshift_bins = np.linspace(self.zlo, self.zhi, num=self.zbins+1)
cat_path = os.path.join(self.external_data_dir, self.path_val)
catval = Table.read(cat_path)
labels_val = {band: self.mag_fields_val.format(band) for band in self.bands_val}
datamag_val = {k: catval[v] for k, v in labels_val.items()}
camlist = ['lsst','des','cfht','sdss']
filter_this = None
# plot on both this plot and any summary plots
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
for mag_field in self.mag_fields_to_check:
for cam in camlist:
if cam in mag_field:
filter_this = cam
quantity_list = [mag_field.format(band) for band in self.bands]
quantity_list.append(self.redshift_cut)
if not catalog_instance.has_quantities(quantity_list):
print('Catalog is missing a quantity from',quantity_list)
continue
dataall = catalog_instance.get_quantities(quantity_list)
#labels = {band: mag_field.format(band) for band in self.bands}
#datamag = {k: dataall[v] for k, v in labels.items()}
### Color transformation
color_trans = None
color_trans_name = None
if self.validation_catalog == 'DEEP2' and filter_this != 'lsst' and filter_this != 'cfht':
color_trans_name = '{}2cfht'.format(filter_this) #not sure this is right
elif self.validation_catalog == 'DEEP2' and filter_this == 'lsst':
color_trans_name = 'cfht2lsst'
elif self.validation_catalog == 'SDSS' and filter_this == 'des':
color_trans_name = 'des2sdss' #not sure this is right
elif self.validation_catalog == 'SDSS' and filter_this == 'lsst':
color_trans_name = 'sdss2lsst'
if color_trans_name:
color_trans = color_transformation[color_trans_name]
filter_title = r'\mathrm{{{}}}'.format(filter_this.upper())
if color_trans:
#print('Transforming from %s to %s\n' % (self.validation_catalog,filter_this))
datamag_val_transformed = {}
for band in self.bands_val:
try:
datamag_val_transformed[band] = ne.evaluate(color_trans[band], local_dict=datamag_val, global_dict={})
except KeyError:
continue
filter_title = (r'{}\rightarrow\mathrm{{{}}}'.format(filter_title, self.validation_catalog)
if datamag_val_transformed else filter_title)
datamag_val_transformed['redshift'] = catval[self.redshift_cut_val] #to avoid confusion between z and redshift
catval = datamag_val_transformed
del datamag_val_transformed
del datamag_val
else:
datamag_val['redshift'] = catval[self.redshift_cut_val]
catval = datamag_val
del datamag_val
for i,zlo in enumerate(redshift_bins):
if i == len(redshift_bins)-1:
continue
zhi = redshift_bins[i+1]
mask = (dataall[self.redshift_cut] > zlo) & (dataall[self.redshift_cut] < zhi) & (dataall[mag_field.format(self.magcut_band)] < self.magcut)
mask_val = (catval['redshift'] > zlo) & (catval['redshift'] < zhi) & (catval[self.magcut_band] < self.magcut)
try:
xcolor = np.array(dataall[mag_field.format(self.xcolor[0])][mask] - dataall[mag_field.format(self.xcolor[1])][mask])
ycolor = np.array(dataall[mag_field.format(self.ycolor[0])][mask] - dataall[mag_field.format(self.ycolor[1])][mask])
xcolor_val = np.array(catval['{}'.format(self.xcolor[0])][mask_val] - catval['{}'.format(self.xcolor[1])][mask_val])
ycolor_val = np.array(catval['{}'.format(self.ycolor[0])][mask_val] - catval['{}'.format(self.ycolor[1])][mask_val])
                except KeyError as key_err:
                    print('Key not found:', key_err)
                    sys.exit()
has_results = True
### plot hexbin plot for catalog
fig, ax = plt.subplots()
hb = ax.hexbin(xcolor, ycolor, gridsize=(100), cmap='GnBu', mincnt=1, bins='log')
cb = fig.colorbar(hb, ax=ax)
cb.set_label(catalog_name)
# plot contour plot for validation
xmin = -1.0
xmax = 1.5
ymin = -0.5
ymax = 3.0
hrange = [[xmin,xmax],[ymin,ymax]]
counts,xbins,ybins = np.histogram2d(xcolor_val,ycolor_val,range=hrange,bins=[30,30])
print(xbins,ybins)
cntr1 = ax.contour(counts.transpose(), extent=[xmin,xmax,ymin,ymax],
colors='black',linestyles='solid',levels=self.levels)
ax.clabel(cntr1, inline=True, fmt='%1.1f', fontsize=10)
h1,_ = cntr1.legend_elements()
### CompareDensity block (Wasserstein metric)
simdata = np.column_stack([xcolor,ycolor])
valdata = np.column_stack([xcolor_val,ycolor_val])
cd = CompareDensity(simdata,valdata)
print('Compare density with Wasserstein metric',cd)
### kernel comparison block
obj = kernelCompare(simdata, valdata)
MMD, pValue = obj.compute(iterations=self.kernel_iterations)
print("MMD statistics is {}".format(MMD))
print("The p-value of the test is {}".format(pValue))
if self.truncate_color_labels:
ax.set_xlabel('${} - {}$'.format(self.xcolor[0], self.xcolor[1]))
ax.set_ylabel('${} - {}$'.format(self.ycolor[0], self.ycolor[1]))
else:
ax.set_xlabel('{} - {}'.format(mag_field.format(self.xcolor[0]), mag_field.format(self.xcolor[1])))
ax.set_ylabel('{} - {}'.format(mag_field.format(self.ycolor[0]), mag_field.format(self.ycolor[1])))
if self.truncate_z_label:
title = '${:.2} < z < {:.2}$'.format(zlo, zhi)
else:
title = "{} = {:.2} - {:.2}".format(self.redshift_cut, zlo, zhi)
ax.text(0.05, 0.95, title, transform=ax.transAxes,
verticalalignment='top', color='black', fontsize='small')
title1 = "Compare metric {:.4} +- {:.4}".format(cd[0],cd[1])
title2 = "Kernel comparison MMD {:.4}".format(MMD)
title3 = "p-value = {:.3}".format(pValue)
ax.text(0.05, 0.85, title1, transform=ax.transAxes,
verticalalignment='top', color='black', fontsize='small')
ax.text(0.05, 0.80, title2, transform=ax.transAxes,
verticalalignment='top', color='black', fontsize='small')
ax.text(0.05, 0.75, title3, transform=ax.transAxes,
verticalalignment='top', color='black', fontsize='small')
#ax.set_title('{} vs {}'.format(catalog_name, self.validation_catalog))
plt.legend([h1[0]], [self.validation_catalog], loc=4)
fig.tight_layout()
fig.savefig(os.path.join(output_dir, '{}_{}_{}_{}.png'.format(self.xcolor, self.ycolor,str(i),mag_field.replace('_{}_', '_'))))
plt.close(fig)
if not has_results:
return TestResult(skipped=True)
return TestResult(inspect_only=True)
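# Minimal, self-contained sketch (not part of the test) of the hexbin +
# contour comparison used above, on synthetic colors; all names and values
# here are illustrative assumptions, not DESCQA API.
def _demo_color_contour_comparison():
    import numpy as np
    import matplotlib.pyplot as plt
    rng = np.random.RandomState(42)
    sim = rng.multivariate_normal([0.3, 0.8], [[0.05, 0.02], [0.02, 0.1]], 5000)
    val = rng.multivariate_normal([0.35, 0.85], [[0.05, 0.02], [0.02, 0.1]], 2000)
    fig, ax = plt.subplots()
    # simulated colors as a log-scaled hexbin density map
    ax.hexbin(sim[:, 0], sim[:, 1], gridsize=100, cmap='GnBu', mincnt=1, bins='log')
    # validation colors as contours of a binned 2-D histogram
    counts, _, _ = np.histogram2d(val[:, 0], val[:, 1],
                                  range=[[-1.0, 1.5], [-0.5, 3.0]], bins=[30, 30])
    ax.contour(counts.T, extent=[-1.0, 1.5, -0.5, 3.0], colors='black', linestyles='solid')
    plt.close(fig)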
| 19,030
| 45.079903
| 157
|
py
|
descqa
|
descqa-master/descqa/EmlineRatioTest.py
|
# pylint: disable=E1101,E0611,W0231,W0201
# E1101 throws errors on my setattr() stuff and astropy.units.W and astropy.units.Hz
# E0611 throws an error when importing astropy.cosmology.Planck15
# W0231 gives a warning because __init__() is not called for BaseValidationTest
# W0201 gives a warning when defining attributes outside of __init__()
from __future__ import unicode_literals, absolute_import, division
from os import path
import numpy as np
from astropy import units as u
from astropy.cosmology import Planck15 as cosmo
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
from GCR import GCRQuery
from pandas import read_csv
from descqa import BaseValidationTest, TestResult
emline_names = {'ha': r'H$\alpha$', 'hb': r'H$\beta$', 'oii': '[OII]', 'oiii': '[OIII]'}
__all__ = ['EmlineRatioTest']
class EmlineRatioTest(BaseValidationTest):
"""
    Validation test for the relative luminosity of emission lines in a catalog
Parameters
----------
emline_ratio1: str, optional, (default: 'oii/oiii')
The emission line luminosity ratio to be plotted on the x-axis
emline_ratio2: str, optional, (default: 'hb/oiii')
The emission line luminosity ratio to be plotted on the y-axis
    sdss_file: str, optional, (default: 'descqa/data/sdss_emission_lines/sdss_query_snr10_ew.csv')
        Location of the SDSS data file that will be passed into the sdsscat
        class.
loz_lo: float, optional, (default: 0.0)
This test allows for magnitude cuts in different filters over two redshift regions.
This defines the low end of the low redshift region.
    loz_hi: float, optional, (default: 0.4)
This test allows for magnitude cuts in different filters over two redshift regions.
This defines the high end of the low redshift region.
    hiz_lo: float, optional, (default: 0.4)
This test allows for magnitude cuts in different filters over two redshift regions.
This defines the low end of the high redshift region.
    hiz_hi: float, optional, (default: 0.7)
This test allows for magnitude cuts in different filters over two redshift regions.
This defines the high end of the high redshift region.
loz_band: str, optional, (default: 'r')
The band name to which to apply the low redshift magnitude limit.
hiz_band: str, optional, (default: 'i')
The band name to which to apply the high redshift magnitude limit.
loz_magcut: float, optional, (default: 19.5)
The magnitude cut applied to the band specified by loz_band.
    hiz_magcut: float, optional, (default: 19.9)
        The magnitude cut applied to the band specified by hiz_band.
    ha_cut: float, optional, (default: 2.6e7)
        Minimum Halpha line luminosity (in catalog units) required for catalog
        galaxies to enter the comparison.
sdss_drawnum: int, optional, (default: 30000)
The number of galaxies to draw from the SDSS data file to perform the comparison.
The default number is chosen to (hopefully) not make the 2-D KS test too stringent.
sim_drawnum: int, optional, (default: 30000)
The number of galaxies to draw from the simulated data to perform the comparison.
The default number is chosen to (hopefully) not make the 2-D KS test too stringent.
truncate_cat_name: Bool, optional, (default: False)
Specifies whether the catalog name displayed in the summary figure should be
shortened.
"""
def __init__(self, **kwargs):
np.random.seed(0)
# load test config options
self.kwargs = kwargs
self.emline_ratio1 = kwargs.get('emline_ratio1', 'oii/oiii') # Currently does not support other emission line ratios
self.emline_ratio2 = kwargs.get('emline_ratio2', 'hb/oiii') # Currently does not support other emission line ratios
sdss_file = kwargs.get('sdss_file', 'descqa/data/sdss_emission_lines/sdss_query_snr10_ew.csv')
self.sdsscat = sdsscat(sdss_file)
# The magnitude cuts for galaxies pulled from the catalog. These numbers correspond to
# SDSS spectroscopic detection limits in https://arxiv.org/pdf/1207.7326.pdf
        self.loz_lo = kwargs.get('loz_lo', 0.0)
        self.loz_hi = kwargs.get('loz_hi', 0.4)
self.hiz_lo = kwargs.get('hiz_lo', 0.4)
self.hiz_hi = kwargs.get('hiz_hi', 0.7)
self.loz_band = 'mag_' + kwargs.get('loz_band', 'r') + '_lsst'
self.hiz_band = 'mag_' + kwargs.get('hiz_band', 'i') + '_lsst'
self.loz_magcut = kwargs.get('loz_magcut', 19.5)
self.hiz_magcut = kwargs.get('hiz_magcut', 19.9)
self.ha_cut = kwargs.get('ha_cut', 2.6e7)
# These numbers dictate how large the two samples will be. I have found that
# if the numbers get much larger than this, the 2-D KS test becomes more discriminatory
# than desired, but they can be changed if necessary
self.sdss_drawnum = kwargs.get('sdss_drawnum', 30000)
self.sim_drawnum = kwargs.get('sim_drawnum', 30000)
self.figlist = []
self.runcat_name = []
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.legend_size = kwargs.get('legend_size', 14)
self.vmax = kwargs.get('vmax', 1.5e3)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
#=========================================
# Begin Reading in Data
#=========================================
# check if needed quantities exist
if not catalog_instance.has_quantities(['redshift_true',
self.loz_band,
self.hiz_band,
'emissionLines/totalLineLuminosity:oxygenII3726',
'emissionLines/totalLineLuminosity:oxygenII3729',
'emissionLines/totalLineLuminosity:balmerAlpha6563',
'emissionLines/totalLineLuminosity:balmerBeta4861',
'emissionLines/totalLineLuminosity:nitrogenII6584',
'emissionLines/totalLineLuminosity:oxygenIII4959',
'emissionLines/totalLineLuminosity:oxygenIII5007',
'emissionLines/totalLineLuminosity:sulfurII6716',
'emissionLines/totalLineLuminosity:sulfurII6731']):
return TestResult(skipped=True, summary='Necessary quantities are not present')
loz_filter = GCRQuery((np.isfinite, 'redshift_true'), 'redshift_true > %f' % self.loz_lo, 'redshift_true < %f' % self.loz_hi)
hiz_filter = GCRQuery((np.isfinite, 'redshift_true'), 'redshift_true > %f' % self.hiz_lo, 'redshift_true < %f' % self.hiz_hi)
loz_magcut_filter = GCRQuery((np.isfinite, self.loz_band), self.loz_band + ' < %.1f' % self.loz_magcut)
hiz_magcut_filter = GCRQuery((np.isfinite, self.hiz_band), self.hiz_band + ' < %.1f' % self.hiz_magcut)
ha_fluxlim = GCRQuery((np.isfinite, 'emissionLines/totalLineLuminosity:balmerAlpha6563'),(lambda x: x > self.ha_cut, 'emissionLines/totalLineLuminosity:balmerAlpha6563'))
data = catalog_instance.get_quantities(['redshift_true',
'emissionLines/totalLineLuminosity:oxygenII3726',
'emissionLines/totalLineLuminosity:oxygenII3729',
'emissionLines/totalLineLuminosity:balmerAlpha6563',
'emissionLines/totalLineLuminosity:balmerBeta4861',
'emissionLines/totalLineLuminosity:nitrogenII6584',
'emissionLines/totalLineLuminosity:oxygenIII4959',
'emissionLines/totalLineLuminosity:oxygenIII5007',
'emissionLines/totalLineLuminosity:sulfurII6716',
'emissionLines/totalLineLuminosity:sulfurII6731',
self.loz_band,
                                                self.hiz_band],
                                                # apply the Halpha luminosity cut to both redshift samples
                                                # (& binds tighter than |, so the extra parentheses matter)
                                                filters=(((loz_filter & loz_magcut_filter) |
                                                          (hiz_filter & hiz_magcut_filter)) & ha_fluxlim))
# data = data[data['emissionLines/totalLineLuminosity:balmerAlpha6563'] > self.ha_cut]
z = data['redshift_true']
Halpha = (data['emissionLines/totalLineLuminosity:balmerAlpha6563'] * 3.839e26*u.W).value
Hbeta = (data['emissionLines/totalLineLuminosity:balmerBeta4861'] * 3.839e26*u.W).value
NII6584 = (data['emissionLines/totalLineLuminosity:nitrogenII6584'] * 3.839e26*u.W).value
OIII5007 = (data['emissionLines/totalLineLuminosity:oxygenIII5007'] * 3.839e26*u.W).value
OIII4959 = (data['emissionLines/totalLineLuminosity:oxygenIII4959'] * 3.839e26*u.W).value
OII3726 = (data['emissionLines/totalLineLuminosity:oxygenII3726'] * 3.839e26*u.W).value
OII3729 = (data['emissionLines/totalLineLuminosity:oxygenII3729'] * 3.839e26*u.W).value
SII6716 = (data['emissionLines/totalLineLuminosity:sulfurII6716'] * 3.839e26*u.W).value
SII6731 = (data['emissionLines/totalLineLuminosity:sulfurII6731'] * 3.839e26*u.W).value
SIItot = SII6716 + SII6731
OIIItot = OIII5007 + OIII4959
OIItot = OII3726 + OII3729
# Reduce the sample size by drawing self.sim_drawnum galaxies
# indices = np.random.choice(np.arange(len(Halpha)), size=self.sim_drawnum, replace=False)
indices = self.sdsscat.drawinds(z, size = self.sim_drawnum, catname = catalog_name)
self.z = z[indices]
self.ha = Halpha[indices]
self.hb = Hbeta[indices]
self.oii = OIItot[indices]
self.oiii = OIIItot[indices]
self.nii6584 = NII6584[indices]
self.oiii5007 = OIII5007[indices]
self.oiii4959 = OIII4959[indices]
self.oii3726 = OII3726[indices]
self.oii3729 = OII3729[indices]
self.sii6716 = SII6716[indices]
self.sii6731 = SII6731[indices]
self.siitot = SIItot[indices]
#=========================================
# End Reading in Data
#=========================================
#=========================================
# Perform the Test and Return Results
#=========================================
if self.truncate_cat_name:
thisfig, pvalue, medianshift = self.makeplot(catalog_name.split('_')[0])
else:
thisfig, pvalue, medianshift = self.makeplot(catalog_name)
self.figlist.append(thisfig)
self.runcat_name.append(catalog_name)
if np.log10(pvalue) >= -4. and np.linalg.norm(medianshift) <= 0.25:
return TestResult(pvalue, passed=True)
elif np.linalg.norm(medianshift) <= 0.25:
return TestResult(pvalue, passed=False, summary='P-value must exceed 1e-4.')
elif np.log10(pvalue) >= -4.:
return TestResult(pvalue, passed=False, summary='Total median shift must be less than or equal to 0.25 dex.')
else:
return TestResult(pvalue, passed=False, summary='P-value must exceed 1e-4 and total median shift must be less than or equal to 0.25 dex.')
def makeplot(self, catalog_name):
"""
Make a summary plot of the test results
"""
#=========================================
# Begin Test and Plotting
#=========================================
fig = plt.figure(figsize=(16, 6.5))
sp1 = fig.add_subplot(121)
sp2 = fig.add_subplot(122)
dist1 = [[], []]
dist2 = [[], []]
xlabel = ''
ylabel = ''
# Generate each distribution
# dist1 is SDSS data
# dist2 is simulation data
for cat, dist in [[self.sdsscat, dist1], [self, dist2]]:
emline1 = getattr(cat, self.emline_ratio1.split('/')[0])
emline2 = getattr(cat, self.emline_ratio1.split('/')[1])
er1 = np.log10(emline1/emline2)
emline1 = getattr(cat, self.emline_ratio2.split('/')[0])
emline2 = getattr(cat, self.emline_ratio2.split('/')[1])
er2 = np.log10(emline1/emline2)
good_inds = np.where(np.isfinite(er1) & np.isfinite(er2))
dist[0] = er1[good_inds]
dist[1] = er2[good_inds]
xlabel = r'$\log_{10}$(' + emline_names[self.emline_ratio1.split('/')[0]] + '/' + emline_names[self.emline_ratio1.split('/')[1]] + ')'
ylabel = r'$\log_{10}$(' + emline_names[self.emline_ratio2.split('/')[0]] + '/' + emline_names[self.emline_ratio2.split('/')[1]] + ')'
dist1 = np.array(dist1)
dist2 = np.array(dist2)
# Draw a number of SDSS galaxies equal to self.sdss_drawnum
sdss_draw_inds = np.random.choice(np.arange(len(dist1[0])), size=self.sdss_drawnum)
dist1 = dist1[:, sdss_draw_inds]
h1 = sp1.hist2d(*dist1, bins=50, range=[[-1.2, 1.2], [-1.25, 1]], norm=LogNorm(vmax=self.vmax), cmap='plasma_r')
h2 = sp2.hist2d(*dist2, bins=50, range=[[-1.2, 1.2], [-1.25, 1]], norm=LogNorm(vmax=self.vmax), cmap='plasma_r')
print(' Maximum number densities: SDSS {:0.3g}; {} {:0.3g}'.format(np.max(h1[0]), catalog_name, np.max(h2[0])))
# Shift the median of the simulated galaxies to match that of the SDSS galaxies
# before performing the comparison
medianshift = np.nanmedian(dist1, axis=1).reshape(2, 1) - np.nanmedian(dist2, axis=1).reshape(2, 1)
medianmatch_dist2 = dist2 + medianshift
pvalue, KSstat = kstest_2d(dist1, medianmatch_dist2)
# Plotting stuff
sp1.set_xlabel(xlabel, fontsize=20)
sp1.set_ylabel(ylabel, fontsize=20)
sp2.set_xlabel(xlabel, fontsize=20)
sp1.tick_params(labelsize=16)
sp2.tick_params(labelsize=16)
sp1.set_xlim(-1.2, 1.2)
sp1.set_ylim(-1.25, 1)
sp2.set_xlim(-1.2, 1.2)
sp2.set_ylim(-1.25, 1)
sp2.set_yticklabels([])
fig.subplots_adjust(right=0.9)
cbar_ax = fig.add_axes([0.95, 0.1, 0.03, 0.75])
fig.colorbar(h2[3], cax=cbar_ax)
cbar_ax.tick_params(labelsize=16)
plt.subplots_adjust(wspace=0.0)
sp2.text(0.95, 0.98, 'log p = %.1f\n' % np.log10(pvalue) + r'D$_\mathrm{KS}$' + ' = %.2f\nMed Shift = [%.2f, %.2f]' % (KSstat, *medianshift.T[0]),
fontsize=18, transform=sp2.transAxes, ha='right', va='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))
sp1.text(0.98, 0.02, 'SDSS', fontsize=24, ha='right', va='bottom', transform=sp1.transAxes)
sp2.text(0.98, 0.02, catalog_name, fontsize=24, ha='right', va='bottom', transform=sp2.transAxes)
return fig, pvalue, medianshift
def summary_file(self, output_dir):
"""
Saves a summary file with information about the cuts performed on the data in order to
perform the test
"""
with open(path.join(output_dir, 'Emline_Lum_Ratio_Summary.txt'), 'w') as writefile:
writefile.write('Simulation Galaxies Drawn: %i\n' % self.sim_drawnum)
writefile.write('SDSS Galaxies Drawn: %i\n' % self.sdss_drawnum)
writefile.write('loz_lo: %.1f\n' % self.loz_lo)
writefile.write('loz_hi: %.1f\n' % self.loz_hi)
writefile.write('hiz_lo: %.1f\n' % self.hiz_lo)
writefile.write('hiz_hi: %.1f\n' % self.hiz_hi)
writefile.write('loz_band: ' + self.loz_band + '\n')
writefile.write('hiz_band: ' + self.hiz_band + '\n')
writefile.write('loz_magcut: %.1f' % self.loz_magcut + '\n')
writefile.write('hiz_magcut: %.1f' % self.hiz_magcut + '\n')
writefile.write('\n')
writefile.write('=================\n')
writefile.write(' Catalogs Tested \n')
writefile.write('=================\n')
for thiscat in self.runcat_name:
writefile.write(thiscat + '\n')
def conclude_test(self, output_dir):
# Save a summary file with the details of the test
self.summary_file(output_dir)
# Save all of the summary plots into output_dir
for thisfig, thiscat in zip(self.figlist, self.runcat_name):
thisfig.savefig(path.join(output_dir, thiscat + '_emline_ratios.png'), bbox_inches='tight')
plt.close(thisfig)
def fhCounts(x, edge):
    # count points of x in each of the four quadrants around `edge`
    # (the 2-D analogue of evaluating a CDF at that point)
templist = [np.sum((x[0, 0:] >= edge[0]) & (x[1, 0:] >= edge[1])),
np.sum((x[0, 0:] <= edge[0]) & (x[1, 0:] >= edge[1])),
np.sum((x[0, 0:] <= edge[0]) & (x[1, 0:] <= edge[1])),
np.sum((x[0, 0:] >= edge[0]) & (x[1, 0:] <= edge[1]))]
return templist
def kstest_2d(dist1, dist2):
"""
Perform the 2-D KS-test on dist1 and dist2.
"""
num1 = dist1.shape[1]
num2 = dist2.shape[1]
KSstat = -np.inf
for iX in np.arange(0, num1+num2):
if iX < num1:
edge = dist1[0:, iX]
else:
edge = dist2[0:, iX-num1]
vfCDF1 = np.array(fhCounts(dist1, edge)) / num1
vfCDF2 = np.array(fhCounts(dist2, edge)) / num2
vfThisKSTS = np.abs(vfCDF1 - vfCDF2)
fKSTS = np.amax(vfThisKSTS)
if fKSTS > KSstat:
KSstat = fKSTS
    # Peacock Z calculation and P estimation
    n = num1 * num2 / (num1 + num2)
    Zn = np.sqrt(n) * KSstat
    Zinf = Zn / (1 - 0.53 * n**(-0.9))
    pValue = 2 * np.exp(-2 * (Zinf - 0.5)**2)
# Clip invalid values for P
if pValue > 1.0:
pValue = 1.0
# H = (pValue <= alpha)
return pValue, KSstat
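# Minimal usage sketch for kstest_2d (assumed inputs: two (2, N) arrays of
# log line-ratio pairs). Two draws from the same distribution should give a
# large p-value; the test above fails catalogs with log10(p) < -4.
def _demo_kstest_2d():
    rng = np.random.RandomState(0)
    dist1 = rng.normal(size=(2, 500))
    dist2 = rng.normal(size=(2, 500))
    pvalue, ksstat = kstest_2d(dist1, dist2)
    print('p = {:.3g}, D_KS = {:.3f}'.format(pvalue, ksstat))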
class sdsscat:
"""
This class holds the SDSS data in an easily accessible form, and also dust corrects
the emission lines using the Balmer Decrement.
"""
def __init__(self, infile):
self.Calzetti2000 = np.vectorize(self.Calzetti2000_novec)
data = read_csv(infile)
usecols = ['z', 'z_err', 'oii_flux', 'oii_flux_err', 'oiii_flux', 'oiii_flux_err',
'h_alpha_flux', 'h_alpha_flux_err', 'h_beta_flux', 'h_beta_flux_err',
'lgm_tot_p50', 'lgm_tot_p16', 'lgm_tot_p84', 'sfr_tot_p50', 'sfr_tot_p16', 'sfr_tot_p84',
'oh_p50', 'h_alpha_eqw', 'oiii_4959_eqw', 'oiii_5007_eqw', 'oii_3726_eqw', 'oii_3729_eqw', 'h_beta_eqw',
'h_alpha_eqw_err', 'oiii_4959_eqw_err', 'oiii_5007_eqw_err', 'oii_3726_eqw_err', 'oii_3729_eqw_err', 'h_beta_eqw_err']
newnames = ['z', 'z_err', 'oii_uncorr', 'oii_err_uncorr', 'oiii_uncorr', 'oiii_err_uncorr', 'ha_uncorr', 'ha_err_uncorr', 'hb_uncorr', 'hb_err_uncorr',
'logmstar', 'logmstar_lo', 'logmstar_hi', 'sfr', 'sfr_lo', 'sfr_hi', 'o_abundance', 'ha_ew_uncorr', 'oiii4959_ew_uncorr', 'oiii5007_ew_uncorr', 'oii3726_ew_uncorr', 'oii3729_ew_uncorr', 'hb_ew_uncorr',
'ha_ew_err', 'oiii4959_ew_err_uncorr', 'oiii5007_ew_err_uncorr', 'oii3726_ew_err_uncorr', 'oii3729_ew_err_uncorr', 'hb_ew_err_uncorr']
for col, name in zip(usecols, newnames):
setattr(self, name, data[col].values)
for x, colname in enumerate(newnames):
if 'flux' in usecols[x]:
setattr(self, colname, getattr(self, colname)/10**17) # Units are 10**-17 erg/s/cm^2
# Dust correction
# E(B-V) = log_{10}(ha_uncorr/(hb_uncorr*2.86)) *(-0.44/0.4) / (k(lam_ha) - k(lam_hb))
self.EBV = np.log10(self.ha_uncorr/(self.hb_uncorr*2.86)) * (-.44/0.4) / (self.Calzetti2000(6563.) - self.Calzetti2000(4863.))
# A_oiii = self.Calzetti2000(4980.) * self.EBV / 0.44
# A_oii = self.Calzetti2000(3727.) * self.EBV / 0.44
# A_ha = self.Calzetti2000(6563.) * self.EBV / 0.44
# A_hb = self.Calzetti2000(4863.) * self.EBV / 0.44
for x, colname in enumerate(newnames):
if 'ha_' in colname:
wave = 6563.
elif 'hb_' in colname:
wave = 4863.
elif 'oii_' in colname:
wave = 3727.
elif 'oiii_' in colname:
wave = 4980.
elif 'oii3726_' in colname:
wave = 3726.
elif 'oii3729_' in colname:
wave = 3729.
elif 'oiii4959_' in colname:
                wave = 4959.
elif 'oiii5007_' in colname:
wave = 5007.
if 'uncorr' in colname and 'ew' not in colname:
A_line = self.Calzetti2000(wave) * self.EBV / 0.44
newflux = getattr(self, colname) * np.power(10, 0.4*A_line)
setattr(self, colname[:-7], newflux)
elif 'uncorr' in colname and 'ew' in colname:
multiplier = np.power(10, 0.4 * self.Calzetti2000(wave) * self.EBV * ((1./.44) - 1.))
setattr(self, colname[:-7], getattr(self, colname)*multiplier)
self.ha_lum = self.ha * 4 * np.pi * (cosmo.luminosity_distance(self.z).to('cm').value)**2
goodind = np.where(np.log10(self.ha_lum) < 45)[0]
for x, colname in enumerate(list(self.__dict__.keys())):
if colname != 'Calzetti2000':
setattr(self, colname, getattr(self, colname)[goodind])
self.oiii_ew = self.oiii4959_ew + self.oiii5007_ew
self.oii_ew = self.oii3726_ew + self.oii3729_ew
self.oiii_ew_err = np.sqrt(self.oiii4959_ew_err**2. + self.oiii5007_ew_err**2.)
self.oii_ew_err = np.sqrt(self.oii3726_ew_err**2. + self.oii3729_ew_err**2.)
def Calzetti2000_novec(self, lam):
# Plug in lam in angstroms
# From Calzetti2000
# Returns k(lam)
lam = lam * 0.0001 # Convert angstroms to microns
# Rprime_v = 4.88 # pm 0.98 from Calzetti 1997b
Rprime_v = 4.05
        if 0.1200 <= lam < 0.6300:
            return 2.659 * (-2.156 + (1.509/lam) - (0.198/(lam**2.)) + (0.011/(lam**3.))) + Rprime_v
        elif 0.6300 <= lam < 2.2000:
return 2.659 * (-1.857 + (1.04/lam)) + Rprime_v
else:
return np.NaN
def drawinds(self, z, size, catname = None, cache = 'descqa/data/sdss_emission_lines/cache/'):
if catname:
if path.isfile(cache + catname + str(int(size))):
print('WARNING: READING REDSHIFT MATCHES FROM FILE')
return np.loadtxt(cache + catname + str(int(size)), dtype = int)
else:
indices = np.arange(len(z))
sdss_z = np.copy(self.z)
np.random.shuffle(sdss_z)
sdss_z = sdss_z[:size]
return_inds = []
for thisz in sdss_z:
close_inds = indices[np.where(np.abs(z-thisz) <= 0.01)]
return_inds.append(np.random.choice(close_inds))
return_inds = np.array(return_inds, dtype = int)
np.savetxt(cache + catname + str(int(size)), return_inds, fmt = '%i')
return return_inds
else:
indices = np.arange(len(z))
sdss_z = np.copy(self.z)
np.random.shuffle(sdss_z)
sdss_z = sdss_z[:size]
return_inds = []
for thisz in sdss_z:
close_inds = indices[np.where(np.abs(z-thisz) <= 0.05)]
return_inds.append(np.random.choice(close_inds))
return_inds = np.array(return_inds, dtype = int)
return return_inds
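# Illustrative sketch (not SDSS pipeline code) of the Balmer-decrement dust
# correction performed in sdsscat.__init__: the color excess follows from the
# observed Halpha/Hbeta ratio relative to the Case B value of 2.86, and each
# line flux is then brightened by 10**(0.4 * A_line). Calzetti2000_novec does
# not use `self`, so it can be called unbound here.
def _demo_balmer_decrement():
    k = lambda lam: sdsscat.Calzetti2000_novec(None, lam)
    ha_obs, hb_obs = 280e-17, 70e-17  # made-up fluxes in erg/s/cm^2
    ebv = np.log10(ha_obs / (hb_obs * 2.86)) * (-0.44 / 0.4) / (k(6563.) - k(4863.))
    a_ha = k(6563.) * ebv / 0.44
    print('E(B-V) = {:.3f}, corrected Ha = {:.3g}'.format(ebv, ha_obs * 10**(0.4 * a_ha)))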
| 23,850
| 41.139576
| 221
|
py
|
descqa
|
descqa-master/descqa/version.py
|
__version__ = '2.0.0-0.7.0'
| 28
| 13.5
| 27
|
py
|
descqa
|
descqa-master/descqa/QuickBkgTest.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import sqlite3
import numpy as np
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['QuickBkgTest']
def compute_bkg(image):
"""
Routine to give an estimate of the mean, median and std
of the background level from a given image
Args:
-----
image : np.array
Returns:
--------
mean_bkg : Mean background level
median_bkg : Median background level
bkg_noise: Background noise level
"""
image = image.flatten()
q_low, q_high = np.percentile(image, [5, 95]) # This is kind of arbitrary but it works fine
image = image[(image > q_low) & (image < q_high)]
return np.mean(image), np.median(image), np.std(image)
def get_predicted_bkg(visit, validation_dataset, db_file, band):
if validation_dataset.lower() == 'opsim':
return get_opsim_bkg(visit, db_file, band)
else:
raise NotImplementedError('only "opsim" is currently supported')
# TODO add imSim option
#if validation_dataset == 'imSim':
# return get_imsim_bkg(visit,band)
def compute_sky_counts(mag, band, nsnap):
    # Data from https://github.com/lsst-pst/syseng_throughputs/blob/master/plots/table2
    # (mag0, counts0): zeropoint sky brightness and the counts it produces
    zeropoints = {
        'u': (22.95, 50.2),
        'g': (22.24, 384.6),
        'r': (21.20, 796.2),
        'i': (20.47, 1108.1),
        'z': (19.60, 1687.9),
        'y': (18.63, 2140.8),
    }
    mag0, counts0 = zeropoints[band]
    return nsnap * counts0 * 10**(-0.4 * (mag - mag0))
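# Quick illustrative check of compute_sky_counts (not used by the test): at
# the tabulated r-band zeropoint sky brightness the formula returns the
# tabulated counts, and a sky one magnitude darker gives 10**(-0.4) of that.
def _demo_sky_counts():
    print(compute_sky_counts(21.20, 'r', 1))  # 796.2
    print(compute_sky_counts(22.20, 'r', 1))  # ~317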
def get_airmass_raw_seeing(visit, db_file):
conn = sqlite3.connect(db_file)
cur = conn.cursor()
cur.execute(
"SELECT airmass, filtSkyBrightness, finSeeing, rawSeeing, visitExpTime, fiveSigmaDepth FROM ObsHistory WHERE obsHistID==%d"
% (visit))
rows = cur.fetchall()
return rows[0]
def get_opsim_bkg(visit,db_file,band):
skybrightness = get_airmass_raw_seeing(int(visit),db_file)[1]
# We are going to compute the background counts given OpSim's sky-brightness
mean_bkg = compute_sky_counts(skybrightness,band,1)
median_bkg = mean_bkg # We assume that the background is completely homogeneous
bkg_noise = np.sqrt(mean_bkg) # We assume Poisson noise
return mean_bkg, median_bkg, bkg_noise
class QuickBkgTest(BaseValidationTest):
"""
Check of mean, median and standard deviation of the image background.
    We compare to expected values from OpSim or imSim.
Args:
-----
label (str): x-label for the validation plots
    visit (int): Visit number to analyze
band (str): Filter/band to analyze
bkg_validation_dataset (str): Name of the validation data to which compare, for now,
only opsim is available.
"""
def __init__(self, label, bkg_validation_dataset, visit, band, db_file, **kwargs):
# pylint: disable=W0231
self.validation_data = get_predicted_bkg(visit, bkg_validation_dataset, db_file, band)
self.label = label
self.visit = visit
self.band = band
self.bkg_validation_dataset = bkg_validation_dataset
def post_process_plot(self, ax):
ymin, ymax = ax[0].get_ylim()
ax[0].plot(
np.ones(3) * self.validation_data[0],
np.linspace(ymin, ymax, 3),
label='{}-Mean'.format(self.bkg_validation_dataset))
ax[0].plot(
np.ones(3) * self.validation_data[1],
np.linspace(ymin, ymax, 3),
label='{}-Median'.format(self.bkg_validation_dataset))
ax[0].legend()
ymin, ymax = ax[1].get_ylim()
ax[1].plot(
np.ones(3) * self.validation_data[2],
np.linspace(ymin, ymax, 3),
label='{}'.format(self.bkg_validation_dataset))
ax[1].legend()
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
# Pass one focal plane and analyze sensor by sensor
rafts = catalog_instance.focal_plane.rafts
median_bkg = {}
mean_bkg = {}
bkg_noise = {}
for rname, r in rafts.items():
for sname, s in r.sensors.items():
aux1, aux2, aux3 = compute_bkg(s.get_data())
mean_bkg.update({'%s-%s' % (rname, sname): aux1})
median_bkg.update({'%s-%s' % (rname, sname): aux2})
bkg_noise.update({'%s-%s' % (rname, sname): aux3})
fig, ax = plt.subplots(2, 1)
ax[0].hist(list(mean_bkg.values()), histtype='step', label='Mean')
ax[0].hist(list(median_bkg.values()), histtype='step', label='Median')
ax[0].set_xlabel('{} [ADU]'.format(self.label))
ax[0].set_ylabel('Number of sensors')
ax[1].hist(list(bkg_noise.values()), histtype='step')
ax[1].set_xlabel('{} noise [ADU]'.format(self.label))
ax[1].set_ylabel('Number of sensors')
score = sum(median_bkg.values()) / len(median_bkg) / self.validation_data[0] - 1.
score = np.fabs(score)
self.post_process_plot(ax)
        fig.savefig(os.path.join(output_dir, 'plot.png'))
plt.close(fig)
return TestResult(score, passed=score < 0.2)
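# Minimal sketch of compute_bkg on a synthetic sensor image (illustrative
# only): the 5-95 percentile clip rejects outliers such as bright sources
# before the statistics are taken, so the returned std is somewhat smaller
# than the true Poisson noise.
def _demo_compute_bkg():
    rng = np.random.RandomState(1)
    image = rng.poisson(800.0, size=(200, 200)).astype(float)
    image[:5, :5] = 1e5  # a fake bright source removed by the clipping
    mean_bkg, median_bkg, bkg_noise = compute_bkg(image)
    print(mean_bkg, median_bkg, bkg_noise)  # ~800, ~800, < sqrt(800)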
| 5,339
| 34.131579
| 131
|
py
|
descqa
|
descqa-master/descqa/CorrelationsTwoPoint.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
from collections import defaultdict
import re
import numpy as np
import scipy.special as scsp
import treecorr
import healpy as hp
from sklearn.cluster import k_means
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
from .utils import (generate_uniform_random_ra_dec_footprint,
get_healpixel_footprint,
generate_uniform_random_dist)
__all__ = ['CorrelationsAngularTwoPoint', 'CorrelationsProjectedTwoPoint',
'DEEP2StellarMassTwoPoint']
def redshift2dist(z, cosmology):
""" Convert redshift to comoving distance in units Mpc/h.
Parameters
----------
z : float array like
cosmology : astropy.cosmology instance
Returns
-------
float array like of comoving distances
"""
return cosmology.comoving_distance(z).to('Mpc').value * cosmology.h
class CorrelationUtilities(BaseValidationTest):
"""
Base class for Correlation classes that loads catalogs, cuts a catalog
    sample, plots the correlation results, and scores the results of the
correlation measurements by comparing them to test data.
    The constructor takes a loaded yaml file containing the settings
    for this test. See the following file for an example:
descqa/configs/tpcf_Zehavi2011_rSDSS.yaml
"""
# pylint: disable=super-init-not-called,abstract-method
def __init__(self, **kwargs):
self.test_name = kwargs['test_name']
self.requested_columns = kwargs['requested_columns']
self.test_samples = kwargs['test_samples']
self.test_sample_labels = kwargs['test_sample_labels']
self.Mag_units = kwargs.get('Mag_units', None)
self.output_filename_template = kwargs['output_filename_template']
validation_filepath = os.path.join(self.data_dir, kwargs['data_filename'])
self.validation_data = np.loadtxt(validation_filepath, skiprows=2)
self.data_label = kwargs['data_label']
self.test_data = kwargs['test_data']
self.fig_xlabel = kwargs['fig_xlabel']
self.fig_ylabel = kwargs['fig_ylabel']
self.fig_ylim = kwargs.get('fig_ylim', None)
self.fig_subplots_nrows, self.fig_subplots_ncols = kwargs.get('fig_subplots', (1, 1))
self.fig_subplot_groups = kwargs.get('fig_subplot_groups', [None])
self.fig_xlim = kwargs.get('fig_xlim', None)
self.tick_size = kwargs.get('tick_size', 12)
self.mask_large_errors = kwargs.get('mask_large_errors', False)
self.treecorr_config = {
'min_sep': kwargs['min_sep'],
'max_sep': kwargs['max_sep'],
'bin_size': kwargs['bin_size'],
}
if kwargs.get('var_method', None):
self.treecorr_config['var_method'] = kwargs['var_method']
self.npatch = kwargs.get('npatch', 1)
self.random_nside = kwargs.get('random_nside', 1024)
self.random_mult = kwargs.get('random_mult', 3)
# jackknife errors
self.jackknife = kwargs.get('jackknife', False)
if self.jackknife:
self.N_jack = kwargs.get('N_jack', 30)
jackknife_quantities = kwargs.get('jackknife_quantities',
{'ra':['ra', 'ra_true'], 'dec':['dec', 'dec_true']})
if 'ra' not in self.requested_columns or 'dec' not in self.requested_columns:
self.requested_columns.update(jackknife_quantities)
self.use_diagonal_only = kwargs.get('use_diagonal_only', True)
self.r_validation_min = kwargs.get('r_validation_min', 1)
self.r_validation_max = kwargs.get('r_validation_max', 10)
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.title_in_legend = kwargs.get('title_in_legend', False)
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 10)
self.survey_label = kwargs.get('survey_label', '')
self.no_title = kwargs.get('no_title', False)
self.legend_title = kwargs.get('legend_title', '')
@staticmethod
def load_catalog_data(catalog_instance, requested_columns, test_samples, h=1):
""" Load requested columns from a Generic Catalog Reader instance and
trim to the min and max of the requested cuts in test_samples.
Parameters
----------
catalog_instance : a Generic Catalog object.
requested_columns : dictionary of lists of strings
A dictionary keyed on a simple column name (e.g. mag, z)
with values of lists containing string names to try to load from
the GCR catalog instance.
Example:
            {'Mag': ['Mag_true_r_sdss_z0', 'Mag_true_r_des_z0'], ...}
test_samples : dictionary of dictionaries
Dictionaries containing simple column names and min max values to
cut on.
Examples:
            {'Mr_-23_-22': {'Mag': {'min': -23, 'max': -22},
                            'z': {'min': 0.1031, 'max': 0.2452}}}
Returns
-------
        GCR catalog instance containing simplified column names and cut to the
min/max of all requested test samples.
"""
colnames = dict()
for col_key, possible_names in requested_columns.items():
colnames[col_key] = catalog_instance.first_available(*possible_names)
if not all(v for v in colnames.values()):
return None
col_value_mins = defaultdict(list)
col_value_maxs = defaultdict(list)
Mag_shift = 5*np.log10(h) # Magnitude shift to adjust for h=1 units in data (eg Zehavi et. al.)
print('Magnitude shift for h={:.2f} = {:.2f}'.format(h, Mag_shift))
for conditions in test_samples.values():
for col_key, condition in conditions.items():
if not isinstance(condition, dict):
continue
if 'min' in condition:
col_value_mins[col_key].append(condition['min'])
if 'max' in condition:
col_value_maxs[col_key].append(condition['max'])
filters = [(np.isfinite, c) for c in colnames.values()]
if catalog_instance.has_quantity('extendedness'):
filters.append('extendedness == 1')
# can remove ultra-faint synthetics if present in catalog by cutting on negative halo_id
for col_key, col_name in colnames.items():
if col_key in col_value_mins and col_value_mins[col_key]:
min_value = min(col_value_mins[col_key]) + Mag_shift if 'Mag' in col_key else min(col_value_mins[col_key])
filters.append('{} >= {}'.format(col_name, min_value))
if col_key in col_value_maxs and col_value_maxs[col_key]:
max_value = max(col_value_maxs[col_key]) + Mag_shift if 'Mag' in col_key else max(col_value_maxs[col_key])
filters.append('{} < {}'.format(col_name, max_value))
print('Catalog filters:', filters)
catalog_data = catalog_instance.get_quantities(list(colnames.values()), filters=filters)
catalog_data = {k: catalog_data[v] for k, v in colnames.items()}
return catalog_data
@staticmethod
def create_test_sample(catalog_data, test_sample, h=1):
""" Select a subset of the catalog data an input test sample.
This function should be overloaded in inherited classes for more
complex cuts (e.g. color cuts).
Parameters
----------
catalog_data : a GenericCatalogReader catalog instance
test_sample : dictionary of dictionaries
A dictionary specifying the columns to cut on and the min/max values of
the cut.
Example:
{Mag: {min: -23, max: -22}
z: {min: 0.1031, max: 0.2452}}
Returns
-------
A GenericCatalogReader catalog instance cut to the requested bounds.
"""
filters = []
Mag_shift = 5*np.log10(h) # Magnitude shift to adjust for h=1 units in data (eg Zehavi et. al.)
for key, condition in test_sample.items():
if isinstance(condition, dict):
if 'max' in condition:
max_value = condition['max'] + Mag_shift if 'Mag' in key else condition['max']
filters.append('{} < {}'.format(key, max_value))
if 'min' in condition:
min_value = condition['min'] + Mag_shift if 'Mag' in key else condition['min']
filters.append('{} >= {}'.format(key, min_value))
else: #customized filter
if 'Mag_shift' in condition:
condition = re.sub('Mag_shift', '{:0.2f}'.format(Mag_shift), condition)
print('Substituted filter to adjust for Mag shifts: {}'.format(condition))
filters.append(condition)
print('Test sample filters for {}'.format(test_sample), filters)
return GCRQuery(*filters).filter(catalog_data)
def plot_data_comparison(self, corr_data, catalog_name, output_dir):
""" Plot measured correlation functions and compare them against test
data.
Parameters
----------
corr_data : list of float array likes
List containing resultant data from correlation functions computed
in the test.
Example:
[[np.array([...]), np.array([...]), np.array([...])], ...]
catalog_name : string
Name of the catalog used in the test.
output_dir : string
Full path of the directory to write results to.
"""
# pylint: disable=no-member
fig_xsize = 5 if self.fig_subplots_ncols==1 else 7 #widen figure for subplots
fig_ysize = 5 if self.fig_subplots_ncols==1 else 4 #narrow y-axis for subplots
fig, ax_all = plt.subplots(self.fig_subplots_nrows, self.fig_subplots_ncols, squeeze=False,
figsize=(min(2, self.fig_subplots_ncols)*fig_xsize,
min(2, self.fig_subplots_nrows)*fig_ysize))
for nx, (ax, this_group) in enumerate(zip(ax_all.flat, self.fig_subplot_groups)):
if this_group is None:
this_group = self.test_samples
colors = plt.cm.plasma_r(np.linspace(0.1, 1, len(this_group)))
if not this_group:
ax.set_visible(False)
continue
for sample_name, color in zip(this_group, colors):
cat_data = True
try:
sample_corr = corr_data[sample_name]
except KeyError:
cat_data = False
sample_data = self.test_data[sample_name]
sample_label = self.test_sample_labels.get(sample_name)
ax.loglog(self.validation_data[:, 0],
self.validation_data[:, sample_data['data_col']],
c=color,
label=' '.join([self.survey_label, sample_label]))
if 'data_err_col' in sample_data:
y1 = (self.validation_data[:, sample_data['data_col']] +
self.validation_data[:, sample_data['data_err_col']])
y2 = (self.validation_data[:, sample_data['data_col']] -
self.validation_data[:, sample_data['data_err_col']])
if self.fig_ylim is not None:
y2[y2 <= 0] = self.fig_ylim[0]*0.9
ax.fill_between(self.validation_data[:, 0], y1, y2, lw=0, color=color, alpha=0.25)
if cat_data:
if self.mask_large_errors and self.fig_ylim is not None:
mask = (sample_corr[1] - sample_corr[2]) > min(self.fig_ylim)
else:
mask = np.ones(len(sample_corr[1]), dtype=bool)
ax.errorbar(sample_corr[0][mask], sample_corr[1][mask], sample_corr[2][mask],
label=' '.join([catalog_name, sample_label]),
marker='o', ls='', c=color)
self.decorate_plot(ax, catalog_name, n=nx)
fig.tight_layout()
fig.subplots_adjust(hspace=0, wspace=0)
fig.savefig(os.path.join(output_dir, '{:s}.png'.format(self.test_name)), bbox_inches='tight')
plt.close(fig)
def get_legend_title(self, test_samples, exclude='mstellar'):
"""
"""
legend_title = ''
filter_ids = list(set([k for v in test_samples.values() for k in v.keys() if exclude not in k]))
for filter_id in filter_ids:
legend_title = self.get_legend_subtitle(test_samples, filter_id=filter_id, legend_title=legend_title)
return legend_title
@staticmethod
def get_legend_subtitle(test_samples, filter_id='z', legend_title=''):
"""
"""
legend_title = legend_title if len(legend_title) == 0 else '{}; '.format(legend_title)
min_values = [test_samples[k][filter_id].get('min', None) for k in test_samples if test_samples[k].get(filter_id, None) is not None]
max_values = [test_samples[k][filter_id].get('max', None) for k in test_samples if test_samples[k].get(filter_id, None) is not None]
min_title = ''
if len(min_values) > 0 and any([k is not None for k in min_values]):
min_title = '{} < {}'.format(min([k for k in min_values if k is not None]), filter_id)
        max_title = ''
        if len(max_values) > 0 and any([k is not None for k in max_values]):
            max_values = [k for k in max_values if k is not None]
            max_title = '${} < {}$'.format(filter_id, max(max_values)) if len(min_title) == 0 else '${} < {}$'.format(min_title, max(max_values))
        elif min_title:  # min-only cut: still show it in the legend
            max_title = '${}$'.format(min_title)
        return legend_title + max_title
def decorate_plot(self, ax, catalog_name, n=0):
"""
Decorates plot with axes labels, title, etc.
"""
title = '{} vs. {}'.format(catalog_name, self.data_label)
lgnd_title = None
if self.title_in_legend:
lgnd_title = self.get_legend_title(self.test_samples) if not self.legend_title else self.legend_title
ax.legend(loc='lower left', fontsize=self.legend_size, title=lgnd_title)
ax.tick_params(labelsize=self.tick_size)
# check for multiple subplots and label
if n+1 >= self.fig_subplots_ncols*(self.fig_subplots_nrows - 1):
ax.tick_params(labelbottom=True)
for axlabel in ax.get_xticklabels():
axlabel.set_visible(True)
ax.set_xlabel(self.fig_xlabel, size=self.font_size)
else:
for axlabel in ax.get_xticklabels():
axlabel.set_visible(False)
if self.fig_ylim is not None:
ax.set_ylim(*self.fig_ylim)
if self.fig_xlim is not None:
ax.set_xlim(*self.fig_xlim)
# suppress labels for multiple subplots
if n % self.fig_subplots_ncols == 0: #1st column
ax.set_ylabel(self.fig_ylabel, size=self.font_size)
else:
for axlabel in ax.get_yticklabels():
axlabel.set_visible(False)
if not self.no_title:
ax.set_title(title, fontsize='medium')
@staticmethod
def score_and_test(corr_data): # pylint: disable=unused-argument
""" Given the resultant correlations, compute the test score and return
a TestResult
Parameters
----------
corr_data : list of float array likes
List containing resultant data from correlation functions computed
in the test.
Example:
[[np.array([...]), np.array([...]), np.array([...])], ...]
Returns
-------
descqa.TestResult
"""
return TestResult(inspect_only=True)
@staticmethod
def get_jackknife_randoms(N_jack, catalog_data, generate_randoms, ra='ra', dec='dec'):
"""
Computes the jackknife regions and random catalogs for each region
Parameters
----------
N_jack : number of regions
catalog_data : input catalog
generate_randoms: function to generate randoms (eg self.generate_processed_randoms)
Returns
-------
jack_labels: array of regions in catalog data
randoms: dict of randoms labeled by region
"""
#cluster
nn = np.stack((catalog_data[ra], catalog_data[dec]), axis=1)
_, jack_labels, _ = k_means(n_clusters=N_jack, random_state=0, X=nn)
randoms = {}
for nj in range(N_jack):
catalog_data_jk = dict(zip(catalog_data.keys(), [v[(jack_labels != nj)] for v in catalog_data.values()]))
rand_cat, rr = generate_randoms(catalog_data_jk) #get randoms for this footprint
randoms[str(nj)] = {'ran': rand_cat, 'rr':rr}
return jack_labels, randoms
def get_jackknife_errors(self, N_jack, catalog_data, sample_conditions, r, xi, jack_labels, randoms,
run_treecorr, diagonal_errors=True):
"""
Computes jacknife errors
Parameters
----------
N_jack : number of regions
catalog_data : input catalog
sample_conditions : sample selections
r : r data for full region
xi : correlation data for full region
jack_labels: array of regions in catalog data
randoms: dict of randoms labeled by region
run_treecorr: method to run treecorr
Returns
--------
covariance : covariance matrix
"""
#run treecorr for jackknife regions
Nrbins = len(r)
        Njack_array = np.zeros((N_jack, Nrbins), dtype=float)
print(sample_conditions)
for nj in range(N_jack):
catalog_data_jk = dict(zip(catalog_data.keys(),
[v[(jack_labels != nj)] for v in catalog_data.values()]))
tmp_catalog_data = self.create_test_sample(catalog_data_jk, sample_conditions) #apply sample cut
# run treecorr
_, Njack_array[nj], _ = run_treecorr(catalog_data=tmp_catalog_data,
treecorr_rand_cat=randoms[str(nj)]['ran'],
rr=randoms[str(nj)]['rr'],
output_file_name=None)
covariance = np.zeros((Nrbins, Nrbins))
for i in range(Nrbins):
if diagonal_errors:
for njack in Njack_array:
covariance[i][i] += (N_jack - 1.)/N_jack * (xi[i] - njack[i]) ** 2
else:
for j in range(Nrbins):
for njack in Njack_array:
covariance[i][j] += (N_jack - 1.)/N_jack * (xi[i] - njack[i]) * (xi[j] - njack[j])
return covariance
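    @staticmethod
    def _jackknife_covariance_sketch(xi, xi_jack):
        """
        Illustrative helper (not used by the test): a vectorized sketch of the
        delete-one jackknife covariance computed loop-wise above. For the
        full-sample correlation `xi` (length Nrbins) and an (N_jack, Nrbins)
        array `xi_jack` of leave-one-out estimates,
        C_ij = (N_jack - 1)/N_jack * sum_k (xi_i - xi_jack[k, i]) * (xi_j - xi_jack[k, j])
        """
        n_jack = len(xi_jack)
        dev = xi[np.newaxis, :] - xi_jack  # deviations from the full-sample xi
        return (n_jack - 1.) / n_jack * dev.T.dot(dev)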
def check_footprint(self, catalog_data):
"""
"""
pix_footprint = get_healpixel_footprint(catalog_data['ra'],
catalog_data['dec'], self.random_nside)
area_footprint = 4.*np.pi*(180./np.pi)**2*len(pix_footprint)/hp.nside2npix(self.random_nside)
return area_footprint
class CorrelationsAngularTwoPoint(CorrelationUtilities):
"""
Validation test for an angular 2pt correlation function.
"""
def __init__(self, **kwargs):
super(CorrelationsAngularTwoPoint, self).__init__(**kwargs)
self.treecorr_config['metric'] = 'Arc'
self.treecorr_config['sep_units'] = 'deg'
print(self.legend_title)
def generate_processed_randoms(self, catalog_data):
""" Create and process random data for the 2pt correlation function.
Parameters
----------
catalog_data : dict
Returns
-------
tuple of (random catalog treecorr.Catalog instance,
processed treecorr.NNCorrelation on the random catalog)
"""
rand_ra, rand_dec = generate_uniform_random_ra_dec_footprint(
catalog_data['ra'].size * self.random_mult,
get_healpixel_footprint(catalog_data['ra'], catalog_data['dec'], self.random_nside),
self.random_nside,
)
rand_cat = treecorr.Catalog(ra=rand_ra, dec=rand_dec, ra_units='deg', dec_units='deg',
npatch= self.npatch,
)
rr = treecorr.NNCorrelation(**self.treecorr_config)
rr.process(rand_cat)
return rand_cat, rr
def run_treecorr(self, catalog_data, treecorr_rand_cat, rr, output_file_name):
""" Run treecorr on input catalog data and randoms.
Produce measured correlation functions using the Landy-Szalay
estimator.
Parameters
----------
catalog_data : a GCR catalog instance
treecorr_rand_cat : treecorr.Catalog
Catalog of random positions over the same portion of sky as the
input catalog_data.
rr : treecorr.NNCorrelation
A processed NNCorrelation of the input random catalog.
output_file_name : string
Full path name of the file to write the resultant correlation to.
Returns
-------
tuple of array likes
Resultant correlation function. (separation, amplitude, amp_err).
"""
cat = treecorr.Catalog(
ra=catalog_data['ra'],
dec=catalog_data['dec'],
ra_units='deg',
dec_units='deg',
npatch= self.npatch,
)
dd = treecorr.NNCorrelation(**self.treecorr_config)
dr = treecorr.NNCorrelation(**self.treecorr_config)
rd = treecorr.NNCorrelation(**self.treecorr_config)
dd.process(cat)
dr.process(treecorr_rand_cat, cat)
rd.process(cat, treecorr_rand_cat)
if output_file_name is not None:
dd.write(output_file_name, rr, dr, rd)
xi, var_xi = dd.calculateXi(rr, dr, rd)
xi_rad = np.exp(dd.meanlogr)
xi_sig = np.sqrt(var_xi)
return xi_rad, xi, xi_sig
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
catalog_data = self.load_catalog_data(catalog_instance=catalog_instance,
requested_columns=self.requested_columns,
test_samples=self.test_samples)
if not catalog_data:
cols = [i for c in self.requested_columns.values() for i in c]
return TestResult(skipped=True,
summary='Missing requested quantities {}'.format(', '.join(cols)))
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
rand_cat, rr = self.generate_processed_randoms(catalog_data) #assumes ra and dec exist
with open(os.path.join(output_dir, 'galaxy_count.dat'), 'a') as f:
f.write('Total (= catalog) Area = {:.1f} sq. deg.\n'.format(self.check_footprint(catalog_data)))
f.write('NOTE: 1) assuming catalog is of equal depth over the full area\n')
f.write(' 2) assuming sample contains enough galaxies to measure area\n')
if self.jackknife: #evaluate randoms for jackknife footprints
jack_labels, randoms = self.get_jackknife_randoms(self.N_jack, catalog_data,
self.generate_processed_randoms)
correlation_data = dict()
for sample_name, sample_conditions in self.test_samples.items():
tmp_catalog_data = self.create_test_sample(
catalog_data, sample_conditions)
if not len(tmp_catalog_data['ra']):
continue
output_treecorr_filepath = os.path.join(
output_dir, self.output_filename_template.format(sample_name))
xi_rad, xi, xi_sig = self.run_treecorr(
catalog_data=tmp_catalog_data,
treecorr_rand_cat=rand_cat,
rr=rr,
output_file_name=output_treecorr_filepath)
#jackknife errors
if self.jackknife:
covariance = self.get_jackknife_errors(self.N_jack, catalog_data, sample_conditions,
xi_rad, xi, jack_labels, randoms,
self.run_treecorr,
diagonal_errors=self.use_diagonal_only)
xi_sig = np.sqrt(np.diag(covariance))
correlation_data[sample_name] = (xi_rad, xi, xi_sig)
self.plot_data_comparison(corr_data=correlation_data,
catalog_name=catalog_name,
output_dir=output_dir)
return self.score_and_test(correlation_data)
class CorrelationsProjectedTwoPoint(CorrelationUtilities):
"""
    Validation test for a projected 2pt correlation function.
"""
def __init__(self, **kwargs):
super(CorrelationsProjectedTwoPoint, self).__init__(**kwargs)
self.pi_maxes = kwargs['pi_maxes']
self.treecorr_config['metric'] = 'Rperp'
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
h = catalog_instance.cosmology.H(0).value/100 if self.Mag_units == 'h1' else 1
catalog_data = self.load_catalog_data(catalog_instance=catalog_instance,
requested_columns=self.requested_columns,
test_samples=self.test_samples, h=h)
if not catalog_data:
return TestResult(skipped=True, summary='Missing requested quantities')
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
rand_ra, rand_dec = generate_uniform_random_ra_dec_footprint(
catalog_data['ra'].size*self.random_mult,
get_healpixel_footprint(catalog_data['ra'], catalog_data['dec'], self.random_nside),
self.random_nside,
)
correlation_data = dict()
for sample_name, sample_conditions in self.test_samples.items():
output_treecorr_filepath = os.path.join(
output_dir, self.output_filename_template.format(sample_name))
tmp_catalog_data = self.create_test_sample(
catalog_data, sample_conditions, h=h)
with open(os.path.join(output_dir, 'galaxy_count.dat'), 'a') as f:
f.write('{} {}\n'.format(sample_name, len(tmp_catalog_data['ra'])))
if not len(tmp_catalog_data['ra']):
continue
xi_rad, xi, xi_sig = self.run_treecorr_projected(
catalog_data=tmp_catalog_data,
rand_ra=rand_ra,
rand_dec=rand_dec,
cosmology=catalog_instance.cosmology,
pi_max=self.pi_maxes[sample_name],
output_file_name=output_treecorr_filepath)
correlation_data[sample_name] = (xi_rad, xi, xi_sig)
self.plot_data_comparison(corr_data=correlation_data,
catalog_name=catalog_name,
output_dir=output_dir)
return self.score_and_test(correlation_data)
def run_treecorr_projected(self, catalog_data, rand_ra, rand_dec,
cosmology, pi_max, output_file_name):
""" Run treecorr on input catalog data and randoms.
Produce measured correlation functions using the Landy-Szalay
estimator.
Parameters
----------
catalog_data : a GCR catalog instance
rand_ra : float array like
Random RA positions on the same sky as covered by catalog data.
rand_dec : float array like
Random DEC positions on the same sky as covered by catalog data.
cosmology : astropy.cosmology
An astropy.cosmology instance specifying the catalog cosmology.
pi_max : float
Maximum comoving distance along the line of sight to correlate.
output_file_name : string
Full path name of the file to write the resultant correlation to.
Returns
-------
tuple of array likes
Resultant correlation function. (separation, amplitude, amp_err).
"""
treecorr_config = self.treecorr_config.copy()
treecorr_config['min_rpar'] = -pi_max
treecorr_config['max_rpar'] = pi_max
cat = treecorr.Catalog(
ra=catalog_data['ra'],
dec=catalog_data['dec'],
ra_units='deg',
dec_units='deg',
npatch=self.npatch,
r=redshift2dist(catalog_data['z'], cosmology),
)
z_min = catalog_data['z'].min()
z_max = catalog_data['z'].max()
rand_cat = treecorr.Catalog(
ra=rand_ra,
dec=rand_dec,
ra_units='deg',
dec_units='deg',
npatch=self.npatch,
r=generate_uniform_random_dist(
rand_ra.size, *redshift2dist(np.array([z_min, z_max]), cosmology)),
)
dd = treecorr.NNCorrelation(treecorr_config)
dr = treecorr.NNCorrelation(treecorr_config)
rd = treecorr.NNCorrelation(treecorr_config)
rr = treecorr.NNCorrelation(treecorr_config)
dd.process(cat)
dr.process(rand_cat, cat)
rd.process(cat, rand_cat)
rr.process(rand_cat)
dd.write(output_file_name, rr, dr, rd)
xi, var_xi = dd.calculateXi(rr, dr, rd)
xi_rad = np.exp(dd.meanlogr)
xi_sig = np.sqrt(var_xi)
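        # The factor 2*pi_max below converts the bin-averaged 3-D correlation
        # into the projected w_p(r_p), i.e. xi integrated over the
        # line-of-sight window (-pi_max, pi_max).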
return xi_rad, xi * 2. * pi_max, xi_sig * 2. * pi_max
class DEEP2StellarMassTwoPoint(CorrelationsProjectedTwoPoint):
""" Test simulated data against the power laws fits to Stellar Mass
selected samples in DEEP2. This class also serves as an example of creating
a specific test from the two correlation classes in the test suite.
In the future this could also include a color cut, however absolute U and B
band magnitudes are not stored in the simulated catalogs currently and
converting the current fluxes to those is currently out of scope.
"""
@staticmethod
def power_law(r, r0, g):
""" Compute the power law of a simple 2 parameter projected correlation
function.
Parameters
---------
r : float array like
Comoving positions to compute the power law at.
r0 : float
Amplitude of the correlation function
g : float
Power law of the correlation function.
Returns
-------
float array like
"""
gamma_func_ratio = scsp.gamma(1/2.) * scsp.gamma((g - 1) / 2) / scsp.gamma(g / 2)
return r * (r0 / r) ** g * gamma_func_ratio
@staticmethod
def power_law_err(r, r0, g, r0_err, g_err):
""" Compute the error on the power law model given errors on r0 and g.
function.
Parameters
---------
r : float array like
Comoving positions to compute the power law at.
r0 : float
Amplitude of the correlation function
g : float
Power law of the correlation function.
r0_err : float
Error on r0
g_err : float
Error on the power law slope.
Returns
-------
float array like
"""
gamma_func_ratio = scsp.gamma(1/2.) * scsp.gamma((g - 1) / 2) / scsp.gamma(g / 2)
p_law = r * (r0 / r) ** g * gamma_func_ratio
dev_r0 = r ** (1 - g) * r0 ** (g - 1) * g * gamma_func_ratio * r0_err
dev_g = (p_law * np.log(r) +
2 * p_law * scsp.polygamma(0, (g - 1) / 2) +
-2 * p_law * scsp.polygamma(0, g / 2)) * g_err
return np.sqrt(dev_r0 ** 2 + dev_g ** 2)
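    @staticmethod
    def _demo_power_law():
        # Minimal sketch (values made up, not DEEP2 fits): evaluate the
        # projected power-law model and its propagated error at a few r_p.
        rp = np.array([1.0, 5.0, 10.0])
        wp = DEEP2StellarMassTwoPoint.power_law(rp, r0=5.0, g=1.8)
        wp_err = DEEP2StellarMassTwoPoint.power_law_err(rp, r0=5.0, g=1.8,
                                                        r0_err=0.3, g_err=0.05)
        return wp, wp_err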
def plot_data_comparison(self, corr_data, catalog_name, output_dir):
fig, ax = plt.subplots()
colors = plt.cm.plasma_r(np.linspace(0.1, 1, len(self.test_samples))) # pylint: disable=no-member
for sample_name, color in zip(self.test_samples, colors):
sample_corr = corr_data[sample_name]
sample_data = self.test_data[sample_name]
sample_label = self.test_sample_labels.get(sample_name)
p_law = self.power_law(sample_corr[0],
self.validation_data[sample_data['row'],
sample_data['r0']],
self.validation_data[sample_data['row'],
sample_data['g']])
p_law_err = self.power_law_err(sample_corr[0],
self.validation_data[sample_data['row'],
sample_data['r0']],
self.validation_data[sample_data['row'],
sample_data['g']],
self.validation_data[sample_data['row'],
sample_data['r0_err']],
self.validation_data[sample_data['row'],
sample_data['g_err']])
ax.loglog(sample_corr[0],
p_law,
c=color,
label=' '.join([self.survey_label, sample_label]))
ax.fill_between(sample_corr[0],
p_law - p_law_err,
p_law + p_law_err,
lw=0, color=color, alpha=0.2)
ax.errorbar(sample_corr[0], sample_corr[1], sample_corr[2], marker='o', ls='', c=color,
label=' '.join([catalog_name, sample_label]))
ax.fill_between([self.r_validation_min, self.r_validation_max], [0, 0], [10**4, 10**4],
alpha=0.15, color='grey') #validation region
self.decorate_plot(ax, catalog_name)
fig.tight_layout()
fig.savefig(os.path.join(output_dir, '{:s}.png'.format(self.test_name)), bbox_inches='tight')
plt.close(fig)
def score_and_test(self, corr_data):
""" Test the average chi^2 per degree of freedom against power law fits
to the DEEP2 dataset.
"""
chi_per_nu = 0
total_sample = 0
rbins = list(corr_data.values()).pop()[0]
r_idx_min = np.searchsorted(rbins, self.r_validation_min)
r_idx_max = np.searchsorted(rbins, self.r_validation_max, side='right')
for sample_name in self.test_samples:
sample_corr = corr_data[sample_name]
sample_data = self.test_data[sample_name]
r_data = sample_corr[0][r_idx_min:r_idx_max]
p_law = self.power_law(r_data,
self.validation_data[sample_data['row'],
sample_data['r0']],
self.validation_data[sample_data['row'],
sample_data['g']])
p_law_err = self.power_law_err(r_data,
self.validation_data[sample_data['row'],
sample_data['r0']],
self.validation_data[sample_data['row'],
sample_data['g']],
self.validation_data[sample_data['row'],
sample_data['r0_err']],
self.validation_data[sample_data['row'],
sample_data['g_err']])
            # accumulate chi^2/nu over samples so the final score is the
            # average, rather than just the last sample's value
            chi_per_nu += np.sum(((sample_corr[1][r_idx_min:r_idx_max] - p_law) / p_law_err) ** 2) / len(r_data)
total_sample += 1
score = chi_per_nu / total_sample
# Made up value. Assert that average chi^2/nu is less than 2.
test_pass = score < 2
return TestResult(score=score,
passed=test_pass,
summary="Ave chi^2/nu value comparing to power law fits to stellar mass threshold "
"DEEP2 data. Test threshold set to 2.")
| 37,232
| 41.796552
| 145
|
py
|
descqa
|
descqa-master/descqa/ImgPkTest.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import numpy as np
from scipy.stats import binned_statistic, chi2
from astropy.table import Table
from .base import BaseValidationTest, TestResult
from .plotting import plt
from .utils import first, is_string_like
__all__ = ['ImgPkTest']
class ImgPkTest(BaseValidationTest):
"""
Validation test that computes the power spectrum
of a given raft image
Args:
-----
raft: str, list of str, or None
Raft number to analyze (e.g., 'R01', 'R10', 'R22').
rebinning: int, or None
rebinning image by this factor
validation_data_path: str, or None
path to validation data
validation_data_label: str
label of validation data
pixel_scale : float
pixel scale in arcmin
"""
def __init__(self, raft=None, rebinning=None, validation_data_path=None,
validation_data_label=None, pixel_scale=(0.2/60.0),
**kwargs):
# pylint: disable=W0231
self.raft = raft
self.rebinning = rebinning
if validation_data_path is None:
self.validation_data = None
else:
validation_data_path = os.path.join(self.external_data_dir, validation_data_path)
self.validation_data = Table.read(validation_data_path)
self.validation_data_label = validation_data_label
self.pixel_scale = pixel_scale
def calc_psd(self, image_data, rebinning=1, bins=200):
FT = np.fft.fft2(image_data / image_data.mean() - 1)
n_kx, n_ky = FT.shape
psd = np.square(np.abs(FT)).ravel()
spacing = self.pixel_scale * rebinning
k_rad = np.hypot(*np.meshgrid(np.fft.fftfreq(n_kx, spacing), np.fft.fftfreq(n_ky, spacing), indexing='ij')).ravel()
k_rad /= (2.0 * np.pi)
psd *= (spacing / n_kx) * (spacing / n_ky)
return binned_statistic(k_rad, [k_rad, psd], bins=bins)[0]
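    # Conventions used in calc_psd above: the image is reduced to fractional
    # fluctuations before the FFT, k_rad is reported in cycles per arcmin
    # (hence the 2*pi division), and the (spacing/n_kx)*(spacing/n_ky) factor
    # normalizes |FT|^2 to a power spectral density in arcmin^2.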
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
if hasattr(catalog_instance, 'focal_plane'):
focal_plane = catalog_instance.focal_plane
elif hasattr(catalog_instance, 'focal_planes'):
focal_plane = first(catalog_instance.focal_planes.values())
else:
return TestResult(skipped=True, summary='Not an e-image!')
rafts = focal_plane.rafts
if self.rebinning is None:
rebinning = catalog_instance.default_rebinning
else:
rebinning = self.rebinning
if self.raft is None:
raft_names = list(rafts)
elif is_string_like(self.raft):
raft_names = [self.raft]
else:
raft_names = list(self.raft)
if not all(raft_name in rafts for raft_name in raft_names):
return TestResult(skipped=True, summary='Not all rafts exist!')
sensor_names = ['S%d%d'%(i,j) for i in range(3) for j in range(3)]
total_chi2 = 0
total_dof = 0
for raft_name in raft_names:
raft = rafts[raft_name]
data = [raft.sensors[name].get_data(rebinning) if name in raft.sensors else None for name in sensor_names]
fig, ax = plt.subplots(2, 1, figsize=(7, 8))
for sensor, data_this in zip(sensor_names, data):
if data_this is None:
continue
ax[0].hist(data_this.ravel(), np.linspace(200, 2000, 181),
histtype='step', log=True, label=sensor)
ax[1].loglog(*self.calc_psd(data_this, rebinning), label=sensor, alpha=0.8)
if sum((1 for data_this in data if data_this is not None)) == 9:
data = np.array(data)
xdim, ydim = data.shape[1:]
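                # stitch the 3x3 grid of sensor images into one raft image:
                # (9, x, y) -> (3, 3, x, y) -> (3, x, 3, y) -> (3x, 3y)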
data = data.reshape(3, 3, xdim, ydim).swapaxes(1, 2).reshape(3*xdim, 3*ydim) # pylint: disable=too-many-function-args
k, psd = self.calc_psd(data, rebinning)
ax[1].loglog(k, psd, label='all', c='k')
else:
psd = None
if self.validation_data is not None:
ax[1].loglog(self.validation_data['k'], self.validation_data['Pk'], label=self.validation_data_label, c='r', ls=':')
if self.validation_data is not None and psd is not None:
psd_log_interp = np.interp(self.validation_data['k'], k, np.log(psd), left=np.nan, right=np.nan)
mask = np.isfinite(psd_log_interp)
total_dof += np.count_nonzero(mask)
total_chi2 += np.square((psd_log_interp[mask] - np.log(self.validation_data['Pk'][mask]))).sum()
ax[0].legend(ncol=3)
ax[1].legend(ncol=3)
ax[0].set_title('{} - {}'.format(raft_name, catalog_name))
ax[0].set_xlabel('Background level [ADU]')
ax[0].set_ylabel('Number of pixels')
ax[0].set_ylim(None, 1e5)
ax[1].set_xlabel('k [arcmin$^{-1}$]')
ax[1].set_ylabel('P(k)')
ax[1].set_xlim(0.005, 2)
ax[1].set_ylim(1.0e-4, 2)
fig.tight_layout()
fig.savefig(os.path.join(output_dir, 'plot_{}.png'.format(raft_name)))
plt.close(fig)
score = chi2.cdf(total_chi2, total_dof)
        # Check criteria to pass or fail (images at the edges of the focal plane
        # will have much more power than the ones in the center if they are not
        # flattened); we require the total chi^2 probability to satisfy p < 0.95
return TestResult(score=score, passed=(score < 0.95))
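

# --- Hedged sketch (illustration only; not part of the original test) ---
# Reproduces the normalization used in ImgPkTest.calc_psd on synthetic white
# noise; the image size, background level and pixel scale are made-up values.
if __name__ == '__main__':
    rng = np.random.RandomState(42)
    image = 1000.0 + 5.0 * rng.standard_normal((256, 256))  # flat background + noise
    ft = np.fft.fft2(image / image.mean() - 1)
    spacing = 0.2 / 60.0  # 0.2 arcsec pixels expressed in arcmin
    k_rad = np.hypot(*np.meshgrid(np.fft.fftfreq(256, spacing),
                                  np.fft.fftfreq(256, spacing),
                                  indexing='ij')).ravel() / (2.0 * np.pi)
    psd = np.square(np.abs(ft)).ravel() * (spacing / 256) ** 2
    k_mean, pk = binned_statistic(k_rad, [k_rad, psd], bins=100)[0]
    print(pk[:5])  # a roughly flat P(k) is expected for white noise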
| 5,597
| 39.273381
| 134
|
py
|
descqa
|
descqa-master/descqa/NumberDensityVersusRedshift.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
import math
import re
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
import numpy as np
from GCR import GCRQuery
from sklearn.cluster import k_means
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['NumberDensityVersusRedshift']
class NumberDensityVersusRedshift(BaseValidationTest):
"""
Validation test to show redshift distribution P(z) or N(z)
Parameters
----------
z : str, optional, (default: 'redshift_true')
label for redshift column
band : str, optional (default: 'i')
band to test
N_zbins : int, optional (default: 10)
number of redshift bins between `zlo` and `zhi`
should be smaller than `N_jack` if `jackknife` is set to `True`
zlo : float, optional, (default: 0)
lower redshift limit
zhi : float, optional (default: 1.1)
upper redshift limit
observation : str, optional (default: '')
observation dataset to compare to
mag_lo : float, optional (default: 27)
faint-end magnitude limit
mag_hi : float, optional (default: 18)
bright-end magnitude limit
ncolumns : int, optional (default: 2)
number of subplot columns
normed : bool, optional (default: True)
normalize the redshift distribution (i.e. plotting P(z)).
        Note that when `normed` is set to `False` the comparison with validation data
        does not make much sense, since the validation data are normalized.
jackknife : bool, optional (default: False)
turn on jackknife error. When set to `False` use Poisson error.
N_jack : int, optional (default: 20)
number of jackknife regions
`N_jack` should be much larger than `N_zbins` for the jackknife errors to be stable
ra : str, optional, (default: 'ra')
label of RA column (used if `jackknife` is `True`)
dec : str, optional, (default: 'dec')
label of Dec column (used if `jackknife` is `True`)
pass_limit : float, optional (default: 2.)
chi^2 value needs to be less than this value to pass the test
use_diagonal_only : bool, optional (default: False)
        use only the diagonal terms of the covariance matrix when calculating chi^2
rest_frame: boolean, optional (default: False)
use rest-frame magnitudes for cuts
Note that mag_lo and mag_hi need to be adjusted if rest_frame is set to `True`
"""
#setup dict with parameters needed to read in validation data
possible_observations = {
'Coil2004_magbin': {
'filename_template': 'N_z/DEEP2/Coil_et_al_2004_Table3_{}.txt',
'usecols': (0, 1, 2, 4),
'colnames': ('mag_hi', 'mag_lo', 'z0values', 'z0errors'),
'skiprows': 2,
            'label': 'Coil et al. 2004',
},
'Coil2004_maglim': {
'filename_template': 'N_z/DEEP2/Coil_et_al_2004_Table4_{}.txt',
'usecols': (0, 1, 2),
'colnames': ('mag_hi', 'mag_lo', 'z0values'),
'skiprows': 3,
            'label': 'Coil et al. 2004',
},
'DEEP2_JAN': {
'filename_template': 'N_z/DEEP2/JANewman_{}.txt',
'usecols': (0, 1, 2, 3),
'colnames': ('mag_hi_lim', 'mag_lo_lim', 'z0const', 'z0linear'),
'skiprows': 1,
'label': 'DEEP2',
},
}
#plotting constants
figx_p = 9
figy_p = 11
lw2 = 2
msize = 6 #markersize
default_colors = ['blue', 'r', 'm', 'g', 'navy', 'y', 'purple', 'gray', 'c',\
'orange', 'violet', 'coral', 'gold', 'orchid', 'maroon', 'tomato', \
'sienna', 'chartreuse', 'firebrick', 'SteelBlue']
validation_color = 'black'
default_markers = ['o', 'v', 's', 'd', 'H', '^', 'D', 'h', '<', '>', '.']
def __init__(self, band='i', N_zbins=10, zlo=0., zhi=1.1,
observation='', mag_lo=27, mag_hi=18, ncolumns=2, normed=True,
jackknife=False, N_jack=20, ra='ra', dec='dec', pass_limit=2.,
use_diagonal_only=False, rest_frame=False, **kwargs):
# pylint: disable=W0231
#catalog quantities
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.replace_cat_name = kwargs.get('replace_cat_name', {})
self.title_in_legend = kwargs.get('title_in_legend', False)
self.legend_location = kwargs.get('legend_location', 'upper left')
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 10)
self.tick_size = kwargs.get('tick_size', 12)
self.adjust_ylim = kwargs.get('adjust_ylim', 1.3)
self.rest_frame = rest_frame
if self.rest_frame:
possible_mag_fields = ('Mag_true_{}_lsst_z0',
'Mag_true_{}_sdss_z0',
'Mag_true_{}_des_z0',
)
else:
possible_mag_fields = ('mag_{}_cModel',
'mag_{}_lsst',
'mag_{}_sdss',
'mag_{}_des',
'mag_true_{}_lsst',
'mag_true_{}_sdss',
'mag_true_{}_des',
)
self.possible_mag_fields = [f.format(band) for f in possible_mag_fields]
self.possible_redshifts = ['redshift_true_galaxy', 'redshift_true', 'redshift_truth']
self.band = band
#z-bounds and binning
self.zlo = zlo
self.zhi = zhi
self.N_zbins = N_zbins
self.zbins = np.linspace(zlo, zhi, N_zbins+1)
#errors
self.jackknife = jackknife
self.N_jack = N_jack
self.ra = ra
self.dec = dec
self.use_diagonal_only = use_diagonal_only
#scores
self.pass_limit = pass_limit
#validation data
self.validation_data = {}
self.observation = observation
#check for valid observations
if not observation:
print('Warning: no data file supplied, no observation requested; only catalog data will be shown')
elif observation not in self.possible_observations:
raise ValueError('Observation {} not available'.format(observation))
else:
self.validation_data = self.get_validation_data(band, observation)
#plotting variables
self.normed = normed
self.ncolumns = int(ncolumns)
#setup subplot configuration and get magnitude cuts for each plot
self.mag_lo, self.mag_hi = self.init_plots(mag_lo, mag_hi)
#setup summary plot
self.summary_fig, self.summary_ax = plt.subplots(self.nrows, self.ncolumns, figsize=(self.figx_p, self.figy_p), sharex='col')
#could plot summary validation data here if available but would need to evaluate labels, bin values etc.
#otherwise setup a check so that validation data is plotted only once on summary plot
self.first_pass = True
self._other_kwargs = kwargs
def init_plots(self, mlo, mhi):
#get magnitude cuts based on validation data or default limits (only catalog data plotted)
        mag_lo = np.asarray(self.validation_data.get('mag_lo', [float(m) for m in range(int(mhi), int(mlo+1))]))
mag_hi = self.validation_data.get('mag_hi', [])
print(mag_lo, mag_hi)
#check if supplied limits differ from validation limits and adjust
mask = (mag_lo <= float(mlo)) & (mag_lo >= float(mhi))
if np.count_nonzero(mask) < len(mag_lo):
if len(mag_hi) > 0:
mag_hi = mag_hi[mask]
self.validation_data['mag_hi'] = mag_hi
mag_lo = mag_lo[mask]
self.validation_data['mag_lo'] = mag_lo
if 'z0values' in self.validation_data:
self.validation_data['z0values'] = self.validation_data['z0values'][mask]
if 'z0errors' in self.validation_data:
self.validation_data['z0errors'] = self.validation_data['z0errors'][mask]
#setup number of plots and number of rows required for subplots
self.nplots = len(mag_lo)
self.nrows = (self.nplots+self.ncolumns-1)//self.ncolumns
#other plotting variables
self.colors = iter(self.default_colors)
self.markers = iter(self.default_markers)
self.yaxis = 'P(z|m)' if self.normed else 'N(z|m)'
return mag_lo, mag_hi
def get_validation_data(self, band, observation):
data_args = self.possible_observations[observation]
data_path = os.path.join(self.data_dir, data_args['filename_template'].format(band))
if not os.path.exists(data_path):
raise ValueError("{}-band data file {} not found".format(band, data_path))
if not os.path.getsize(data_path):
raise ValueError("{}-band data file {} is empty".format(band, data_path))
data = np.loadtxt(data_path, unpack=True, usecols=data_args['usecols'], skiprows=data_args['skiprows'])
validation_data = dict(zip(data_args['colnames'], data))
validation_data['label'] = data_args['label']
#set mag_lo and mag_hi for cases where range of magnitudes is given
if 'mag_lo' not in validation_data:
validation_data['mag_hi'] = []
validation_data['mag_lo'] = np.asarray([float(m) for m in range(int(validation_data['mag_hi_lim']),
int(validation_data['mag_lo_lim'])+1)])
return validation_data
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
#check catalog data for required quantities
mag_field = catalog_instance.first_available(*self.possible_mag_fields)
if not mag_field:
return TestResult(skipped=True, summary='Missing required mag_field option')
self.zlabel = catalog_instance.first_available(*self.possible_redshifts)
if not self.zlabel:
            return TestResult(skipped=True, summary='Missing required redshift option')
self.filters = [(lambda z: (z > self.zlo) & (z < self.zhi), self.zlabel)]
jackknife_quantities = [self.zlabel, self.ra, self.dec] if self.jackknife else [self.zlabel]
for jq in jackknife_quantities:
if not catalog_instance.has_quantity(jq):
return TestResult(skipped=True, summary='Missing required {} quantity'.format(jq))
required_quantities = jackknife_quantities + [mag_field]
filtername = mag_field.split('_')[(-1 if mag_field.startswith('m') else -2)].upper() #extract filtername
filelabel = '_'.join((filtername, self.band))
#setup plots
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
if self.replace_cat_name:
for k, v in self.replace_cat_name.items():
catalog_name = re.sub(k, v, catalog_name)
fig, ax = plt.subplots(self.nrows, self.ncolumns, figsize=(self.figx_p, self.figy_p), sharex='col')
catalog_color = next(self.colors)
catalog_marker = next(self.markers)
#initialize arrays for storing histogram sums
        N_array = np.zeros((self.nrows, self.ncolumns, len(self.zbins)-1), dtype=int)
sumz_array = np.zeros((self.nrows, self.ncolumns, len(self.zbins)-1))
jackknife_data = {}
#get catalog data by looping over data iterator (needed for large catalogs) and aggregate histograms
for catalog_data in catalog_instance.get_quantities(required_quantities, filters=self.filters, return_iterator=True):
catalog_data = GCRQuery(*((np.isfinite, col) for col in catalog_data)).filter(catalog_data)
# filter catalog data further for matched object catalogs
if np.ma.isMaskedArray(catalog_data[self.zlabel]):
galmask = np.ma.getmask(catalog_data[self.zlabel])
catalog_data = {k: v[galmask] for k, v in catalog_data.items()}
for n, (cut_lo, cut_hi, N, sumz) in enumerate(zip_longest(
self.mag_lo,
self.mag_hi,
N_array.reshape(-1, N_array.shape[-1]), #flatten all but last dimension of array
sumz_array.reshape(-1, sumz_array.shape[-1]),
)):
if cut_lo:
mask = (catalog_data[mag_field] < cut_lo)
if cut_hi:
mask &= (catalog_data[mag_field] >= cut_hi)
z_this = catalog_data[self.zlabel][mask]
#save data for jackknife errors
if self.jackknife: #store all the jackknife data in numpy arrays for later processing
if str(n) not in jackknife_data.keys(): #initialize sub-dict
                            jackknife_data[str(n)] = {jq: np.asarray([]) for jq in jackknife_quantities}
for jkey in jackknife_data[str(n)].keys():
jackknife_data[str(n)][jkey] = np.hstack((jackknife_data[str(n)][jkey], catalog_data[jkey][mask]))
del mask
#bin catalog_data and accumulate subplot histograms
N += np.histogram(z_this, bins=self.zbins)[0]
sumz += np.histogram(z_this, bins=self.zbins, weights=z_this)[0]
#loop over magnitude cuts and make plots
results = {}
scores = np.array([self.pass_limit]*self.nplots)
for n, (ax_this, summary_ax_this, cut_lo, cut_hi, N, sumz, z0, z0err) in enumerate(zip_longest(
ax.flat,
self.summary_ax.flat,
self.mag_lo,
self.mag_hi,
N_array.reshape(-1, N_array.shape[-1]),
sumz_array.reshape(-1, sumz_array.shape[-1]),
self.validation_data.get('z0values', []),
self.validation_data.get('z0errors', []),
)):
if cut_lo is None: #cut_lo is None if self.mag_lo is exhausted
if ax_this is not None:
ax_this.set_visible(False)
if summary_ax_this is not None:
summary_ax_this.set_visible(False)
else:
cut_label = '{} $< {}$'.format(self.band, cut_lo)
if cut_hi:
cut_label = '${} \\leq $ {}'.format(cut_hi, cut_label) #also appears in txt file
if z0 is None and 'z0const' in self.validation_data: #alternate format for some validation data
z0 = self.validation_data['z0const'] + self.validation_data['z0linear'] * cut_lo
N = N.astype(np.float64)
if self.jackknife:
covariance = self.get_jackknife_errors(self.N_jack, jackknife_data[str(n)], N)
else:
covariance = np.diag(N)
meanz = sumz / N
sumN = N.sum()
total = '(# of galaxies = {})'.format(sumN)
if self.normed:
scale = sumN * (self.zbins[1:] - self.zbins[:-1])
N /= scale
covariance /= np.outer(scale, scale)
Nerrors = np.sqrt(np.diag(covariance))
#make subplot
catalog_label = ' '.join((catalog_name, cut_label.replace(self.band, filtername + ' ' + self.band)))
validation_label = ' '.join((self.validation_data.get('label', ''), cut_label))
key = cut_label.replace('$', '').replace('\\leq', '<=')
results[key] = {'meanz': meanz, 'total':total, 'N':N, 'N+-':Nerrors}
self.catalog_subplot(ax_this, meanz, N, Nerrors, catalog_color, catalog_marker, catalog_label)
if z0 and z0 > 0: # has validation data
fits = self.validation_subplot(ax_this, meanz, z0, z0err, validation_label)
results[key].update(fits)
scores[n], inverse_cov = self.get_score(N, fits['fit'], covariance, use_diagonal_only=self.use_diagonal_only)
results[key]['score'] = 'Chi_sq/dof = {:11.4g}'.format(scores[n])
if self.jackknife:
results[key]['inverse_cov_matrix'] = inverse_cov
self.decorate_subplot(ax_this, n)
#add curve for this catalog to summary plot
self.catalog_subplot(summary_ax_this, meanz, N, Nerrors, catalog_color, catalog_marker, catalog_label)
if self.first_pass and z0 and z0 > 0:
self.validation_subplot(summary_ax_this, meanz, z0, z0err, validation_label) #add validation data if evaluating first catalog
self.decorate_subplot(summary_ax_this, n)
#save results for catalog and validation data in txt files
for filename, dtype, comment, info, info2 in zip_longest((filelabel, self.observation), ('N', 'fit'),
(filtername,), ('total', 'z0'), ('score', 'z0err')):
if filename:
with open(os.path.join(output_dir, 'Nvsz_' + filename + '.txt'), 'ab') as f_handle: #open file in append binary mode
#loop over magnitude cuts in results dict
for key, value in results.items():
self.save_quantities(dtype, value, f_handle, comment=' '.join(((comment or ''),
key, value.get(info, ''), value.get(info2, ''))))
if self.jackknife:
with open(os.path.join(output_dir, 'Nvsz_' + filename + '.txt'), 'a') as f_handle: #open file in append mode
f_handle.write('\nInverse Covariance Matrices:\n')
for key in results.keys():
self.save_matrix(results[key]['inverse_cov_matrix'], f_handle, comment=key)
if self.first_pass: #turn off validation data plot in summary for remaining catalogs
self.first_pass = False
#make final adjustments to plots and save figure
self.post_process_plot(fig)
fig.savefig(os.path.join(output_dir, 'Nvsz_' + filelabel + '.png'))
plt.close(fig)
#compute final score
#final_scores = (scores < self.pass_limit)
#pass or fail on average score rather than demanding that all distributions pass
score_ave = np.mean(scores)
return TestResult(score_ave, passed=score_ave < self.pass_limit)
def get_jackknife_errors(self, N_jack, jackknife_data, N):
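        #partition the footprint into N_jack regions via k-means on (ra, dec);
        #each jackknife realization below drops the galaxies of one region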
nn = np.stack((jackknife_data[self.ra], jackknife_data[self.dec]), axis=1)
_, jack_labels, _ = k_means(n_clusters=N_jack, random_state=0, X=nn)
#make histograms for jackknife regions
        Njack_array = np.zeros((N_jack, len(self.zbins)-1), dtype=int)
for nj in range(N_jack):
Njack_array[nj] = np.histogram(jackknife_data[self.zlabel][jack_labels != nj], self.zbins)[0]
covariance = np.zeros((self.N_zbins, self.N_zbins))
for i in range(self.N_zbins):
for j in range(self.N_zbins):
for njack in Njack_array:
covariance[i][j] += (N_jack - 1.)/N_jack * (N[i] - njack[i]) * (N[j] - njack[j])
return covariance
def catalog_subplot(self, ax, meanz, data, errors, catalog_color, catalog_marker, catalog_label):
ax.errorbar(meanz, data, yerr=errors, label=catalog_label, color=catalog_color, fmt=catalog_marker, ms=self.msize)
ymax = np.max(data + errors)
ax.set_ylim(0., self.adjust_ylim*ymax)
def validation_subplot(self, ax, meanz, z0, z0err, validation_label):
#plot validation data if available
ndata = meanz**2*np.exp(-meanz/z0)
norm = self.nz_norm(self.zhi, z0) - self.nz_norm(self.zlo, z0)
ax.plot(meanz, ndata/norm, label=validation_label, ls='--', color=self.validation_color, lw=self.lw2)
fits = {'fit': ndata/norm, 'z0':'z0 = {:.3f}'.format(z0)}
if z0err and z0err > 0:
nlo = meanz**2*np.exp(-meanz/(z0-z0err))
nhi = meanz**2*np.exp(-meanz/(z0+z0err))
normlo = self.nz_norm(self.zhi, z0-z0err) - self.nz_norm(self.zlo, z0-z0err)
normhi = self.nz_norm(self.zhi, z0+z0err) - self.nz_norm(self.zlo, z0+z0err)
ax.fill_between(meanz, nlo/normlo, nhi/normhi, alpha=0.3, facecolor=self.validation_color)
fits['fit+'] = nhi/normhi
fits['fit-'] = nlo/normlo
fits['z0err'] = 'z0err = {:.3f}'.format(z0err)
return fits
def decorate_subplot(self, ax, nplot):
#add axes and legend
ax.tick_params(labelsize=self.tick_size)
if nplot % self.ncolumns == 0: #1st column
ax.set_ylabel(self.yaxis, size=self.font_size)
if nplot+1 <= self.nplots-self.ncolumns: #x scales for last ncol plots only
ax.tick_params(direction='in', which='both')
#prevent overlapping yaxis labels
ax.yaxis.get_major_ticks()[0].label1.set_visible(False)
else:
print(nplot, ' visible')
ax.set_xlabel('z', size=self.font_size)
ax.tick_params(labelbottom=True)
ax.legend(loc='best', fancybox=True, framealpha=0.5, fontsize=self.legend_size, numpoints=1)
@staticmethod
def get_score(catalog, validation, cov, use_diagonal_only=True):
#remove bad values
mask = np.isfinite(catalog) & np.isfinite(validation)
if not mask.any():
            return np.nan, None  # no usable bins; caller unpacks (score, inverse_cov)
catalog = catalog[mask]
validation = validation[mask]
cov = cov[mask][:, mask]
inverse_cov = np.diag(1.0 / np.diag(cov))
if not use_diagonal_only:
try:
inverse_cov = np.linalg.inv(cov)
except np.linalg.LinAlgError:
print('Covariance matrix inversion failed: diagonal errors only will be used')
d = catalog - validation
chi2 = np.einsum('i,ij,j', d, inverse_cov, d)
chi2_reduced = chi2 / float(len(catalog))
return chi2_reduced, inverse_cov
@staticmethod
def nz_norm(z, z0):
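        #nz_norm is the indefinite integral of z^2 exp(-z/z0):
        #  int z^2 e^(-z/z0) dz = -z0 e^(-z/z0) (z^2 + 2 z z0 + 2 z0^2),
        #so nz_norm(zhi, z0) - nz_norm(zlo, z0) normalizes the N(z) model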
return z0*math.exp(-z/z0)*(-z*z-2.*z*z0-2.*z0*z0)
@staticmethod
def post_process_plot(fig):
fig.subplots_adjust(hspace=0.0)
@staticmethod
def save_matrix(matrix, fhandle, comment=''):
fhandle.write('{}:\n'.format(comment))
for row in matrix:
fhandle.write(' '.join(['{:10.3g}'.format(element) for element in row])+'\n')
@staticmethod
def save_quantities(keyname, results, filename, comment=''):
if keyname in results:
if keyname+'-' in results and keyname+'+' in results:
fields = ('meanz', keyname, keyname+'-', keyname+'+')
header = ', '.join(('Data columns are: <z>', keyname, keyname+'-', keyname+'+', ' '))
elif keyname+'+-' in results:
fields = ('meanz', keyname, keyname+'+-')
header = ', '.join(('Data columns are: <z>', keyname, keyname+'+-', ' '))
else:
fields = ('meanz', keyname)
header = ', '.join(('Data columns are: <z>', keyname, ' '))
            np.savetxt(filename, np.vstack([results[k] for k in fields]).T, fmt='%12.4e', header=header+comment)
def conclude_test(self, output_dir):
self.post_process_plot(self.summary_fig)
self.summary_fig.savefig(os.path.join(output_dir, 'summary.png'))
plt.close(self.summary_fig)
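

# --- Hedged sketch (illustration only; not part of the original test) ---
# Demonstrates the delete-one-region jackknife covariance assembled in
# get_jackknife_errors, on made-up histogram counts (no k-means needed here).
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    N_jack, nbins = 20, 10
    N_full = rng.poisson(1000, size=nbins).astype(float)
    # fake delete-one histograms: each drops roughly 1/N_jack of the counts
    njacks = np.array([rng.poisson(N_full * (N_jack - 1.) / N_jack)
                       for _ in range(N_jack)], dtype=float)
    d = N_full[None, :] - njacks
    # the double loop in get_jackknife_errors, written as a single einsum
    covariance = (N_jack - 1.) / N_jack * np.einsum('ki,kj->ij', d, d)
    print(np.sqrt(np.diag(covariance)))  # per-bin jackknife errors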
| 24,043
| 44.973231
| 145
|
py
|
descqa
|
descqa-master/descqa/StellarMassFunction.py
|
from __future__ import print_function, division, unicode_literals, absolute_import
import os
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
from itertools import count
import numpy as np
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
from .utils import get_sky_volume, get_opt_binpoints
__all__ = ['StellarMassFunction']
class StellarMassFunction(BaseValidationTest):
"""
    Validation test to show the galaxy stellar mass function
"""
#setup dict with parameters needed to read in validation data
possible_observations = {
'PRIMUS_2013': {
'filename_template': 'SMF/moustakas_et_al_2013/Table{}.txt',
'file-info': {
'0. < z < .1': {'zlo':0., 'zhi':.1, 'table#':3, 'usecols':[0,1,2,3]},
'.2 < z < .3': {'zlo':.2, 'zhi':.3, 'table#':4, 'usecols':[0,1,2,3]},
'.3 < z < .4': {'zlo':.3, 'zhi':.4, 'table#':4, 'usecols':[0,6,7,8]},
'.4 < z < .5': {'zlo':.4, 'zhi':.5, 'table#':4, 'usecols':[0,11,12,13]},
'.5 < z < .65': {'zlo':.5, 'zhi':.65, 'table#':4, 'usecols':[0,16,17,18]},
'.65 < z < .8': {'zlo':.65, 'zhi':.8, 'table#':4, 'usecols':[0,21,22,23]},
'.8 < z < 1.0': {'zlo':.8, 'zhi':1.0, 'table#':4, 'usecols':[0,26,27,28]},
},
'zrange': (0.0, 1.0),
'colnames': ('logM', 'log_phi', 'dlog_phi+', 'dlog_phi-'),
'label': 'PRIMUS 2013',
'missingdata': '...',
},
}
zkey_match = '< z <'
#plotting constants
validation_color = 'black'
validation_marker = 'o'
default_markers = ['v', 's', 'd', 'H', '^', 'D', 'h', '<', '>', '.']
msize = 4 #marker-size
yaxis_xoffset = 0.02
yaxis_yoffset = 0.5
def __init__(self, z='redshift_true', mass='stellar_mass', Nbins=25, log_Mlo=8., log_Mhi=12.,
observation='', zlo=0., zhi=1.0, zint=0.2, ncolumns=2, **kwargs):
#pylint: disable=W0231
self.font_size = kwargs.get('font_size', 20)
self.legend_size = kwargs.get('legend_size', 13)
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.fig_xsize = kwargs.get('fig_xsize', 10)
self.fig_ysize = kwargs.get('fig_ysize', 14)
self.text_size = kwargs.get('text_size', 20.)
self.Mlo = kwargs.get('Mlo', 7e8)
self.Mhi = kwargs.get('Mhi', 2e12)
#catalog quantities
self.zlabel = z
self.Mlabel = mass
#z-range and mass binning
self.Nbins = Nbins
self.log_Mlo = log_Mlo
self.log_Mhi = log_Mhi
self.Mbins = np.logspace(log_Mlo, log_Mhi, Nbins+1)
self.DM = (log_Mhi - log_Mlo)/Nbins
#validation data
self.validation_data = {}
self.observation = observation
#check for valid observations
if not observation:
print('Warning: no data file supplied, no observation requested; only catalog data will be shown')
elif observation not in self.possible_observations:
raise ValueError('Observation {} not available'.format(observation))
else:
self.validation_data = self.get_validation_data(observation)
#plotting variables
self.ncolumns = int(ncolumns)
self._color_iterator = ('C{}'.format(i) for i in count())
#setup subplot configuration and get z cuts for each plot
self.z_lo, self.z_hi = self.init_plots(zlo, zhi, zint)
zmin = np.min(self.z_lo)
zmax = np.max(self.z_hi)
self.filters = [(lambda z: (z > zmin) & (z < zmax), self.zlabel)]
#setup summary plot
self.summary_fig, self.summary_ax = plt.subplots(self.nrows, self.ncolumns, sharex='col',
figsize=(self.fig_xsize, self.fig_ysize))
self.summary_fig.text(self.yaxis_xoffset, self.yaxis_yoffset, self.yaxis, va='center', rotation='vertical',
fontsize=self.text_size) #setup a common axis label
#could plot summary validation data here if available but would need to evaluate labels, bin values etc.
#otherwise setup a check so that validation data is plotted only once on summary plot
self.first_pass = True
self._other_kwargs = kwargs
def init_plots(self, zlo, zhi, zint):
        #get redshift cuts based on validation data or default limits (only catalog data plotted)
if not self.validation_data:
z_lo = np.arange(zlo, zhi, zint)
z_hi = np.arange(zint, zhi+zint, zint)
else:
z_lo = [self.validation_data[k].get('zlo') for k in self.validation_data.keys() if self.zkey_match in k]
z_hi = [self.validation_data[k].get('zhi') for k in self.validation_data.keys() if self.zkey_match in k]
print(z_lo, z_hi)
#setup number of plots and number of rows required for subplots
self.nplots = len(z_lo)
self.nrows = (self.nplots+self.ncolumns-1)//self.ncolumns
#other plotting variables
self.markers = iter(self.default_markers)
self.yaxis = r'$d\phi/d\log_{10}(M/M_\odot)\quad[Mpc^{-3} dex^{-1}]$'
self.xaxis = '$M^*/M_\\odot$'
return z_lo, z_hi
def get_validation_data(self, observation):
data_args = self.possible_observations[observation]
validation_data = {'label':data_args['label']}
file_args = data_args['file-info']
for zkey in file_args.keys():
filename = self.possible_observations[observation]['filename_template'].format(file_args[zkey]['table#'])
data_path = os.path.join(self.data_dir, filename)
if not os.path.exists(data_path):
raise ValueError("SMF data file {} not found".format(data_path))
if not os.path.getsize(data_path):
raise ValueError("SMF data file {} is empty".format(data_path))
data = np.genfromtxt(data_path, unpack=True, usecols=file_args[zkey]['usecols'], missing_values=data_args['missingdata'])
validation_data[zkey] = dict(zip(data_args['colnames'], data))
validation_data[zkey]['zlo'] = file_args[zkey]['zlo']
validation_data[zkey]['zhi'] = file_args[zkey]['zhi']
return validation_data
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
#update color and marker to preserve catalog colors and markers across tests
catalog_color = next(self._color_iterator)
#check catalog data for required quantities
if not catalog_instance.has_quantities([self.zlabel, self.Mlabel]):
return TestResult(skipped=True, summary='Missing required quantity {} or {}'.format(self.zlabel, self.Mlabel))
#setup plots
fig, ax = plt.subplots(self.nrows, self.ncolumns, sharex='col', figsize=(self.fig_xsize, self.fig_ysize))
fig.text(self.yaxis_xoffset, self.yaxis_yoffset, self.yaxis, va='center', rotation='vertical',
fontsize=self.text_size) #setup a common axis label
if self.truncate_cat_name:
catalog_name = catalog_name.partition("_")[0]
#initialize arrays for storing histogram sums
        N_array = np.zeros((self.nrows, self.ncolumns, len(self.Mbins)-1), dtype=int)
sumM_array = np.zeros((self.nrows, self.ncolumns, len(self.Mbins)-1))
sumM2_array = np.zeros((self.nrows, self.ncolumns, len(self.Mbins)-1))
#get catalog data by looping over data iterator (needed for large catalogs) and aggregate histograms
for catalog_data in catalog_instance.get_quantities([self.zlabel, self.Mlabel], filters=self.filters, return_iterator=True):
catalog_data = GCRQuery(*((np.isfinite, col) for col in catalog_data)).filter(catalog_data)
for cut_lo, cut_hi, N, sumM, sumM2 in zip_longest(
self.z_lo,
self.z_hi,
N_array.reshape(-1, N_array.shape[-1]), #flatten all but last dimension of array
sumM_array.reshape(-1, sumM_array.shape[-1]),
sumM2_array.reshape(-1, sumM2_array.shape[-1]),
):
if cut_lo is not None: #cut_lo can be 0. so cannot use if cut_lo
mask = (cut_lo < catalog_data[self.zlabel]) & (catalog_data[self.zlabel] < cut_hi)
M_this = catalog_data[self.Mlabel][mask]
del mask
#bin catalog_data and accumulate subplot histograms
N += np.histogram(M_this, bins=self.Mbins)[0]
sumM += np.histogram(M_this, bins=self.Mbins, weights=M_this)[0]
sumM2 += np.histogram(M_this, bins=self.Mbins, weights=M_this**2)[0]
#check that catalog has entries for quantity to be plotted
if not np.asarray([N.sum() for N in N_array]).sum():
raise ValueError('No data found for quantity {}'.format(self.Mlabel))
#loop over magnitude cuts and make plots
#change plot order so that successive redshift bins are plotted columnwise
results = {}
for n, (ax_this, summary_ax_this, cut_lo, cut_hi, N, sumM, sumM2, zkey) in enumerate(zip_longest(
ax.flat,
#np.transpose(ax).flat,
self.summary_ax.flat,
#np.transpose(self.summary_ax).flat,
self.z_lo,
self.z_hi,
N_array.reshape(-1, N_array.shape[-1]),
sumM_array.reshape(-1, sumM_array.shape[-1]),
sumM2_array.reshape(-1, sumM2_array.shape[-1]),
[k for k in self.validation_data.keys() if self.zkey_match in k],
)):
if cut_lo is None: #cut_lo is None if self.z_lo is exhausted
ax_this.set_visible(False)
summary_ax_this.set_visible(False)
else:
if not zkey:
zkey = '{:.1f} < z < {:.1f}'.format(cut_lo, cut_hi)
cut_label = '${}$'.format(zkey)
Mvalues = sumM/N
sumN = N.sum()
total = '(# of galaxies = {})'.format(sumN)
Nerrors = np.sqrt(N)
volume = get_sky_volume(catalog_instance.sky_area, cut_lo, cut_hi, catalog_instance.cosmology)
phi = N/volume/self.DM
phi_errors = Nerrors/volume/self.DM
#make subplot
validation_label = self.validation_data.get('label', '')
results[zkey] = {'Mphi':Mvalues, 'total':total, 'phi':phi, 'phi+-':phi_errors}
self.catalog_subplot(ax_this, Mvalues, phi, phi_errors, catalog_color, catalog_name)
if zkey in self.validation_data.keys():
data = self.validation_subplot(ax_this, self.validation_data[zkey], validation_label)
results[zkey].update(data)
self.decorate_subplot(ax_this, n, label=cut_label)
#add curve for this catalog to summary plot
self.catalog_subplot(summary_ax_this, Mvalues, phi, phi_errors, catalog_color, catalog_name)
if self.first_pass and zkey in self.validation_data.keys(): #add validation data if evaluating first catalog
self.validation_subplot(summary_ax_this, self.validation_data[zkey], validation_label)
self.decorate_subplot(summary_ax_this, n, label=cut_label)
#save results for catalog and validation data in txt files
for filename, dtype, info in zip_longest((catalog_name, self.observation), ('phi', 'data'), ('total',)):
if filename:
                with open(os.path.join(output_dir, 'SMF_' + filename + '.txt'), 'ab') as f_handle: #open file in append binary mode
#loop over magnitude cuts in results dict
for key, value in results.items():
self.save_quantities(dtype, value, f_handle, comment=' '.join((key, value.get(info, ''))))
if self.first_pass: #turn off validation data plot in summary for remaining catalogs
self.first_pass = False
#make final adjustments to plots and save figure
self.post_process_plot(fig)
fig.savefig(os.path.join(output_dir, 'SMF_' + catalog_name + '.png'))
plt.close(fig)
return TestResult(inspect_only=True)
def catalog_subplot(self, ax, M, phi, phi_errors, catalog_color, catalog_label):
ax.plot(M, phi, label=catalog_label, color=catalog_color)
ax.fill_between(M, phi - phi_errors, phi + phi_errors, alpha=0.3, facecolor=catalog_color)
def validation_subplot(self, ax, validation_data, validation_label):
results = dict()
if all(x in validation_data.keys() for x in ('logM', 'log_phi', 'dlog_phi+', 'dlog_phi-')):
M = np.power(10, validation_data['logM'])
phi = np.power(10, validation_data['log_phi'])
dphi_hi = np.power(10, validation_data['log_phi'] + validation_data['dlog_phi+']) - phi
dphi_lo = -np.power(10, validation_data['log_phi'] + validation_data['dlog_phi-']) + phi
ax.errorbar(M, phi, yerr=[dphi_lo, dphi_hi], color=self.validation_color, marker=self.validation_marker,
linestyle="", label=validation_label, ms=self.msize)
results['data'] = phi
results['Mdata'] = M
results['data+'] = dphi_hi
results['data-'] = dphi_lo
else:
raise ValueError("Missing expected validation-data quantitites")
return results
def decorate_subplot(self, ax, nplot, label=None):
ax.tick_params(labelsize=18)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlim(self.Mlo, self.Mhi)
if label:
ax.text(0.99, 0.99, label, horizontalalignment='right', verticalalignment='top',
fontsize=self.text_size, transform=ax.transAxes)
#add axes and legend
if nplot+1 <= self.nplots-self.ncolumns: #x scales for last ncol plots only
ax.tick_params(direction='in', which='both')
#ax.set_xticks([])
ax.yaxis.get_major_ticks()[0].label1.set_visible(False)
else:
ax.set_xlabel(self.xaxis, size=self.font_size)
#for axlabel in ax.get_xticklabels():
# axlabel.set_visible(True)
ax.tick_params(labelbottom=True)
ax.legend(loc='lower left', fancybox=True, framealpha=0.5, numpoints=1, fontsize=self.legend_size)
@staticmethod
def post_process_plot(fig):
fig.subplots_adjust(hspace=0.0)
@staticmethod
def save_quantities(keyname, results, filename, comment=''):
if keyname in results:
if keyname+'-' in results and keyname+'+' in results:
fields = ('M'+keyname, keyname, keyname+'-', keyname+'+')
header = ', '.join(('Data columns are: <M>', keyname, keyname+'-', keyname+'+', ' '))
elif keyname+'+-' in results:
fields = ('M'+keyname, keyname, keyname+'+-')
header = ', '.join(('Data columns are: <M>', keyname, keyname+'+-', ' '))
else:
fields = ('M'+keyname, keyname)
header = ', '.join(('Data columns are: <M>', keyname, ' '))
            np.savetxt(filename, np.vstack([results[k] for k in fields]).T, fmt='%12.4e', header=header+comment)
def conclude_test(self, output_dir):
self.post_process_plot(self.summary_fig)
self.summary_fig.savefig(os.path.join(output_dir, 'summary.png'))
plt.close(self.summary_fig)
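

# --- Hedged sketch (illustration only; not part of the original test) ---
# Shows how the differential mass function phi is assembled from histogram
# counts in run_on_single_catalog; every number below is made up.
if __name__ == '__main__':
    log_Mlo, log_Mhi, Nbins = 8., 12., 25
    DM = (log_Mhi - log_Mlo) / Nbins                    # bin width in dex
    counts = np.random.RandomState(1).poisson(200, size=Nbins).astype(float)
    volume = 1.0e6                                      # fake sky volume [Mpc^3]
    phi = counts / volume / DM                          # [Mpc^-3 dex^-1]
    phi_errors = np.sqrt(counts) / volume / DM          # Poisson errors
    print(phi[:3], phi_errors[:3])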
| 15,804
| 46.893939
| 133
|
py
|
descqa
|
descqa-master/descqa/__init__.py
|
"""
DESCQA Validation Tests
"""
from .register import *
from .base import *
from .version import __version__
| 109
| 14.714286
| 32
|
py
|
descqa
|
descqa-master/descqa/register.py
|
import os
import importlib
import yaml
from .base import BaseValidationTest
__all__ = ['available_validations', 'load_validation', 'load_validation_from_config_dict']
def load_yaml(yaml_file):
"""
    Load *yaml_file*. Return a dictionary.
"""
with open(yaml_file) as f:
config = yaml.safe_load(f)
return config
def import_subclass(subclass_path, package=None, required_base_class=None):
"""
Import and return a subclass.
*subclass_path* must be in the form of 'module.subclass'.
"""
module, _, subclass_name = subclass_path.rpartition('.')
if package and not module.startswith('.'):
module = '.' + module
subclass = getattr(importlib.import_module(module, package), subclass_name)
if required_base_class:
assert issubclass(subclass, required_base_class), "Provided class is not a subclass of *required_base_class*"
return subclass
def get_available_configs(config_dir, register=None):
"""
Return (or update) a dictionary *register* that contains all config files in *config_dir*.
"""
if register is None:
register = dict()
for config_file in os.listdir(config_dir):
if config_file.startswith('_') or not config_file.lower().endswith('.yaml'):
continue
name = os.path.splitext(config_file)[0]
config = load_yaml(os.path.join(config_dir, config_file))
config['test_name'] = name
register[name] = config
return register
def load_validation_from_config_dict(validation_config):
"""
Load a validation test using a config dictionary.
Parameters
----------
validation_config : dict
a dictionary of config options
    Returns
    -------
validation_test : instance of a subclass of BaseValidationTest
See also
--------
load_catalog()
"""
return import_subclass(validation_config['subclass_name'],
__package__,
BaseValidationTest)(**validation_config)
def load_validation(validation_name, config_overwrite=None):
"""
Load a validation test as specified in one of the yaml file in configs.
Parameters
----------
validation_name : str
name of the validation test (without '.yaml')
config_overwrite : dict, optional
a dictionary of config options to overwrite
    Returns
    -------
validation_test : instance of a subclass of BaseValidationTest
"""
if validation_name.lower().endswith('.yaml'):
validation_name = validation_name[:-5]
if validation_name not in available_validations:
raise KeyError("Validation `{}` does not exist in the register. See `available_validations`.".format(validation_name))
config = available_validations[validation_name]
if config_overwrite:
config = config.copy()
config.update(config_overwrite)
return load_validation_from_config_dict(config)
available_validations = get_available_configs(os.path.join(os.path.dirname(__file__), 'configs'))
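
# --- Hedged usage sketch (illustration only) ---
# 'example_test' and the override below are hypothetical names; substitute a
# real entry from `available_validations`:
#
#   from descqa import available_validations, load_validation
#   print(sorted(available_validations))
#   test = load_validation('example_test', config_overwrite={'ncolumns': 1})
#   result = test.run_on_single_catalog(catalog, 'catalog_name', '/tmp/out')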
| 3,051
| 28.066667
| 126
|
py
|
descqa
|
descqa-master/descqa/ColorRedshiftTest.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import re
import numpy as np
from scipy.stats import binned_statistic as bs
import matplotlib.colors as clr
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['ColorRedshiftTest']
class _CatalogDoesNotHaveQuantity(Exception):
"""Raised when the catalog doesn't have a quantity and indicates the
test should be skipped"""
def __init__(self, quantity_name):
super(_CatalogDoesNotHaveQuantity, self).__init__()
self.message = "Catalog does not have {}".format(quantity_name)
class ColorRedshiftTest(BaseValidationTest):
"""
    This test plots various color-redshift diagnostics
"""
possible_observations = {
'des_fit': {'filename_template':'red_sequence/des/rykoff_et_al_1026',
'keys': (0),
'coefficients': (1, 2, 3, 4),
'skip': 7,
'label': 'DES fit',
'zmin': 0.2,
'zmax': 0.9,
'format': 'fit',
},
'des_y1':{'filename_template':'red_sequence/des/des_y1_redshift_ri_color.txt',
'skip': 1,
'usecols': (0,1),
'colnames': ('z', 'r-i'),
'label':'DES Y1',
'format': 'data',
'bins': [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.9],
},
}
def __init__(self, **kwargs):
super(ColorRedshiftTest, self).__init__()
# load test config options
self.kwargs = kwargs
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.title_in_legend = kwargs.get('title_in_legend', True)
self.font_size = kwargs.get('font_size', 16)
self.text_size = kwargs.get('text_size', 12)
self.legend_size = kwargs.get('legend_size', 13)
self.observations = kwargs.get('observations', None)
#with open(os.path.join(self.data_dir, 'README.md')) as f:
# self.validation_data = f.readline().strip()
self.plot_list = kwargs.get("plot_list", [])
for plot_param in self.plot_list:
color = plot_param['color']
assert (len(color) == 3) and (color[1] == '-'), "Color must be defined as 'a-b', where a and b are band names"
allowed_colors = 'ugrizy'
plot_param['mag1'] = color[0].lower()
plot_param['mag2'] = color[2].lower()
assert (plot_param["mag1"] in allowed_colors) and (plot_param["mag2"] in allowed_colors), "only ugrizy colors are allowed"
assert (plot_param["frame"] in ['rest', 'obs', 'observed', 'observer']), "Only 'rest', 'obs', and 'observed' frames allowed"
plot_param["filter"] = plot_param.get('filter', '').lower()
assert (plot_param["filter"] in ['lsst', 'sdss', 'des']), "Only lsst, sdss, or DES filters allowed"
plot_param["baseDC2"] = plot_param.get('baseDC2', False)
plot_param["central"] = plot_param.get("central", None)
plot_param["Mr_cut"] = plot_param.get("Mr_cut", None)
plot_param["mr_cut"] = plot_param.get("mr_cut", None)
plot_param["stellar_mass_cut"] = plot_param.get("stellar_mass_cut", None)
plot_param["halo_mass_cut"] = plot_param.get("halo_mass_cut", None)
plot_param["red_sequence_cut"] = plot_param.get("red_sequence_cut", None)
plot_param["synthetic_type"] = plot_param.get("synthetic_type", None)
plot_param["log_scale"] = plot_param.get("log_scale", True)
plot_param["redshift_limit"] = plot_param.get("redshift_limit", None)
plot_param["redshift_block_limit"] = plot_param.get("redshift_block_limit", 1)
assert plot_param['redshift_block_limit'] in [1, 2, 3], "redshift_block_limit must be set to 1,2 or 3. It is set to: {}".format(plot_param['redshift_block_limit'])
#read in validation data
self.validation_data = self.get_validation_data(self.observations)
def get_validation_data(self, observations):
validation_data = {}
if observations:
for obs in observations:
data_args = self.possible_observations[obs]
fn = os.path.join(self.data_dir, data_args['filename_template'])
if 'keys' in data_args.keys():
keys = np.genfromtxt(fn, skip_header=data_args['skip'], usecols=data_args['keys'], dtype=str)
coefficients = np.genfromtxt(fn, skip_header=data_args['skip'], usecols=data_args['coefficients'])
validation_data[obs] = dict(zip(keys, coefficients))
else:
validation_data[obs] = dict(zip(data_args['colnames'],
np.loadtxt(fn, skiprows=data_args['skip'],
unpack=True, usecols=data_args['usecols'])))
validation_data[obs]['label'] = data_args['label']
validation_data[obs]['format'] = data_args['format']
if 'zmin' in data_args.keys():
validation_data[obs]['zmin'] = data_args['zmin']
if 'zmax' in data_args.keys():
validation_data[obs]['zmax'] = data_args['zmax']
if 'bins' in data_args.keys():
validation_data[obs]['bins'] = data_args['bins']
return validation_data
def post_process_plot(self, ax):
pass
# ax.text(0.05, 0.95, self.validation_data)
# ax.legend(loc='best')
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
plot_num = 0
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
for plot_param in self.plot_list:
plot_num += 1
if plot_param["frame"] == "rest":
mag_frame = "Mag_true"
mag_end = "_z0"
else:
mag_frame = "mag"
mag_end = ""
mag1_str = "{}_{}_{}{}".format(mag_frame, plot_param["mag1"],
plot_param["filter"], mag_end)
mag2_str = "{}_{}_{}{}".format(mag_frame, plot_param["mag2"],
plot_param["filter"], mag_end)
mag1_val = self._get_quantity(catalog_instance, mag1_str,
redshift_block_limit=plot_param['redshift_block_limit'],
redshift_limit=plot_param['redshift_limit'],)
mag2_val = self._get_quantity(catalog_instance, mag2_str,
redshift_block_limit=plot_param['redshift_block_limit'],
redshift_limit=plot_param['redshift_limit'],)
redshift = self._get_quantity(catalog_instance, 'redshift',
redshift_block_limit=plot_param['redshift_block_limit'],
redshift_limit=plot_param['redshift_limit'],)
clr_val = mag1_val - mag2_val
title = ""
slct, title = self._get_selection_and_title(catalog_instance, title, plot_param,
redshift_limit=plot_param['redshift_limit'],
redshift_block_limit=plot_param['redshift_block_limit'])
fig, ax = plt.subplots()
# for ax_this in (ax, self.summary_ax):
if plot_param['redshift_limit'] is not None:
redshift_bins = np.linspace(0, 1.05*plot_param['redshift_limit'], 256)
elif plot_param['redshift_block_limit'] is not None:
redshift_bins = np.linspace(0, 1.05*(plot_param['redshift_block_limit']), 256)
else:
redshift_bins = np.linspace(0, 1.05, 256)
h, xbins, ybins = np.histogram2d(redshift[slct], clr_val[slct],
bins=(redshift_bins, np.linspace(-0.4, 2.2, 256)))
if plot_param["log_scale"]:
pc = ax.pcolor(xbins, ybins, h.T+1.0, norm=clr.LogNorm())
fig.colorbar(pc, ax=ax).set_label("Population Density + 1")
else:
pc = ax.pcolor(xbins, ybins, h.T)
fig.colorbar(pc, ax=ax).set_label("Population Density")
mag1 = re.split('_', mag1_str)[1] #get filter
mag2 = re.split('_', mag2_str)[1] #get filter
# plot observations
for v in self.validation_data.values():
color=mag1 + '-' + mag2
if v['format'] == 'fit':
coeffs = v[color]
zmask = (redshift_bins >= v['zmin']) & (redshift_bins <= v['zmax'])
obs = np.zeros(len(redshift_bins[zmask]))
for n, coeff in enumerate(coeffs):
obs += coeff*redshift_bins[zmask]**(len(coeffs)-1-n)
ax.plot(redshift_bins[zmask], obs, color='r', label=v['label'])
elif v['format'] == 'data':
if color in v.keys():
zbins = np.asarray(v['bins'])
mean, _, num = bs(v['z'], v[color], bins=zbins)
std, _, num = bs(v['z'], v[color], bins=zbins, statistic='std')
fmask = np.isfinite(mean)
z_cen = 0.5*(zbins[1:]+zbins[:-1])
ax.errorbar(z_cen[fmask], mean[fmask], ls='', marker='o',
yerr=np.sqrt(std[fmask]), c='orange', label=v['label'])
counts = [np.sum(num==i+1) for i in range(len(zbins))]
print(z_cen[fmask], mean[fmask], std[fmask], counts)
legend = ax.legend(loc='lower right', fontsize=self.legend_size)
plt.setp(legend.get_texts(), color='w')
ax.set_ylabel('{} - {}'.format(mag1, mag2), size=self.font_size)
ax.set_xlabel('Redshift $z$', size=self.font_size)
if self.title_in_legend:
title = '{}\n{}'.format(catalog_name, title)
else:
ax.set_title(catalog_name)
ax.text(0.05, 0.95, title, transform=ax.transAxes,
verticalalignment='top', color='white',
fontsize=self.text_size)
fig.savefig(os.path.join(output_dir, 'plot_{}.png'.format(plot_num)))
plt.close(fig)
return TestResult(0, inspect_only=True)
def _get_quantity(self, catalog_instance, quantity_name,
redshift_block_limit=1,
redshift_limit=None):
if not catalog_instance.has_quantities([quantity_name]):
raise _CatalogDoesNotHaveQuantity(quantity_name)
first_name = catalog_instance.first_available(quantity_name)
if redshift_limit is not None:
filters = ["redshift < {}".format(redshift_limit)]
if redshift_limit <= 1:
redshift_block_limit = 1
elif redshift_limit <= 2:
redshift_block_limit = 2
else:
redshift_block_limit = 3
else:
filters = None
native_filters = ['redshift_block_lower <= {}'.format(redshift_block_limit-1)]
return catalog_instance.get_quantities([first_name],
filters=filters,
native_filters=native_filters,)[first_name]
def _get_selection_and_title(self, catalog_instance, title, plot_param,
redshift_block_limit=1,
redshift_limit=None):
# a cheap way to get an array of trues of the correct size
redshift = self._get_quantity(catalog_instance, 'redshift',
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = redshift == redshift
        title_elem_per_line = 3  # aim for about three title elements per line
        title_elem = 0           # number of elements added to the title so far;
                                 # the catalog name is pretty big, so it counts as two elements
if plot_param["central"] is not None:
is_central = self._get_quantity(catalog_instance, 'is_central',
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = slct & (is_central == plot_param["central"])
title += "central = {}, ".format(plot_param["central"])
title_elem += 1
if title_elem % title_elem_per_line == 0:
title += "\n"
if plot_param["Mr_cut"] is not None:
Mag_r = self._get_quantity(catalog_instance, "Mag_true_r_lsst_z0",
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = slct & (Mag_r < plot_param["Mr_cut"])
title += "Mr < {}, ".format(plot_param["Mr_cut"])
title_elem += 1
if title_elem % title_elem_per_line == 0:
title += "\n"
if plot_param["mr_cut"] is not None:
mag_r = self._get_quantity(catalog_instance, "mag_r",
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = slct & (mag_r < plot_param["mr_cut"])
title += "mr < {}, ".format(plot_param["mr_cut"])
title_elem += 1
if title_elem % title_elem_per_line == 0:
title += "\n"
if plot_param["stellar_mass_cut"] is not None:
sm = self._get_quantity(catalog_instance, "stellar_mass",
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = slct & (np.log10(sm) > plot_param["stellar_mass_cut"])
title += "$\\log_{{10}}(M_{{*}}/M_\\odot) > {}$, ".format(plot_param["stellar_mass_cut"])
title_elem += 1
if title_elem % title_elem_per_line == 0:
title += "\n"
if plot_param["halo_mass_cut"] is not None:
halo_mass = self._get_quantity(catalog_instance, "halo_mass",
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = slct & (np.log10(halo_mass) > plot_param["halo_mass_cut"])
title += "$\\log_{{10}}(M_{{halo}}/M_\\odot) > {}$, ".format(plot_param["halo_mass_cut"])
title_elem += 1
if title_elem % title_elem_per_line == 0:
title += "\n"
if plot_param["synthetic_type"] is not None:
upid = self._get_quantity(catalog_instance, "baseDC2/upid",
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = slct & (upid == plot_param["synthetic_type"])
title += "synth = {}, ".format(plot_param["synthetic_type"])
title_elem += 1
if title_elem % title_elem_per_line == 0:
title += "\n"
if plot_param["red_sequence_cut"] is not None:
rs = self._get_quantity(catalog_instance, "baseDC2/is_on_red_sequence_gr",
redshift_limit=redshift_limit,
redshift_block_limit=redshift_block_limit)
slct = slct & (rs == plot_param["red_sequence_cut"])
if plot_param["red_sequence_cut"]:
title += "red sequence galaxies, "
title_elem += 1
if title_elem % title_elem_per_line == 0:
title += "\n"
#remove trailing ", "
title = title[0:-2]
return slct, title
def conclude_test(self, output_dir):
# self.post_process_plot(self.summary_ax)
# self.summary_fig.savefig(os.path.join(output_dir, 'summary.png'))
# plt.close(self.summary_fig)
pass
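

# --- Hedged sketch (illustration only; not part of the original test) ---
# The 'fit'-format validation data above is evaluated as a polynomial in
# redshift (highest power first); the loop in run_on_single_catalog is
# equivalent to np.polyval. The coefficients here are made-up values.
if __name__ == '__main__':
    coeffs = (0.1, -0.3, 1.2, 0.5)
    z = np.linspace(0.2, 0.9, 5)
    obs = np.zeros(len(z))
    for n, coeff in enumerate(coeffs):
        obs += coeff * z**(len(coeffs) - 1 - n)
    assert np.allclose(obs, np.polyval(coeffs, z))
    print(obs)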
| 16,610
| 50.747664
| 175
|
py
|
descqa
|
descqa-master/descqa/StellarMassDistribution.py
|
import os
import numpy as np
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ["StellarMassTest"]
class StellarMassTest(BaseValidationTest):
"""
    This validation test looks at the stellar mass distribution
    of DC2 catalogs to make sure it matches the distribution of
    CMASS galaxies, which are selected with cuts on both the
    magnitudes and colors of galaxies. It also reports the number
    density of galaxies per square degree, and scores the catalog
    by the deviation of its mean log stellar mass from the CMASS
    value.
"""
def __init__(self, **kwargs):
# load validation data
path = os.path.join(self.data_dir, "stellar_mass_dist", "CMASS_data.txt")
self.validation_data = np.loadtxt(path)
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.legend_location = kwargs.get('legend_location', 'upper right')
self.font_size = kwargs.get('font_size', 22)
self.text_size = kwargs.get('text_size', 20)
self.legend_size = kwargs.get('legend_size', 18)
self.Mlo = kwargs.get('Mlo', 10.)
self.Mhi = kwargs.get('Mhi', 12.5)
@staticmethod
def get_smass(catalog_instance):
"""
Parameters
----------
catalog_instance = Catalogue to use
Returns
-------
- log10 of stellar mass with CMASS color and magnitude cuts applied
- number density of galaxies (galaxies per square degree)
"""
gc = catalog_instance
sky_area = float(gc.sky_area)
cols = {
"smass": gc.first_available("stellar_mass"),
"g": gc.first_available("mag_true_g_lsst"),
"r": gc.first_available("mag_true_r_lsst"),
"i": gc.first_available("mag_true_i_lsst"),
}
if not all(cols.values()):
raise KeyError("Not all needed quantities exist!!")
valid_smass = GCRQuery("{smass} > 0".format(**cols))
cmass_cuts = GCRQuery(
"({r} - {i}) - ({g} - {r}) / 8 > 0.55".format(**cols),
"{i} < 19.86 + 1.6 * (({r} - {i}) - ({g} - {r}) / 8 - 0.8)".format(**cols),
"{i} < 19.9".format(**cols),
"{i} > 17.5".format(**cols),
"{r} - {i} < 2".format(**cols),
)
log_smass_cmass = np.log10(gc.get_quantities([cols["smass"]], filters=[valid_smass, cmass_cuts])[cols["smass"]])
print()
print("minimum cmass-cut = ", np.min(log_smass_cmass))
print("maximum cmass-cut = ", np.max(log_smass_cmass))
print()
numDen = len(log_smass_cmass) / sky_area
return log_smass_cmass, numDen
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
log_smass_cmass, numDen = self.get_smass(catalog_instance)
if self.truncate_cat_name:
catalog_name = catalog_name.partition("_")[0]
print(catalog_name)
x = self.validation_data[:, 0]
y = self.validation_data[:, 1]
plt.figure(1, figsize=(12, 6))
plt.hist(x, bins=x, weights=y, histtype="step", color="teal", density=True, linewidth=2, label="CMASS")
plt.hist(log_smass_cmass, bins=np.linspace(self.Mlo, self.Mhi, 50), color="orange", linewidth=2, density=True,
histtype="step", label=catalog_name)
text = '{}: {:.1f} gals/sq. deg.\nCMASS: 101 gals/sq. deg.'.format(catalog_name, numDen)
#plt.title(f"n[{catalog_name} = {numDen:.1f} , CMASS = 101] gals/sq deg")
ax = plt.gca()
plt.text(0.01, 0.86, text, fontsize=self.text_size, transform=ax.transAxes)
plt.xlabel(r"$\log_{10}(M^*/M_{\odot})$", size=self.font_size)
plt.ylabel("$N$", size=self.font_size)
plt.xlim(self.Mlo + 0.3, self.Mhi - 0.3)
plt.legend(loc=self.legend_location, fontsize=self.legend_size)
plt.savefig(os.path.join(output_dir, "Mstellar_distribution.png"))
plt.close()
# CMASS stellar mass mean
log_cmass_mean = 11.25
# score is defined as error away from CMASS stellar mass mean
score = (np.mean(log_smass_cmass) - log_cmass_mean) / log_cmass_mean
return TestResult(score=score, passed=(score < 1.0))
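

# --- Hedged sketch (illustration only; not part of the original test) ---
# The CMASS selection above, rewritten as plain numpy masks on made-up
# magnitudes, to make the color/magnitude cuts explicit.
if __name__ == '__main__':
    rng = np.random.RandomState(3)
    g, r, i = (rng.uniform(16, 22, 1000) for _ in range(3))
    dperp = (r - i) - (g - r) / 8.0
    cmass = ((dperp > 0.55)
             & (i < 19.86 + 1.6 * (dperp - 0.8))
             & (i < 19.9) & (i > 17.5) & (r - i < 2))
    print('selected:', cmass.sum(), 'of', cmass.size)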
| 4,241
| 36.539823
| 120
|
py
|
descqa
|
descqa-master/descqa/CheckAstroPhoto.py
|
from __future__ import print_function, division
import os
import numpy as np
from scipy.stats import binned_statistic
from CatalogMatcher.match import spatial_closest # https://github.com/LSSTDESC/CatalogMatcher
from GCR import GCRQuery
from .base import BaseValidationTest, TestResult
from .plotting import plt
from matplotlib.ticker import NullFormatter
__all__ = ['CheckAstroPhoto']
nullfmt = NullFormatter()
class CheckAstroPhoto(BaseValidationTest):
"""
Validation test to compare astrometric and photometric results between
two different datasets.
"""
def __init__(self, **kwargs):
#pylint: disable=W0231
self.kwargs = kwargs
self.min_mag = kwargs['min_mag'] # Minimum magnitude to bin the sample
self.max_mag = kwargs['max_mag'] # Maximum magnitude to bin the sample
self.nbins = kwargs['nbins_mag'] # Number of bins
self.ra=dict() # Here we are going to store the RA for all catalogs
self.dec=dict() # Here we are going to store the DEC for all catalogs
self.magnitude=dict() # Here we are going to store the magnitude (in different bands for all catalogs)
self.selection_cuts = kwargs['selection_cuts'] # Selection cuts to perform on the data sample
self.bands = kwargs['bands'] # Photometric band(s) to analyze
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
mags = {catalog_instance.first_available('mag_{}_cModel'.format(b), 'mag_true_{}'.format(b)): 'mag_{}'.format(b) for b in self.bands}
qs = ['ra', 'dec']
qs = qs + list(mags)
# Trick to read both true and observed magnitudes by @Yao
filters = [GCRQuery(self.selection_cuts)]
data = catalog_instance.get_quantities(qs, filters=filters)
data = {mags.get(k, k): v for k, v in data.items()}
        print('Selected %d objects for catalog %s' % (len(data['ra']), catalog_name))
self.ra[catalog_name] = data['ra']
self.dec[catalog_name] = data['dec']
for band in self.bands:
self.magnitude[(catalog_name, band)] = data['mag_%s' % band]
return TestResult(inspect_only=True)
def scatter_project(self, x, y, xmin, xmax, ymin, ymax, nbins, xlabel, ylabel, savename, bin_stat=False):
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
fig = plt.figure()
axScatter = plt.axes(rect_scatter)
axHistx = plt.axes(rect_histx)
axHisty = plt.axes(rect_histy)
axHistx.xaxis.set_major_formatter(nullfmt)
axHisty.yaxis.set_major_formatter(nullfmt)
#axScatter.scatter(x, y, s=1., alpha=0.5)
hexplot = axScatter.hexbin(x, y, gridsize=6*nbins, extent=(xmin, xmax, ymin, ymax))
plt.colorbar(hexplot, label='Objects/bin')
if bin_stat:
mean_y, be, _ = binned_statistic(x, y, range=(xmin, xmax), bins=nbins, statistic='median')
std_y, be, _ = binned_statistic(x, y, range=(xmin, xmax), bins=nbins, statistic='std')
n_y, be, _ = binned_statistic(x, y, range=(xmin, xmax), bins=nbins, statistic='count')
axScatter.errorbar(0.5*(be[1:]+be[:-1]), mean_y, std_y/np.sqrt(n_y), marker='o', linestyle='none', color='red')
axScatter.autoscale(tight=True)
axScatter.set_xlim((xmin, xmax))
axScatter.set_ylim((ymin, ymax))
axScatter.set_xlabel(xlabel)
axScatter.set_ylabel(ylabel)
axHistx.hist(x, bins=nbins, range=(xmin, xmax))
axHisty.hist(y, bins=nbins, range=(ymin, ymax), orientation='horizontal')
axHistx.autoscale(tight=True)
axHisty.autoscale(tight=True)
axHistx.set_xlim(axScatter.get_xlim())
axHisty.set_ylim(axScatter.get_ylim())
plt.savefig(savename)
plt.close(fig)
def conclude_test(self, output_dir):
"""
This function should gather the two catalogs, match them and perform the summary plots
"""
if len(self.ra) != 2: # Making sure that we have *just* two catalogs
raise ValueError('The test can compare two catalogs only!')
cat_names = list(self.ra) # This is an auxiliary list to easily get the catalogs
cat_names = sorted(cat_names, key=lambda name: len(self.ra[name]))
# For this test we are going to match using closest neighbor since it is the fastest but it can be easily
# swapped for any other matching strategy
matched_id = spatial_closest(self.ra[cat_names[0]], self.dec[cat_names[0]],
self.ra[cat_names[1]], self.dec[cat_names[1]],
np.arange(len(self.ra[cat_names[1]])))[1]
delta_ra = self.ra[cat_names[0]]-self.ra[cat_names[1]][matched_id]
delta_dec = self.dec[cat_names[0]]-self.dec[cat_names[1]][matched_id]
delta_mag = dict()
good_mag = dict()
for band in self.bands:
delta_mag[band] = self.magnitude[(cat_names[0], band)]-self.magnitude[(cat_names[1],band)][matched_id]
            good_mag[band] = np.isfinite(self.magnitude[(cat_names[0], band)])
# Scatter plot + histogram of RA and Dec (assumed to be in degrees)
astro_savename = os.path.join(output_dir, 'astrometry_check_%s_%s.png' % (cat_names[0], cat_names[1]))
self.scatter_project(delta_ra*3600, delta_dec*3600, -0.5, 0.5, -0.5, 0.5, 100, r'$\Delta$ RA [arcsec]',
r'$\Delta$ Dec [arcsec]', astro_savename)
# Scatter plot + histogram of Delta mag vs mag
for band in self.bands:
photo_savename = os.path.join(output_dir, 'photometry_check_%s_%s_%s.png' % (cat_names[0], cat_names[1], band))
self.scatter_project(self.magnitude[(cat_names[0], band)][good_mag[band]],
delta_mag[band][good_mag[band]], self.min_mag, self.max_mag,
-1, 1, self.nbins, '%s' % band, r'$\Delta %s$' % band, photo_savename, bin_stat=True)
n_true, _ = np.histogram(self.magnitude[(cat_names[1], band)], bins=50, range=(10, 30))
n_meas, bin_edges = np.histogram(self.magnitude[(cat_names[0], band)], bins=50, range=(10, 30))
fig = plt.figure()
plt.plot(0.5*(bin_edges[1:]+bin_edges[:-1]), n_meas.astype(float)/n_true,'o')
plt.xlabel('{}'.format(band))
plt.ylabel('Ratio of detected over input objects')
plt.ylim(0,1)
plt.tight_layout()
photo_savename = os.path.join(output_dir, 'mag_ratio_%s_%s_%s.png' % (cat_names[0], cat_names[1], band))
plt.savefig(photo_savename)
plt.close(fig)
| 6,989
| 48.928571
| 142
|
py
|
descqa
|
descqa-master/descqa/apparent_mag_func_test.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import re
import numpy as np
from scipy.interpolate import interp1d
from .utils import get_sky_area
from .base import BaseValidationTest, TestResult
from .plotting import plt
possible_observations = {
'HSC': {
'filename_template': 'apparent_mag_func/HSC/hsc_{}_n.dat',
'usecols': (0, 1, 2),
'colnames': ('mag', 'n(<mag)', 'err', 'data', 'data_err', 'power_law'),
'skiprows': 1,
'label': 'HSC extrapolated (desqagen 2018)',
}
}
__all__ = ['ApparentMagFuncTest']
class ApparentMagFuncTest(BaseValidationTest):
"""
cumulative apparent magnitude function test
"""
def __init__(self, band='r', band_lim=(24.0, 27.5), fractional_tol=0.4, observation='HSC', **kwargs):
"""
parameters
----------
band : string
photometric band
        band_lim : tuple of float
            apparent magnitude lower and upper limits
        fractional_tol : float
            fractional tolerance allowed between the mock and validation apparent mag functions for the test to pass
        observation : string
            string indicating which observational data to use for validating
"""
# pylint: disable=super-init-not-called
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.title_in_legend = kwargs.get('title_in_legend', False)
self.skip_label_detail = kwargs.get('skip_label_detail', False)
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 10)
self.x_lower_limit = kwargs.get('x_lower_limit', 15)
self.print_title = kwargs.get('print_title', False)
self.min_mag = kwargs.get('min_mag', 19.)
self.replace_cat_name = kwargs.get('replace_cat_name', {})
# catalog quantities needed
possible_mag_fields = ('mag_{}_cModel',
'mag_{}_lsst',
'mag_true_{}_lsst',
'mag_{}_sdss',
'mag_true_{}_sdss',
'mag_{}_des',
'mag_true_{}_des',
'mag_{}_hsc',
'mag_true_{}_hsc',
)
self.possible_mag_fields = [f.format(band) for f in possible_mag_fields]
# attach some attributes to the test
self.band = band
self.band_lim = list(band_lim)
self.fractional_tol = fractional_tol
# set color of lines in plots
colors = plt.cm.jet(np.linspace(0, 1, 5)) # pylint: disable=no-member
if band == 'g': self.line_color = colors[0]
elif band == 'r': self.line_color = colors[1]
elif band == 'i': self.line_color = colors[2]
elif band == 'z': self.line_color = colors[3]
elif band == 'y': self.line_color = colors[4]
else: self.line_color = 'black'
# check for validation observation
if not observation:
            print('Warning: no observation requested and no data file supplied; only catalog data will be shown.')
elif observation not in possible_observations:
raise ValueError('Observation: {} not available for this test.'.format(observation))
else:
self.validation_data = self.get_validation_data(band, observation)
# prepare summary plot
self.summary_fig = plt.figure()
upper_rect = 0.2, 0.4, 0.7, 0.55
lower_rect = 0.2, 0.125, 0.7, 0.275
self.summary_upper_ax, self.summary_lower_ax = self.summary_fig.add_axes(upper_rect), self.summary_fig.add_axes(lower_rect)
def get_validation_data(self, band, observation):
"""
load (observational) data to use for validation test
"""
data_args = possible_observations[observation]
data_path = os.path.join(self.data_dir, data_args['filename_template'].format(band))
if not os.path.exists(data_path):
raise ValueError("{}-band data file {} not found".format(band, data_path))
if not os.path.getsize(data_path):
raise ValueError("{}-band data file {} is empty".format(band, data_path))
data = np.loadtxt(data_path, unpack=True, usecols=data_args['usecols'], skiprows=data_args['skiprows'])
validation_data = dict(zip(data_args['colnames'], data))
validation_data['label'] = data_args['label'] if not self.skip_label_detail else data_args['label'].rpartition('(')[0]
return validation_data
def post_process_plot(self, upper_ax, lower_ax):
"""
"""
#upper panel
lgnd_title = ''
title = str(self.band_lim[0]) + ' < '+self.band + ' < ' + str(self.band_lim[1])
if self.title_in_legend:
lgnd_title = title
elif self.print_title:
upper_ax.set_title(title)
upper_ax.legend(loc='upper left', title=lgnd_title, fontsize=self.legend_size)
upper_ax.set_ylabel(r'$n(< {\rm mag}) ~[{\rm deg^{-2}}]$', size=self.font_size)
upper_ax.xaxis.set_visible(False)
upper_ax.set_ylim([1000, 10**7])
        #upper_ax.set_ylim([1, 10**5]) #use to check bright end of distribution
upper_ax.fill_between([self.band_lim[0], self.band_lim[1]], [0, 0], [10**9, 10**9], alpha=0.1, color='grey')
upper_ax.set_yscale('log')
upper_ax.set_xlim([self.x_lower_limit, 30])
#lower panel
lower_ax.fill_between([self.band_lim[0], self.band_lim[1]], [-1, -1], [1, 1], alpha=0.1, color='grey')
lower_ax.set_xlabel(self.band + ' magnitude', size=self.font_size)
lower_ax.set_ylabel(r'$\Delta n/n$', size=self.font_size)
lower_ax.set_ylim([-1, 1])
        #lower_ax.set_ylim([0.0, 10]) #use to check bright end of distribution
lower_ax.set_yticks([-0.6, 0.0, 0.6])
lower_ax.set_xlim([self.x_lower_limit, 30])
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
"""
"""
mag_field_key = catalog_instance.first_available(*self.possible_mag_fields)
if not mag_field_key:
return TestResult(skipped=True, summary='Catalog is missing requested quantity: {}'.format(self.possible_mag_fields))
# check to see if catalog is a light cone
# this is required since we must be able to calculate the angular area
# if attribute `lightcone` does not exist, allow the catalog to proceed
if not getattr(catalog_instance, 'lightcone', True):
return TestResult(skipped=True, summary="Catalog is not a light cone.")
# obtain or calculate sky area
sky_area = getattr(catalog_instance, 'sky_area', None)
if sky_area is None:
if not catalog_instance.has_quantities(['ra', 'dec']):
return TestResult(skipped=True, summary="'ra' and/or 'dec' not available to compute sky area")
sky_area = get_sky_area(catalog_instance) # compute area from ra and dec
sky_area_label = ' (Sky Area = {:.1f} $\\rm deg^2$)'.format(sky_area)
#####################################################
        # calculate the cumulative number density of galaxies
#####################################################
        # filter on extended sources if quantity is available in catalog (e.g. in object catalog)
filters = ['extendedness == 1'] if catalog_instance.has_quantity('extendedness') else None
        # retrieve data from mock catalog
d = catalog_instance.get_quantities([mag_field_key], filters=filters)
m = d[mag_field_key]
m = np.sort(m) # put into order--bright to faint
# get the total number of galaxies in catalog
N_tot = len(m)
N = np.cumsum(np.ones(N_tot))/sky_area
# define the apparent magnitude bins for plotting purposes
dmag = 0.1 # bin widths
max_mag = self.band_lim[1] + 1.0 # go one mag beyond the limit
min_mag = self.min_mag # start at bright galaxies
mag_bins = np.arange(min_mag, max_mag, dmag)
# calculate N(<mag) at the specified points
inds = np.searchsorted(m, mag_bins)
mask = (inds >= len(m))
        inds[mask] = -1  # clamp past-the-end indices so such bins get the total count
sampled_N = N[inds]
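        # Added aside -- a tiny, hedged illustration (not used by the test) of the
        # searchsorted-based sampling above; `_demo_sampled_counts` is hypothetical.
        def _demo_sampled_counts():
            sorted_mags = np.array([18.0, 19.5, 21.0, 22.5])  # already sorted
            bins = np.array([19.0, 22.0, 30.0])
            cum = np.cumsum(np.ones(len(sorted_mags)))        # 1, 2, 3, 4
            idx = np.searchsorted(sorted_mags, bins)          # 1, 3, 4
            idx[idx >= len(sorted_mags)] = -1                 # clamp past-the-end to the last entry
            return cum[idx]                                   # -> [2., 4., 4.]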
#################################################
# plot the cumulative apparent magnitude function
#################################################
fig = plt.figure()
upper_rect = 0.2, 0.4, 0.7, 0.55
lower_rect = 0.2, 0.125, 0.7, 0.275
upper_ax, lower_ax = fig.add_axes(upper_rect), fig.add_axes(lower_rect)
# plot on both this plot and any summary plots
if self.truncate_cat_name:
catalog_name = re.split('_', catalog_name)[0]
if self.replace_cat_name:
for k, v in self.replace_cat_name.items():
catalog_name = re.sub(k, v, catalog_name)
upper_ax.plot(mag_bins, sampled_N, '-', label=catalog_name + sky_area_label)
self.summary_upper_ax.plot(mag_bins, sampled_N, '-', label=catalog_name + sky_area_label)
# plot validation data
n = self.validation_data['n(<mag)']
m = self.validation_data['mag']
upper_ax.plot(m, n, '-', label=self.validation_data['label'], color='black')
upper_ax.fill_between(m, n-self.fractional_tol*n, n+self.fractional_tol*n, color='black', alpha=0.25)
#################################
# determine if the catalog passes
#################################
# interpolate the validation data in order to compare to the mock catalog at same points
non_zero_mask = (self.validation_data['n(<mag)'] > 0.0)
x = self.validation_data['mag'][non_zero_mask]
y = np.log10(self.validation_data['n(<mag)'])[non_zero_mask]
f_xy = interp1d(x, y, fill_value='extrapolate')
nn = 10**f_xy(mag_bins)
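        # Added aside -- a hedged sketch (not part of the test) of the log-space
        # interpolation used above; `_demo_log_interp` is a hypothetical name.
        def _demo_log_interp():
            x = np.array([20.0, 22.0, 24.0])
            y = np.array([1.0e3, 1.0e4, 1.0e5])      # one dex per two magnitudes
            f = interp1d(x, np.log10(y), fill_value='extrapolate')
            return 10**f(np.array([23.0, 25.0]))     # ~ [3.16e4, 3.16e5]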
        # calculate the fractional difference between the mock catalog and validation data
delta = (sampled_N-nn)/nn
# find maximum fractional difference in test range
test_range_mask = (mag_bins >= self.band_lim[0]) & (mag_bins <= self.band_lim[1])
max_frac_diff = np.max(np.fabs(delta[test_range_mask]))
# plot on both this plot and any summary plots
lower_ax.fill_between(m, 0.0*m-self.fractional_tol, 0.0*m+self.fractional_tol, color='black', alpha=0.25)
lower_ax.plot(m, m*0.0, '-', color='black')
lower_ax.plot(mag_bins, delta, '-')
self.summary_lower_ax.plot(mag_bins, delta, '-', label=catalog_name)
        # apply 'passing' criterion
        score = max_frac_diff
        passed = (max_frac_diff <= self.fractional_tol)
self.post_process_plot(upper_ax, lower_ax)
fig.savefig(os.path.join(output_dir, 'cumulative_app_mag_plot.png'))
plt.close(fig)
return TestResult(score, passed=passed)
def conclude_test(self, output_dir):
"""
"""
        # plot verification data on summary plot
n = self.validation_data['n(<mag)']
m = self.validation_data['mag']
self.summary_upper_ax.plot(m, n, '-', label=self.validation_data['label'], color='black')
self.summary_upper_ax.fill_between(m, n-self.fractional_tol*n, n+self.fractional_tol*n, color='black', alpha=0.25)
self.summary_lower_ax.fill_between(m, 0.0*m-self.fractional_tol, 0.0*m+self.fractional_tol, color='black', alpha=0.25)
self.summary_lower_ax.plot(m, m*0.0, '-', color='black')
self.post_process_plot(self.summary_upper_ax, self.summary_lower_ax)
self.summary_fig.savefig(os.path.join(output_dir, 'summary.png'))
plt.close(self.summary_fig)
| 11,963
| 41.728571
| 131
|
py
|
descqa
|
descqa-master/descqa/example_test.py
|
from __future__ import unicode_literals, absolute_import, division
import os
import numpy as np
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['ExampleTest']
class ExampleTest(BaseValidationTest):
"""
An example validation test
"""
def __init__(self, **kwargs):
# load test config options
self.kwargs = kwargs
self.option1 = kwargs.get('option1', 'option1_default')
self.option2 = kwargs.get('option2', 'option2_default')
self.test_name = kwargs.get('test_name', 'example_test')
# load validation data
with open(os.path.join(self.data_dir, 'README.md')) as f:
self.validation_data = f.readline().strip()
# prepare summary plot
self.summary_fig, self.summary_ax = plt.subplots()
def post_process_plot(self, ax):
ax.text(0.05, 0.95, self.validation_data)
ax.legend()
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
# check if needed quantities exist
if not catalog_instance.has_quantities(['ra', 'dec']):
return TestResult(skipped=True, summary='do not have needed quantities')
data = np.random.rand(10) #do your calculation with catalog_instance
fig, ax = plt.subplots()
for ax_this in (ax, self.summary_ax):
ax_this.plot(data, label=catalog_name)
self.post_process_plot(ax)
fig.savefig(os.path.join(output_dir, 'plot.png'))
plt.close(fig)
score = data[0] #calculate your summary statistics
return TestResult(score, passed=True)
def conclude_test(self, output_dir):
self.post_process_plot(self.summary_ax)
self.summary_fig.savefig(os.path.join(output_dir, 'summary.png'))
plt.close(self.summary_fig)
| 1,841
| 29.7
| 84
|
py
|
descqa
|
descqa-master/descqa/PositionAngle.py
|
import os
from itertools import count
import numpy as np
import scipy.stats
from .base import BaseValidationTest, TestResult
from .plotting import plt
__all__ = ['PositionAngle']
class PositionAngle(BaseValidationTest):
"""
    validation test to check that the distribution of galaxy position angles is random.
"""
def __init__(self, **kwargs):
#pylint: disable=W0231
#validation data: a uniform distribution on the half-circle
self.uniform_degrees = scipy.stats.uniform(0, 180.).cdf
self.uniform_radians = scipy.stats.uniform(0, np.pi).cdf
self.acceptable_keys = kwargs['possible_position_angle_fields']
self.cutoff = kwargs['p_cutoff']
self.max_size = kwargs.get('max_size', 5e6)
self._color_iterator = ('C{}'.format(i) for i in count())
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
# update color and marker to preserve catalog colors and markers across tests
catalog_color = next(self._color_iterator)
# check catalog data for required quantities
key = catalog_instance.first_available(*self.acceptable_keys)
if not key:
summary = 'Missing required quantity ' + ' or '.join(['{}']*len(self.acceptable_keys))
return TestResult(skipped=True, summary=summary.format(*self.acceptable_keys))
# remove ultra-faint synthetics if present in catalog
if catalog_instance.has_quantity('baseDC2/halo_id'):
filters = [(lambda z: (z > -20), 'baseDC2/halo_id')]
elif catalog_instance.has_quantity('base5000/halo_id'):
filters = [(lambda z: (z > -20), 'base5000/halo_id')]
else:
filters = None
# get data
catalog_data = catalog_instance.get_quantities(key, filters=filters)
pos_angles = catalog_data[key]
is_degrees = np.max(pos_angles) > 2*np.pi
        good_data_mask = np.isfinite(pos_angles)
        # downsample data to at most max_size objects to get reliable p values
        dlen = len(pos_angles)
        fraction = min(self.max_size/float(dlen), 1.0)
        index = (np.random.sample(dlen) < fraction)
        test_angles = pos_angles[index]
        print('Downsampling catalog data to at most {} objects for p-value statistic'.format(self.max_size))
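        # Added aside -- hedged illustration (not used by the test) of the
        # random-mask downsampling above; `_demo_downsample` is hypothetical.
        def _demo_downsample(values, max_size):
            frac = min(max_size / float(len(values)), 1.0)
            keep = np.random.sample(len(values)) < frac   # uniform [0, 1) draws
            return values[keep]                           # expected size ~ min(len(values), max_size)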
if is_degrees:
ks_results = scipy.stats.kstest(test_angles, self.uniform_degrees)
else:
ks_results = scipy.stats.kstest(test_angles, self.uniform_radians)
fig = plt.figure()
ax = fig.add_subplot(111)
N, _, _ = ax.hist(pos_angles[good_data_mask], bins=20, color=catalog_color, edgecolor='black')
if is_degrees:
ax.set_xlabel("Angle [deg]")
else:
ax.set_xlabel("Angle [rad]")
ax.set_ylabel("N")
ax.set_ylim(0, np.max(N)*1.15)
ax.text(0.95, 0.96, 'Uniform distribution: $p={:.3f}$'.format(ks_results[1]),
horizontalalignment='right', verticalalignment='top',
                    transform=ax.transAxes)
fig.savefig(os.path.join(output_dir, 'position_angle_{}.png'.format(catalog_name)))
plt.close(fig)
return TestResult(score=ks_results[1], passed=(ks_results[1]>self.cutoff))
| 3,335
| 40.7
| 102
|
py
|
descqa
|
descqa-master/descqa/virialscale_test.py
|
from __future__ import unicode_literals, absolute_import, division
from .plotting import plt
from .base import BaseValidationTest, TestResult
import numpy as np
import matplotlib
from matplotlib.colors import LogNorm
from astropy.cosmology import WMAP7 as cosmo # pylint: disable=E0611
import astropy.stats as stat
import os
__all__ = ['VirialScaling']
class VirialScaling(BaseValidationTest):
"""
Tests the relationship between FOF halo mass and velocity dispersion of cluster member galaxies
"""
def __init__(self, masscut, c_axis, stellarcut, disp_func,**kwargs):
        '''initialize the test with the following quantities:
        masscut: include halos with masses at least as large as the specified value
        c_axis: pass 'number' for the color axis to display cluster galaxy counts, and 'redshift' for cluster redshifts
        stellarcut: disregard member galaxies with stellar masses below the specified value
        disp_func: pass 'biweight' to calculate velocity dispersion using biweight scale, defaults to standard deviation'''
self.stellarcut = stellarcut
self.masscut = masscut
self.c_axis = c_axis
self.disp_func = disp_func
self.summary_fig, self.summary_ax = plt.subplots()
self.truncate_cat_name = kwargs.get('truncate_cat_name', False)
self.font_size = kwargs.get('font_size', 16)
self.legend_size = kwargs.get('legend_size', 10)
self.convert_fof = kwargs.get('convert_fof', 1.0)
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
'''collect quantities and plot the relationship between velocity dispersion and halo mass'''
        if not catalog_instance.has_quantities(['halo_mass', 'halo_id', 'stellar_mass', 'velocity_x', 'velocity_y', 'velocity_z', 'redshift']):
return TestResult(skipped = True, summary = 'do not have needed quantities')
#list containing each galaxy's larger cluster mass
complete_mass = catalog_instance.get_quantities('halo_mass')['halo_mass']
#sort the complete_mass list
#make a list of indices corresponding to halos that make the mass cut
mass_indices = np.argsort(complete_mass)
complete_mass = complete_mass[mass_indices]
complete_mass = np.array(complete_mass)
start_index = np.searchsorted(complete_mass, self.masscut)
cut_indices = list(range(start_index, np.size(complete_mass)))
#make a list of each galaxy's halo ID, sort it in same fashion as mass, and cut according to mass cut
complete_id_list = catalog_instance.get_quantities('halo_id')['halo_id']
complete_id_list = complete_id_list[mass_indices]
cut_id_list = complete_id_list[cut_indices]
#sort the cut_id_list in increasing order and get the unique values to prepare for looping
indexing_indices = np.argsort(cut_id_list)
cut_id_list = cut_id_list[indexing_indices]
unique_id_list = np.unique(cut_id_list)
#make a list of masses that make the mass cut and are sorted in the same fashion as the id list
cut_masses = complete_mass[cut_indices][indexing_indices]
#make a list of galaxy masses, used to check galaxy mass and disregard any galaxies with low stellar masses
stellar_masses = catalog_instance.get_quantities('stellar_mass')
stellar_masses = stellar_masses['stellar_mass'][mass_indices][cut_indices][indexing_indices]
#list to contain velocity magnitudes of galaxies within a cluster
vel_mag_list = np.array([])
#list to contain velocity dispersions of cluster galaxies
vel_dispersion = np.array([])
#list to contain the masses of clusters to be plotted
mass = np.array([])
#fetch and sort each velocity component for each galaxy according to the id list
vx = catalog_instance.get_quantities('velocity_x')
vx_list = vx['velocity_x'][mass_indices][cut_indices][indexing_indices]
vy = catalog_instance.get_quantities('velocity_y')
vy_list = vy['velocity_y'][mass_indices][cut_indices][indexing_indices]
vz = catalog_instance.get_quantities('velocity_z')
vz_list = vz['velocity_z'][mass_indices][cut_indices][indexing_indices]
#Get a list of redshifts for each galaxy in the catalog, sorted according to the ID's
redshifts = catalog_instance.get_quantities('redshift')['redshift']
redshifts = redshifts[mass_indices][cut_indices][indexing_indices]
largest = 0
smallest = np.max(cut_masses)
#list to contain redshifts of all galaxies within a particular cluster
redshift_list = np.array([])
#list to contain representative redshift for each cluster (determined from galaxy redshifts)
median_r = np.array([])
#galaxy counter and list to store number of galaxies in each cluster
galaxy_num = 0
galaxy_num_list = np.array([])
#function used to calculate velocity dispersion
def dispersion(val_array):
if(self.disp_func == "biweight"):
dis = stat.biweight_scale(val_array)
else:
dis = np.std(val_array)
return dis
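        # Added aside -- hedged illustration (not used by the test): the biweight
        # scale is robust to outliers, unlike np.std; `_demo_robust_scale` is a
        # hypothetical name.
        def _demo_robust_scale():
            rng = np.random.RandomState(0)
            v = np.append(rng.normal(0., 100., 500), [5000.])   # one interloper
            return np.std(v), stat.biweight_scale(v)            # std is inflated; biweight stays near 100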
#for each cluster above the mass cut
for unique_id in unique_id_list:
#find location of this cluster's galaxies in the list
index = np.searchsorted(cut_id_list, unique_id)
#add the cluster's mass to a list
mass = np.append(mass, cut_masses[index])
#for every galaxy that is part of the same cluster, make list of velocity magnitudes,
#galaxy counts, and redshifts
while unique_id == cut_id_list[index]:
if(stellar_masses[index] > self.stellarcut):
vel_mag = np.sqrt(np.power(vx_list[index],2)+np.power(vy_list[index],2)+np.power(vz_list[index],2))
vel_mag_list = np.append(vel_mag_list, vel_mag)
redshift_list = np.append(redshift_list, redshifts[index])
galaxy_num += 1
index+=1
if (index == np.size(cut_id_list)):
break
#append each calculated value for the cluster to the proper list
galaxy_num_list = np.append(galaxy_num_list, galaxy_num)
mask_num = galaxy_num_list>5
galaxy_num = 0
vel_dispersion = np.append(vel_dispersion, dispersion(vel_mag_list))
#use a representative, robust estimate of redshift for the whole cluster
median_r = np.append(median_r, np.median(redshift_list))
#la = cut_masses[index-1]*(cosmo.H(np.median(redshift_list)).value/100)
#sm = cut_masses[index-1]*(cosmo.H(np.median(redshift_list)).value/100)
if(cut_masses[index-1]*((cosmo.H(np.median(redshift_list))).value/100)>largest):
largest = cut_masses[index-1]*(cosmo.H(np.median(redshift_list)).value/100)
if(cut_masses[index-1]*((cosmo.H(np.median(redshift_list))).value/100)<smallest):
smallest = cut_masses[index-1]*(cosmo.H(np.median(redshift_list)).value/100)
#reset lists to empty for the next iteration/cluster
vel_mag_list = np.array([])
redshift_list = np.array([])
#fig, ax = plt.subplots(nrows=1,ncols=1)
#make different plots depending on what you want the color axis to show
x_axis = np.multiply(mass[mask_num], (cosmo.H(median_r[mask_num]).value/100))
        if self.truncate_cat_name:
            catalog_name = catalog_name.partition('_')[0]
        else:
            catalog_name = catalog_name + ' cluster'
if (self.c_axis == 'number'):
img = self.summary_ax.scatter(x_axis, vel_dispersion[mask_num], c = galaxy_num_list[mask_num], norm = LogNorm(), label = catalog_name)
elif(self.c_axis == 'redshift'):
img =self.summary_ax.scatter(x_axis, vel_dispersion[mask_num], c = median_r[mask_num], norm = LogNorm(), label = catalog_name)
#save halo masses, normalized hubble parameters, and velocity dispersion of galaxies for each cluster
self.mass_col = mass[mask_num]
self.norm_h_col = cosmo.H(median_r[mask_num]).value/100
self.vel_disp_col = vel_dispersion[mask_num]
self.median_r_col = median_r[mask_num]
self.galaxy_num_col = np.around(galaxy_num_list[mask_num], decimals = 0)
if (self.c_axis == 'number'):
np.savetxt(os.path.join(output_dir, 'summary.txt'), np.c_[self.mass_col, self.norm_h_col, self.vel_disp_col, self.galaxy_num_col], fmt = '%12.4e', header = 'HALO_MASS // CLUSTER_NORMALIZED_H // CLUSTER_VELOCITY_DISPERSION // CLUSTER_GALAXY_COUNT')
elif (self.c_axis == 'redshift'):
            np.savetxt(os.path.join(output_dir, 'summary.txt'), np.c_[self.mass_col, self.norm_h_col, self.vel_disp_col, self.median_r_col], fmt = '%12.4e', header = 'HALO_MASS // CLUSTER_NORMALIZED_H // CLUSTER_VELOCITY_DISPERSION // CLUSTER_MEDIAN_REDSHIFT')
#make plot
x = np.linspace(smallest*.75, largest*1.5)
        # avoid eval(): compute the Evrard et al. (2008) relation directly
        self.summary_ax.plot(x, 1082*(x/10**15)**0.3361*self.convert_fof, c = "red",
                             label = "{:.2f}*(Evrard et al. 2008)".format(self.convert_fof))
        self.summary_ax.legend(loc='best', fontsize=self.legend_size)
bar = self.summary_fig.colorbar(img, ax = self.summary_ax)
self.summary_ax.set_xscale('log')
self.summary_ax.set_ylim(np.min(vel_dispersion[mask_num])*.3, np.max(vel_dispersion[mask_num])*5)
self.summary_ax.set_xlim(smallest*.75, largest*1.5)
self.summary_ax.set_yscale('log')
self.summary_ax.set_xlabel(r'$h(z)M_{\rm FoF}\quad [M_{\odot}]$', size=self.font_size)
self.summary_ax.set_ylabel(r'$\sigma_v\quad {\rm [km/s]}$', size=self.font_size)
#label color axis depending on what you want to show
if(self.c_axis == 'number'):
bar.ax.set_ylabel('galaxies per cluster', size=self.font_size)
else:
bar.ax.set_ylabel('median redshift', size=self.font_size)
plt.tight_layout()
plt.savefig(os.path.join(output_dir, 'mass_virial_scaling.png'))
plt.close()
return TestResult(inspect_only = True)
def conclude_test(self, output_dir):
'''conclude the test'''
self.summary_fig.savefig(os.path.join(output_dir, 'mass_virial_scaling.png'))
if (self.c_axis == 'number'):
np.savetxt(os.path.join(output_dir, 'summary.txt'), np.c_[self.mass_col, self.norm_h_col, self.vel_disp_col, self.galaxy_num_col], fmt = '%12.4e', header = 'HALO_MASS // CLUSTER_NORMALIZED_H // CLUSTER_VELOCITY_DISPERSION // CLUSTER_GALAXY_COUNT')
elif (self.c_axis == 'redshift'):
            np.savetxt(os.path.join(output_dir, 'summary.txt'), np.c_[self.mass_col, self.norm_h_col, self.vel_disp_col, self.median_r_col], fmt = '%12.4e', header = 'HALO_MASS // CLUSTER_NORMALIZED_H // CLUSTER_VELOCITY_DISPERSION // CLUSTER_MEDIAN_REDSHIFT')
plt.close(self.summary_fig)
| 11,227
| 51.223256
| 259
|
py
|
descqa
|
descqa-master/v1/descqa/base.py
|
import os
__all__ = ['BaseValidationTest']
class BaseValidationTest(object):
"""
    abstract base class for validation tests
"""
def run_validation_test(self, catalog_instance, catalog_name, output_dir):
raise NotImplementedError
def plot_summary(self, output_file, catalog_list):
pass
def run_on_single_catalog(self, catalog_instance, catalog_name, output_dir):
"""
Run the validation test on a single catalog.
Return an instance of TestResult.
This method will be called once for each catalog.
Parameters
----------
catalog_instance : instance of BaseGenericCatalog
instance of the galaxy catalog
catalog_name : str
name of the galaxy catalog
output_dir : str
output directory (all output must be under this directory)
Returns
-------
test_result : instance of TestResult
use the TestResult object to return test result
"""
if not hasattr(self, '_catalog_list'):
self._catalog_list = list()
self._catalog_list.append((catalog_name, output_dir))
return self.run_validation_test(catalog_instance, catalog_name, output_dir)
def conclude_test(self, output_dir):
"""
Conclude the test.
One can make summary plots for all catalogs here.
Return None.
This method will be called once when all catalogs are done.
Parameters
----------
output_dir: str
output directory (all output must be under this directory)
"""
catalog_list = getattr(self, '_catalog_list', list())
catalog_list.sort(key=lambda t: t[0])
self.plot_summary(os.path.join(output_dir, 'summary_plot.png'), catalog_list)
| 1,830
| 30.568966
| 85
|
py
|
descqa
|
descqa-master/v1/descqa/ValidationTest.py
|
from __future__ import division, print_function
import os
from warnings import warn
import itertools
zip = itertools.izip
import numpy as np
import matplotlib
mpl = matplotlib
mpl.use('Agg') # Must be before importing matplotlib.pyplot
mpl.rcParams['font.size'] = 13.0
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['mathtext.fontset'] = 'stix'
mpl.rcParams['legend.frameon'] = False
mpl.rcParams['legend.fontsize'] = 'x-small'
mpl.rcParams['figure.dpi'] = 200.0
mpl.rcParams['lines.markersize'] = 3.0
mpl.rcParams['xtick.minor.visible'] = True
mpl.rcParams['ytick.minor.visible'] = True
mpl.rcParams['xtick.major.size'] = 5.0
mpl.rcParams['xtick.minor.size'] = 3.0
mpl.rcParams['ytick.major.size'] = 5.0
mpl.rcParams['ytick.minor.size'] = 3.0
import matplotlib.pyplot
plt = matplotlib.pyplot
from .base import BaseValidationTest
from . import CalcStats
__all__ = ['ValidationTest', 'TestResult', 'mpl', 'plt', 'SimpleComparisonPlot', 'CalcStats']
class TestResult(object):
"""
class for passing back test result
"""
def __init__(self, score=None, summary='', passed=False, skipped=False, **kwargs):
"""
Parameters
----------
score : float or None
a float number to represent the test score
summary : str
short summary string
passed : bool
if the test is passed
skipped : bool
if the test is skipped, overwrites all other arguments
**kwargs :
any other keyword arguments
"""
self.skipped = bool(skipped)
self.passed = bool(passed)
self.summary = str(summary).strip()
        for k, v in kwargs.items():
setattr(self, k, v)
# set score
if not self.skipped:
try:
self.score = float(score)
except (TypeError, ValueError):
if isinstance(score, basestring) and score.upper() in ('PASSED', 'FAILED', 'SKIPPED'):
# this is for backward compatibility in other validations
status = score.upper()
self.passed = (status == 'PASSED')
self.skipped = (status == 'SKIPPED')
else:
raise ValueError('Must set a float value for `score`')
class ValidationTest(BaseValidationTest):
"""
abstract class for validation test class
"""
_available_observations = {}
_required_quantities = dict()
_plot_config = dict()
_default_kwargs = {
'zlo': 0.0,
'zhi': 1000.0,
'jackknife_nside': 5,
}
_output_filenames = dict(
catalog_data='catalog_data.txt',
validation_data='validation_data.txt',
catalog_covariance='catalog_covariance.txt',
logfile='logfile.txt',
figure='figure.png',
)
def _import_kwargs(self, kwargs, key, attr_name=None, func=None, required=False, always_set=False):
if attr_name is None:
attr_name = '_{}'.format(key)
val = kwargs.get(key, self._default_kwargs.get(key))
if required and val is None:
raise ValueError('Must specify test option `{}`'.format(key))
if callable(func):
val = func(val)
if always_set or val is not None:
setattr(self, attr_name, val)
return val
def __init__(self, **kwargs):
self._import_kwargs(kwargs, 'base_data_dir', required=True)
self._import_kwargs(kwargs, 'test_name')
self._import_kwargs(kwargs, 'observation', always_set=True)
if self._available_observations and self._observation not in self._available_observations:
raise ValueError('`observation` not available')
self._validation_name = self._observation
self._import_kwargs(kwargs, 'bins', func=lambda b: np.logspace(*b), required=True)
self._import_kwargs(kwargs, 'validation_range', always_set=True)
self._import_kwargs(kwargs, 'jackknife_nside', func=int, required=True)
self._import_kwargs(kwargs, 'zlo', func=float, required=True)
self._import_kwargs(kwargs, 'zhi', func=float, required=True)
self._zfilter = {'zlo': self._zlo, 'zhi': self._zhi}
self._subclass_init(**kwargs)
def _subclass_init(self, **kwargs):
pass
def _prepare_validation_test(self, galaxy_catalog, catalog_name, base_output_dir):
pass
def run_validation_test(self, galaxy_catalog, catalog_name, base_output_dir):
"""
run the validation test
Parameters
----------
galaxy_catalog : galaxy catalog reader object
instance of a galaxy catalog reader
catalog_name : string
name of galaxy catalog
base_output_dir : string
Returns
-------
test_result : TestResult object
use the TestResult object to return test result
"""
output_filenames = {k: os.path.join(base_output_dir, v) for k, v in self._output_filenames.iteritems()}
#make sure galaxy catalog has appropriate quantities
if not all(k in galaxy_catalog.quantities for k in self._required_quantities):
#raise an informative warning
msg = 'galaxy catalog {} does not have all the required quantities: {}, skipping the rest of the validation test.'.format(\
catalog_name, ', '.join(self._required_quantities))
warn(msg)
with open(output_filenames['logfile'], 'a') as f:
f.write(msg)
return TestResult(skipped=True)
self._prepare_validation_test(galaxy_catalog, catalog_name, base_output_dir)
catalog_result = self._calc_catalog_result(galaxy_catalog)
self._save_data(output_filenames['validation_data'], self._validation_data)
self._save_data(output_filenames['catalog_data'], catalog_result)
if 'cov' in catalog_result:
np.savetxt(output_filenames['catalog_covariance'], catalog_result['cov'])
self._plot_result(output_filenames['figure'], catalog_result, catalog_name)
return self._calculate_summary_statistic(catalog_result)
def _calc_catalog_result(self, galaxy_catalog):
raise NotImplementedError
def _calculate_summary_statistic(self, catalog_result, passing_pvalue=0.95):
if hasattr(self, '_interp_validation') and 'cov' not in self._validation_data:
validation_data = self._interp_validation(catalog_result['x'])
else:
validation_data = self._validation_data
#restrict range of validation data supplied for test if necessary
if self._validation_range:
mask_validation = (validation_data['x'] >= self._validation_range[0]) & (validation_data['x'] <= self._validation_range[1])
mask_catalog = (catalog_result['x'] >= self._validation_range[0]) & (catalog_result['x'] <= self._validation_range[1])
else:
mask_validation = np.ones(len(validation_data['x']), dtype=bool)
mask_catalog = np.ones(len(catalog_result['x']), dtype=bool)
if np.count_nonzero(mask_validation) != np.count_nonzero(mask_catalog):
            raise ValueError('The length of the validation data needs to be the same as that of the catalog result')
d = validation_data['y'][mask_validation] - catalog_result['y'][mask_catalog]
nbin = np.count_nonzero(mask_catalog)
cov = np.zeros((nbin, nbin))
if 'cov' in catalog_result:
cov += catalog_result['cov'][np.outer(*(mask_catalog,)*2)].reshape(nbin, nbin)
if 'cov' in validation_data:
cov += validation_data['cov'][np.outer(*(mask_validation,)*2)].reshape(nbin, nbin)
if not cov.any():
raise ValueError('empty covariance')
score, pvalue = CalcStats.chisq(d, cov, nbin)
passed = (pvalue < passing_pvalue)
msg = 'chi^2/dof = {:g}/{}; p-value = {:g} {} {:g}'.format(score, nbin, pvalue, '<' if passed else '>=', passing_pvalue)
return TestResult(pvalue, msg, passed)
def _plot_result(self, savepath, catalog_result, catalog_name, save_pdf=False):
interp_validation = self._plot_config.get('plot_validation_as_line')
with SimpleComparisonPlot(savepath, save_pdf) as plot:
plot.plot_data(self._validation_data, self._validation_name, catalog_result, catalog_name, interp_validation, interp_validation)
if self._validation_range:
plot.add_vband(*self._validation_range)
plot.set_labels(self._plot_config.get('xlabel'), self._plot_config.get('ylabel'), self._plot_config.get('ylabel_lower'), self._plot_config.get('title'))
plot.set_lims(self._plot_config.get('xlim'), self._plot_config.get('ylim'), self._plot_config.get('ylim_lower'))
plot.add_legend()
@staticmethod
def _save_data(filename, result, comment=''):
fields = ('x', 'y', 'y-', 'y+') if 'y-' in result and 'y+' in result else ('x', 'y')
np.savetxt(filename, np.vstack((result[k] for k in fields)).T, header=comment)
@staticmethod
def _load_data(filename):
raw = np.loadtxt(filename).T
fields = ('x', 'y', 'y-', 'y+') if len(raw) == 4 else ('x', 'y')
return dict(zip(fields, raw))
def plot_summary(self, output_file, catalog_list, save_pdf=True):
"""
make summary plot for validation test
Parameters
----------
output_file: string
filename for summary plot
catalog_list: list of tuple
            list of (catalog_name, catalog_output_dir) for each catalog that was run
"""
data = []
labels = []
for catalog_name, catalog_dir in catalog_list:
labels.append(catalog_name)
data.append(self._load_data(os.path.join(catalog_dir, self._output_filenames['catalog_data'])))
self._plot_result(output_file, data, labels, save_pdf)
_colors = ('#009292', '#ff6db6', '#490092', '#6db6ff', '#924900', '#24ff24')
_linestyles = ('-', '--', '-.', ':')
class SimpleComparisonPlot():
def __init__(self, savefig_path=None, save_pdf=False, logx=True, logy=True):
self.savefig_path = savefig_path
self.save_pdf = save_pdf
self.logx = logx
self.logy = logy
self.fig = None
self.ax = None
self.ax_lower = None
def __enter__(self):
self.fig, (self.ax, self.ax_lower) = plt.subplots(nrows=2, sharex=True, gridspec_kw={'height_ratios': (1, 0.3), 'hspace':0})
self.ax.set_xscale('log' if self.logx else 'linear')
self.ax_lower.set_xscale('log' if self.logx else 'linear')
self.ax.set_yscale('log' if self.logy else 'linear')
self.ax_lower.set_yscale('linear')
return self
def __exit__(self, *exc_args):
self.ax_lower.axhline(0.0, c='k', lw=0.5)
self.ax_lower.minorticks_on()
for t in self.ax_lower.yaxis.get_major_ticks()[-1:]:
t.label1.set_visible(False)
self.fig.tight_layout()
if self.savefig_path:
self.fig.savefig(self.savefig_path)
if self.save_pdf:
self.fig.savefig(self.savefig_path+'.pdf')
plt.close(self.fig)
def plot_data(self, ref_data, ref_label, other_data, other_labels, ref_as_line=False, interp=False):
if isinstance(other_labels, basestring):
ref_color = 'C1'
other_format = [('-', 'C0')]
other_data = [other_data]
other_labels = [other_labels]
else:
ref_color = 'k'
other_format = itertools.cycle(itertools.product(_linestyles, _colors))
#other_colors = mpl.cm.get_cmap('viridis')(np.linspace(0, 1, len(other_data)))
#other_linestyles = ['--', '-']*((len(other_data)+1)//2)
for data, label, (ls, color) in zip(other_data, other_labels, other_format):
self.add_line(self.mask_data(data), label, color, ls)
self.add_line(self.compare_data(ref_data, data, interp), label, color, ls, lower=True)
add_ref = self.add_line if ref_as_line else self.add_points
add_ref(self.mask_data(ref_data), ref_label, ref_color)
add_ref(self.compare_data(ref_data, ref_data), ref_label, ref_color, lower=True)
def compare_data(self, ref_data, this_data, interp=False):
d = dict()
d['x'] = this_data['x']
ref_y = ref_data['y']
if interp:
s = ref_data['x'].argsort()
ref_x = ref_data['x'][s]
this_x = d['x']
if self.logx:
ref_x = np.log(ref_x)
this_x = np.log(this_x)
ref_y = ref_data['y'][s]
if self.logy:
ref_y = np.log(ref_y)
ref_y = np.interp(this_x, ref_x, ref_y)
if self.logy:
ref_y = np.exp(ref_y)
for k in ('y', 'y+', 'y-'):
if k in this_data:
d[k] = this_data[k]/ref_y if self.logy else (this_data[k]-ref_y)
d = self.mask_data(d)
if self.logy:
for k in ('y', 'y+', 'y-'):
if k in this_data:
d[k] = np.log(d[k])
return d
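    def _demo_log_ratio(self):
        # Added aside -- a hedged sketch (not part of the original class) of the
        # log-space interpolation performed in compare_data above; this helper
        # is hypothetical and unused.
        ref_x, ref_y = np.array([1., 10., 100.]), np.array([2., 20., 200.])
        this_x = np.array([5., 50.])
        # interpolate log(y) against log(x), then undo the logs
        return np.exp(np.interp(np.log(this_x), np.log(ref_x), np.log(ref_y)))  # -> [10., 100.]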
def mask_data(self, data):
if self.logy:
mask = np.isfinite(data['y']) & (data['y'] > 0)
d = {k: v[mask] for k, v in data.iteritems()}
if 'y-' in d:
d['y-'][d['y-'] <= 0] = 1.0e-100
return d
return data
def add_line(self, data, label, color, linestyle='-', lower=False):
ax_this = self.ax_lower if lower else self.ax
ax_this.plot(data['x'], data['y'], label=label, color=color, ls=linestyle)
if 'y-' in data and 'y+' in data:
ax_this.fill_between(data['x'], data['y-'], data['y+'], alpha=0.15, color=color, lw=0)
def add_points(self, data, label, color, lower=False):
ax_this = self.ax_lower if lower else self.ax
if 'y-' in data and 'y+' in data:
ax_this.errorbar(data['x'], data['y'], [data['y']-data['y-'], data['y+']-data['y']], label=label, color=color, marker='s', ls='')
else:
ax_this.plot(data['x'], data['y'], label=label, color=color, marker='s', ls='')
def add_vband(self, x0, x1):
for ax_this in (self.ax, self.ax_lower):
xlim_lo, xlim_hi = ax_this.get_xlim()
if self.logx:
xlim_lo /= 1000.0
xlim_hi *= 1000.0
else:
xlim_lo -= 1000.0
xlim_hi += 1000.0
ax_this.axvspan(xlim_lo, x0, alpha=0.1, color='k', lw=0)
ax_this.axvspan(x1, xlim_hi, alpha=0.1, color='k', lw=0)
def add_legend(self, **kwargs):
d = dict(ncol=2)
d.update(kwargs)
self.ax.legend(**d)
def set_lims(self, xlim=None, ylim=None, ylim_lower=None):
if xlim:
self.ax.set_xlim(xlim)
self.ax_lower.set_xlim(xlim)
if ylim:
self.ax.set_ylim(ylim)
if ylim_lower is None:
ylim_lower = (-0.7, 0.7)
self.ax_lower.set_ylim(ylim_lower)
def set_labels(self, xlabel=None, ylabel=None, ylabel_lower=None, title=None):
if xlabel:
self.ax_lower.set_xlabel(xlabel)
if ylabel:
self.ax.set_ylabel(ylabel)
if ylabel_lower is None:
ylabel_lower = 'ln(ratio)' if self.logy else 'diff.'
self.ax_lower.set_ylabel(ylabel_lower)
if title:
self.ax.set_title(title)
| 15,735
| 36.466667
| 164
|
py
|
descqa
|
descqa-master/v1/descqa/HaloMassFunctionTest.py
|
from __future__ import division, print_function
import os
import subprocess
import numpy as np
from .BinnedStellarMassFunctionTest import BinnedStellarMassFunctionTest
class HaloMassFunctionTest(BinnedStellarMassFunctionTest):
"""
validation test class object to compute halo mass function bins
"""
_plot_config = dict(\
xlabel=r'$M_{\rm halo} \; [M_\odot]$',
ylabel=r'$dn\,/\,d\log M \; [{\rm Mpc}^{-3}\,{\rm dex}^{-1}]$',
plot_validation_as_line=True,
xlim=(1.0e8, 1.0e15),
ylim=(1.0e-7, 10.0),
)
_required_quantities = {'mass', 'parent_halo_id', 'positionX', 'positionY', 'positionZ'}
_available_observations = {'Sheth-Tormen', 'Jenkins', 'Tinker'}
_default_kwargs = {
'observation': 'Tinker',
'zlo': 0.0,
'zhi': 1000.0,
'ztest': 0,
'jackknife_nside': 5,
}
def _interp_validation(self, x):
res = {}
s = self._validation_data['x'].argsort()
x_orig = self._validation_data['x'][s]
res['x'] = x
for k in ('y', 'y-', 'y+'):
if k in self._validation_data:
res[k] = np.exp(np.interp(np.log(x), np.log(x_orig), np.log(self._validation_data[k][s])))
return res
def _subclass_init(self, **kwargs):
self._import_kwargs(kwargs, 'ztest', func=float, required=True)
def _prepare_validation_test(self, galaxy_catalog, catalog_name, base_output_dir):
"""
generate halo mass function data
"""
#associate files with observations
halo_mass_par = {'Sheth-Tormen':'ST', 'Jenkins':'JEN', 'Tinker':'TINK'}
#get path to exe
exe = os.path.join(self._base_data_dir, 'ANALYTIC/amf/amf.exe')
fn = os.path.join(base_output_dir, 'analytic.dat')
input_par_fn = os.path.join(base_output_dir, 'input.par')
h = galaxy_catalog.cosmology.H(self._ztest).value/100.
om = galaxy_catalog.cosmology.Om0
#input.par file contains (in this order!):
input_par = (
'{:.6g}'.format(om), #omega_0 -- total matter fraction
'0.046', #omega_bar -- baryon fraction
'{:.6g}'.format(h), #h -- hubble constant in 100 km/s/Mpc
'0.816', #sigma_8 -- variance of the linear density field
'0.96', #n_s -- power spectrum index
'-1', #w_0 -- dark energy equation of state parameter
'0', #w_a -- ... (see the above, w = w_0 + w_a*(1-a))
'1.686', #delta_c -- linear overdensity at virialization
'EH', #transfer function -- options: CMB, BBKS, EBW, PD, HS, KH or EH
halo_mass_par[self._observation], #fitting function -- options: PS, ST, JEN, LANL, DELP, REED, REED06 or TINK
'0.0625', #redshift -- z >= 0
'{:.6g}'.format(97.7/om), #Delta -- overdensity of SO halos; used only for Tinker MF
'1.0E7', #minimal mass -- range of masses which output will cover
'1.0E15', #maximal mass -- ... (see the above)
'50', #k_max -- maximum k for calculating sigma(k) integration
)
if getattr(self, '_amf_args', None) != input_par:
with open(input_par_fn, 'w') as f:
f.write('\n'.join(input_par))
f.write('\n')
if os.path.exists(fn):
os.remove(fn)
CWD = os.getcwd()
os.chdir(base_output_dir)
try:
subprocess.check_call([exe])
finally:
os.chdir(CWD)
MassFunc = np.loadtxt(fn).T
self._validation_data = {'x': MassFunc[2]/h, 'y':MassFunc[3]*h*h*h}
self._amf_args = tuple(input_par)
def _get_quantities_from_catalog(self, galaxy_catalog):
"""
        obtain the masses and mask from the galaxy catalog
Parameters
----------
galaxy_catalog : galaxy catalog reader object
"""
#get stellar masses from galaxy catalog
hm = galaxy_catalog.get_quantities("mass", self._zfilter)
x = galaxy_catalog.get_quantities("positionX", self._zfilter)
y = galaxy_catalog.get_quantities("positionY", self._zfilter)
z = galaxy_catalog.get_quantities("positionZ", self._zfilter)
pid = galaxy_catalog.get_quantities("parent_halo_id", self._zfilter)
#remove non-finite or negative numbers
mask = np.isfinite(hm)
mask &= (hm > 0)
mask &= np.isfinite(x)
mask &= np.isfinite(y)
mask &= np.isfinite(z)
mask &= (pid == -1)
return dict(mass=hm[mask], x=x[mask], y=y[mask], z=z[mask])
def plot_summary(self, output_file, catalog_list, save_pdf=True):
super(BinnedStellarMassFunctionTest, self).plot_summary(output_file, catalog_list, save_pdf)
| 5,114
| 39.595238
| 121
|
py
|
descqa
|
descqa-master/v1/descqa/CalcStats.py
|
from __future__ import division, print_function
import numpy as np
from scipy.stats import chi2
def get_subvolume_indices(x, y, z, box_size, n_side):
side_size = box_size/n_side
return np.ravel_multi_index(np.floor(np.vstack((x, y, z))/side_size).astype(int), (n_side,)*3, 'wrap')
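def _example_subvolume_indices():
    # Added aside -- hedged illustration (not part of the original module) of
    # get_subvolume_indices: points in a box of side 100 fall into 2**3 octants.
    x = np.array([10., 90.])
    y = np.array([10., 90.])
    z = np.array([10., 90.])
    return get_subvolume_indices(x, y, z, box_size=100., n_side=2)  # -> [0, 7]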
def jackknife(data, jack_indices, n_jack, func, full_args=(), full_kwargs={}, jack_args=(), jack_kwargs={}):
if len(data) != len(jack_indices):
raise ValueError('`data` and `jack_indices` must have the same length')
if not np.in1d(jack_indices, np.arange(n_jack)).all():
        raise ValueError('`jack_indices` must be an array of int between 0 and n_jack-1')
full = np.array(func(data, *full_args, **full_kwargs), dtype=np.float)
jack = []
for i in xrange(n_jack):
jack.append(func(data[jack_indices != i], *jack_args, **jack_kwargs))
jack = np.array(jack, dtype=np.float)
bias = (jack.mean(axis=0) - full)*(n_jack-1)
return full-bias, bias, np.cov(jack, rowvar=False, bias=True)*float(n_jack-1)
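def _example_jackknife_mean():
    # Added aside -- a hedged usage sketch (not part of the original module):
    # jackknife the mean of random data over 4 pseudo-independent subsamples.
    data = np.random.normal(0., 1., 400)
    jack_indices = np.repeat(np.arange(4), 100)        # 4 subvolumes of 100 points each
    return jackknife(data, jack_indices, 4, np.mean)   # (debiased mean, bias, covariance)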
def chisq(difference, covariance, dof):
d = np.asarray(difference)
cov = np.asarray(covariance)
if cov.ndim == 1:
cov = np.diag(cov)
chisq_value = np.dot(d, np.dot(np.linalg.inv(cov), d))
return chisq_value, chi2.cdf(chisq_value, dof)
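def _example_chisq():
    # Added aside -- hedged illustration (not part of the original module): two
    # bins each off by 1 sigma give chi^2 = 2 for a diagonal unit covariance.
    diff = np.array([1.0, 1.0])
    cov = np.array([1.0, 1.0])   # 1-D input is promoted to a diagonal matrix
    return chisq(diff, cov, 2)   # -> (2.0, ~0.63)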
def Lp_norm(difference, p=2.0):
d = np.asarray(difference)
d **= p
return d.sum() ** (1.0/p)
def AD_statistic(n1, n2, y1, y2, threshold):
'''
Calculate the two-sample Anderson-Darling statistic from two CDFs;
n1, n2: number of objects in the two samples;
    y1, y2: CDF y-values of the two distributions, and they should have
the same x-axis.
'''
n = n1+n2
h = (n1*y1+n2*y2)/n
# compute Anderson-Darling statistic
inv_weight = (h*(1-h))[:-1]
# remove infinities in the weight function
mask = (inv_weight<1e-5)
inv_weight[mask] = 1
ads = n1*n2/n * np.sum(((y2 - y1)[:-1])**2*(h[1:]-h[:-1])/inv_weight)
    success = (ads < threshold)
    return ads, success
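def _example_AD_statistic():
    # Added aside -- hedged illustration (not part of the original module):
    # identical CDFs give an Anderson-Darling statistic of exactly zero.
    grid = np.linspace(0., 1., 101)
    return AD_statistic(100, 100, grid, grid, threshold=1.0)   # -> (0.0, True)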
def CvM_statistic(n1, n2, y1, y2, threshold):
'''
Calculate the two-sample Cramer-von Mises statistic from two CDFs;
n1, n2: number of objects in the two samples;
    y1, y2: CDF y-values of the two distributions, and they should have
the same x-axis.
'''
n = n1+n2
h = (n1*y1+n2*y2)/n
cvm_omega = np.sqrt(np.trapz((y2-y1)**2, h))
    success = (cvm_omega < threshold)
    return cvm_omega, success
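def _example_CvM_statistic():
    # Added aside -- hedged illustration (not part of the original module):
    # identical CDFs give a Cramer-von Mises statistic of exactly zero.
    grid = np.linspace(0., 1., 101)
    return CvM_statistic(100, 100, grid, grid, threshold=0.05)   # -> (0.0, True)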
| 2,561
| 29.5
| 108
|
py
|
descqa
|
descqa-master/v1/descqa/WprpTest.py
|
from __future__ import division, print_function
import os
import numpy as np
from warnings import warn
from .ValidationTest import *
from helpers.CorrelationFunction import projected_correlation
class WprpTest(ValidationTest):
"""
    validation test class object to compute projected 2-point correlation function wp(rp)
"""
_plot_config = dict(\
xlabel=r'$r_p \; [{\rm Mpc}]$',
ylabel=r'$w_p(r_p) \; [{\rm Mpc}]$',
xlim=(0.1, 30.0),
ylim=(0.1, 2000.0),
)
_required_quantities = {'stellar_mass', 'positionX', 'positionY', 'positionZ', 'velocityZ'}
_available_observations = {'SDSS', 'MBII'}
_default_kwargs = {
'observation': 'SDSS',
'zlo': 0.0,
'zhi': 1000.0,
'jackknife_nside': 10,
'zmax': 40.0,
'sm_cut': 10.0**9.8,
}
def _subclass_init(self, **kwargs):
#set validation data information
self._import_kwargs(kwargs, 'datafile', required=True)
self._import_kwargs(kwargs, 'sm_cut', func=float, required=True)
self._import_kwargs(kwargs, 'zmax', func=float, required=True)
raw_data = np.loadtxt(os.path.join(self._base_data_dir, self._datafile))
rp = raw_data[:,0]
wp = raw_data[:,1]
wp_cov = raw_data[:,2:]
wp_err = np.sqrt(np.diag(wp_cov))
self._validation_data = {'x': rp, 'y':wp, 'y+':wp+wp_err, 'y-':wp-wp_err, 'cov':wp_cov}
def _calc_catalog_result(self, gc):
try:
h = gc.cosmology.H0.value/100.0
except AttributeError:
h = 0.702
msg = 'Make sure `cosmology` and `redshift` properties are set. Using default value h=0.702...'
warn(msg)
# convert arguments
sm_cut = self._sm_cut/(h*h)
rbins = self._bins/h
zmax = self._zmax/h
njack = self._jackknife_nside
# load catalog
flag = (gc.get_quantities("stellar_mass", self._zfilter) >= sm_cut)
x = gc.get_quantities("positionX", self._zfilter)
flag &= np.isfinite(x)
x = x[flag]
y = gc.get_quantities("positionY", self._zfilter)[flag]
z = gc.get_quantities("positionZ", self._zfilter)[flag]
vz = gc.get_quantities("velocityZ", self._zfilter)[flag]
vz /= (100.0*h)
z += vz
del vz
# calc wp(rp)
points = np.remainder(np.vstack((x, y, z)).T, gc.box_size)
wp, wp_cov = projected_correlation(points, rbins, zmax, gc.box_size, njack)
rp = np.sqrt(rbins[1:]*rbins[:-1])
wp_err = np.sqrt(np.diag(wp_cov))
return {'x': rp, 'y':wp, 'y+':wp+wp_err, 'y-':wp-wp_err, 'cov':wp_cov}
| 2,671
| 31.585366
| 107
|
py
|
descqa
|
descqa-master/v1/descqa/ColorDistributionTest.py
|
from __future__ import division, print_function
import os
from warnings import warn
import numpy as np
from scipy.ndimage.filters import uniform_filter1d
from .ValidationTest import TestResult, plt
from .base import BaseValidationTest
from .CalcStats import CvM_statistic
from .ComputeColorDistribution import load_SDSS
catalog_output_file = 'catalog_quantiles.txt'
validation_output_file = 'validation_quantiles.txt'
summary_output_file = 'summary.txt'
kcorrection_z_file = 'kcorrection_z.txt'
log_file = 'log.txt'
plot_pdf_file = 'plot_pdf.png'
plot_cdf_file = 'plot_cdf.png'
plot_pdf_cdf_file = 'plot_g-r_pdf_cdf.pdf'
data_dir = '/global/cfs/cdirs/lsst/groups/CS/descqa/data/rongpu/'
data_name = 'SDSS'
limiting_band_name = 'SDSS_r:rest:'
find_first_true = np.argmax  # index of the first True element of a boolean array
class ColorDistributionTest(BaseValidationTest):
"""
    validation test class object to compute galaxy color distribution
and compare with SDSS
"""
def __init__(self, **kwargs):
"""
Initialize a color distribution validation test.
Parameters
----------
base_data_dir : string
base directory that contains validation data
base_output_dir : string
base directory to store test data, e.g. plots
colors : list of string, required
list of colors to be tested
e.g ['u-g','g-r','r-i','i-z']
translate : dictionary, optional
translate the bands to catalog specific names
        zlo : float, required
            minimum redshift of the validation catalog
        zhi : float, required
maximum redshift of the validation catalog
threshold : float, required
threshold value for passing the test
"""
# set parameters of test:
# filename of SDSS data
        if 'sdss_fname' in kwargs:
self.sdss_fname = kwargs['sdss_fname']
else:
raise ValueError('`sdss_fname` not found!')
# colors
if 'colors' in kwargs:
self.colors = kwargs['colors']
else:
raise ValueError('`colors` not found!')
for color in self.colors:
if len(color)!=3 or color[1]!='-':
raise ValueError('`colors` is not in the correct format!')
# minimum redshift
        if 'zlo' in kwargs:
self.zlo_obs = self.zlo_mock = kwargs['zlo']
else:
raise ValueError('`zlo` not found!')
# maximum redshift
        if 'zhi' in kwargs:
self.zhi_obs = self.zhi_mock = kwargs['zhi']
else:
raise ValueError('`zhi` not found!')
# threshold value
        if 'threshold' in kwargs:
self.threshold = kwargs['threshold']
else:
raise ValueError('`threshold` not found!')
# translation rules from bands to catalog specific names
        if 'translate' in kwargs:
            self.translate = kwargs['translate']
        else:
            raise ValueError('`translate` not found!')
def run_validation_test(self, galaxy_catalog, catalog_name, base_output_dir):
"""
run the validation test
Parameters
----------
galaxy_catalog : galaxy catalog reader object
instance of a galaxy catalog reader
catalog_name : string
name of mock galaxy catalog
Returns
-------
test_passed : boolean
True if the test is 'passed', False otherwise
"""
nrows = int(np.ceil(len(self.colors)/2.))
fig_cdf, axes_cdf = plt.subplots(nrows, 2, figsize=(8, 4*nrows))
fig_pdf, axes_pdf = plt.subplots(nrows, 2, figsize=(8, 4*nrows))
pass_q = True # False if any color fails
color_count = 0 # Number of available colors
pass_count = 0 # Number of colors that pass the test
cvm_sum = 0.
if hasattr(galaxy_catalog, "SDSS_kcorrection_z"):
self.SDSS_kcorrection_z = galaxy_catalog.SDSS_kcorrection_z
else:
msg = ('galaxy catalog does not have SDSS_kcorrection_z; using default SDSS_kcorrection_z = 0.06\n')
warn(msg)
self.SDSS_kcorrection_z = 0.06
# write to log file
fn = os.path.join(base_output_dir, log_file)
with open(fn, 'a') as f:
f.write(msg)
        # Cosmology for distance modulus for absolute magnitudes
self.cosmology = galaxy_catalog.cosmology
# Values of the SDSS color distribution histogram
vsummary, mrmax = load_SDSS(os.path.join(data_dir, self.sdss_fname), self.colors, self.SDSS_kcorrection_z)
# Write to summary file
filename = os.path.join(base_output_dir, summary_output_file)
with open(filename, 'a') as f:
f.write('K corrected to z = %1.3f\n'%self.SDSS_kcorrection_z)
f.write('%2.3f < z < %2.3f\n\n'%(self.zlo_obs, self.zhi_obs))
# Write K correction redshift to file
filename = os.path.join(base_output_dir, kcorrection_z_file)
with open(filename, 'a') as f:
f.write(str(self.SDSS_kcorrection_z))
# Initialize array for quantiles
catalog_quantiles = np.zeros([len(self.colors), 5])
validation_quantiles = np.zeros([len(self.colors), 5])
# Loop through colors
for ax_cdf, ax_pdf, index in zip(axes_cdf.flat, axes_pdf.flat, range(len(self.colors))):
color = self.colors[index]
band1 = self.translate[color[0]]
band2 = self.translate[color[2]]
self.band1 = band1
self.band2 = band2
nobs, obinctr, ohist, ocdf = vsummary[index]
omedian = obinctr[find_first_true(ocdf>0.5)]
            # Make sure galaxy catalog has appropriate quantities
if not all(k in galaxy_catalog.quantities for k in (self.band1, self.band2)):
# raise an informative warning
msg = ('galaxy catalog does not have `{}` and/or `{}` quantities.\n'.format(band1, band2))
warn(msg)
# write to log file
fn = os.path.join(base_output_dir, log_file)
with open(fn, 'a') as f:
f.write(msg)
continue
# Calculate color distribution in mock catalog
color_dist_output = self.color_distribution(galaxy_catalog, (-1, 4, 2000), base_output_dir, omedian, mrmax)
if color_dist_output is None:
# raise an informative warning
msg = ('The `{}` and/or `{}` quantities don\'t have the correct range or format.\n'.format(band1, band2))
warn(msg)
# write to log file
fn = os.path.join(base_output_dir, log_file)
with open(fn, 'a') as f:
f.write(msg)
continue
nmock, mbinctr, mhist, mcdf, mhist_shift, mcdf_shift, median_diff = color_dist_output
# At least one color exists
color_count += 1
# Calculate median, quartiles, and 2nd percentile and 98th percentile
oq1 = obinctr[find_first_true(ocdf>0.25)]
oq3 = obinctr[find_first_true(ocdf>0.75)]
oiqr = oq3 - oq1
# oboxmin = max(oq1-1.5*oiqr, obinctr[find_first_true(ocdf>0)])
# oboxmax = min(oq3+1.5*oiqr, obinctr[find_first_true(ocdf==ocdf[-1])])
oboxmin = obinctr[find_first_true(ocdf>0.02)]
oboxmax = obinctr[find_first_true(ocdf>0.98)]
mq1 = mbinctr[find_first_true(mcdf>0.25)]
mq3 = mbinctr[find_first_true(mcdf>0.75)]
miqr = mq3 - mq1
mmedian = mbinctr[find_first_true(mcdf>0.5)]
# mboxmin = max(mq1-1.5*miqr, mbinctr[find_first_true(mcdf>0)])
# mboxmax = min(mq3+1.5*miqr, mbinctr[find_first_true(mcdf==mcdf[-1])])
mboxmin = mbinctr[find_first_true(mcdf>0.02)]
mboxmax = mbinctr[find_first_true(mcdf>0.98)]
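            # Added aside -- hedged illustration (not used by the test) of the
            # `find_first_true` (np.argmax) quantile trick used above;
            # `_demo_cdf_quantile` is a hypothetical name.
            def _demo_cdf_quantile():
                binctr = np.array([0.0, 1.0, 2.0, 3.0])
                cdf = np.array([0.1, 0.4, 0.8, 1.0])
                # np.argmax on a boolean array returns the index of the first True
                return binctr[find_first_true(cdf > 0.5)]   # -> 2.0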
validation_quantiles[index] = np.array([oboxmin, oq1, omedian, oq3, oboxmax])
catalog_quantiles[index] = np.array([mboxmin, mq1, mmedian, mq3, mboxmax])
# calculate Cramer-von Mises statistic
cvm_omega, cvm_success = CvM_statistic(nmock, nobs, mcdf, ocdf, threshold=self.threshold)
cvm_omega_shift, cvm_success_shift = CvM_statistic(nmock, nobs, mcdf_shift, ocdf, threshold=self.threshold)
# plot CDF
# validation distribution
ax_cdf.step(obinctr, ocdf, label=data_name,color='C1')
# catalog distribution
ax_cdf.step(mbinctr, mcdf, where="mid", label=catalog_name+'\n'+r'$\omega={:.3}$'.format(cvm_omega), color='C0')
# color distribution after constant shift
ax_cdf.step(mbinctr, mcdf_shift, where="mid", label=catalog_name+' shifted\n'+r'$\omega={:.3}$'.format(cvm_omega_shift), linestyle='--', color='C0')
ax_cdf.set_xlabel('${}$'.format(color))
ax_cdf.set_title('')
xmin = np.min([mbinctr[find_first_true(mcdf>0.001)], mbinctr[find_first_true(mcdf_shift>0.001)], obinctr[find_first_true(ocdf>0.001)]])
xmax = np.max([mbinctr[find_first_true(mcdf>0.999)], mbinctr[find_first_true(mcdf_shift>0.999)], obinctr[find_first_true(ocdf>0.999)]])
ax_cdf.set_xlim(xmin, xmax)
ax_cdf.set_ylim(0, 1)
ax_cdf.legend(loc='upper left', frameon=False)
# plot PDF
ohist_smooth = uniform_filter1d(ohist, 20)
mhist_smooth = uniform_filter1d(mhist, 20)
mhist_shift_smooth = uniform_filter1d(mhist_shift, 20)
# validation data
ax_pdf.step(obinctr, ohist_smooth, label=data_name,color='C1')
# catalog distribution
ax_pdf.step(mbinctr, mhist_smooth, where="mid", label=catalog_name+'\n'+r'$\omega={:.3}$'.format(cvm_omega), color='C0')
# color distribution after constant shift
ax_pdf.step(mbinctr, mhist_shift_smooth, where="mid", label=catalog_name+' shifted\n'+r'$\omega={:.3}$'.format(cvm_omega_shift), linestyle='--', color='C0')
ax_pdf.set_xlabel('${}$'.format(color))
ax_pdf.set_xlim(xmin, xmax)
ax_pdf.set_ylim(ymin=0.)
ax_pdf.set_title('')
ax_pdf.legend(loc='upper left', frameon=False)
# PDF+CDF plot for the paper
if index==1: # g-r color
fig_pdf_cdf, axes_pdf_cdf = plt.subplots(1, 2, figsize=(8, 4))
axes_pdf_cdf[0].step(obinctr, ohist_smooth, label=data_name,color='C1')
axes_pdf_cdf[0].step(mbinctr, mhist_smooth, where="mid", label=catalog_name+'\n'+r'$\omega={:.3}$'.format(cvm_omega), color='C0')
axes_pdf_cdf[0].step(mbinctr, mhist_shift_smooth, where="mid", label=catalog_name+' shifted\n'+r'$\omega={:.3}$'.format(cvm_omega_shift), linestyle='--', color='C0')
axes_pdf_cdf[0].set_xlabel('${}$'.format(color))
axes_pdf_cdf[0].set_xlim(xmin, xmax)
axes_pdf_cdf[0].set_ylim(ymin=0.)
axes_pdf_cdf[0].legend(loc='upper left', frameon=False)
axes_pdf_cdf[1].step(obinctr, ocdf, label=data_name,color='C1')
axes_pdf_cdf[1].step(mbinctr, mcdf, where="mid", label=catalog_name+'\n'+r'$\omega={:.3}$'.format(cvm_omega), color='C0')
axes_pdf_cdf[1].step(mbinctr, mcdf_shift, where="mid", label=catalog_name+' shifted\n'+r'$\omega={:.3}$'.format(cvm_omega_shift), linestyle='--', color='C0')
axes_pdf_cdf[1].set_xlabel('${}$'.format(color))
axes_pdf_cdf[1].set_xlim(xmin, xmax)
axes_pdf_cdf[1].set_ylim(0, 1)
axes_pdf_cdf[1].legend(loc='upper left', frameon=False)
fn = os.path.join(base_output_dir, plot_pdf_cdf_file)
fig_pdf_cdf.tight_layout()
fig_pdf_cdf.savefig(fn)
plt.close(fig_pdf_cdf)
# save result to file
filename = os.path.join(base_output_dir, summary_output_file)
with open(filename, 'a') as f:
f.write("Median "+color+" difference (mock - obs) = %2.3f\n"%(median_diff))
f.write(color+" {}: {} = {}\n".format('SUCCESS' if cvm_success else 'FAILED', 'CvM statistic', cvm_omega))
f.write(color+" (shifted) {}: {} = {}\n".format('SUCCESS' if cvm_success_shift else 'FAILED', 'CvM statistic', cvm_omega_shift))
f.write("\n")
# The test is considered to pass if all colors pass the CvM comparison (after the median shift)
if cvm_success_shift:
pass_count+=1
else:
pass_q = False
cvm_sum += cvm_omega_shift
# color_count for this color was already incremented above
if color_count>0:
# save plots
fig_cdf.tight_layout()
fn = os.path.join(base_output_dir, plot_cdf_file)
fig_cdf.savefig(fn)
fig_pdf.tight_layout()
fn = os.path.join(base_output_dir, plot_pdf_file)
fig_pdf.savefig(fn)
plt.close(fig_cdf)
plt.close(fig_pdf)
# save quantiles
fn = os.path.join(base_output_dir, catalog_output_file)
np.savetxt(fn, catalog_quantiles)
fn = os.path.join(base_output_dir, validation_output_file)
np.savetxt(fn, validation_quantiles)
#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--#--
if color_count>0:
cvm_omega_average = cvm_sum/color_count
if color_count==0:
return TestResult(summary='No available colors for comparison. ', skipped=True)
elif pass_q:
return TestResult(score=cvm_omega_average,
summary='{}/{} success - all colors pass the test; average Cramer-von Mises statistic = {:.3f}'.format(pass_count, len(self.colors), cvm_omega_average), passed=True)
else:
return TestResult(score=cvm_omega_average,
summary='{}/{} success - not all colors pass the test; average CvM statistic = {:.3f}'.format(pass_count, len(self.colors), cvm_omega_average), passed=False)
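# For reference, a minimal sketch of the `find_first_true` helper used in the
# quantile lookups above (hypothetical reimplementation; the project's own
# helper may differ):
# def find_first_true(mask):
#     return np.argmax(mask)  # np.argmax on a boolean array yields the index
#                             # of its first True element (assumes one exists)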
def color_distribution(self, galaxy_catalog, bin_args, base_output_dir, omedian, mrmax):
"""
Calculate the color distribution of mock catalog.
Parameters
----------
galaxy_catalog : (mock) galaxy catalog reader object
"""
# get magnitudes from galaxy catalog
mag1 = galaxy_catalog.get_quantities(self.band1, {'zlo': self.zlo_mock, 'zhi': self.zhi_mock})
mag2 = galaxy_catalog.get_quantities(self.band2, {'zlo': self.zlo_mock, 'zhi': self.zhi_mock})
if len(mag1)==0:
msg = 'No object in the redshift range!\n'
warn(msg)
#write to log file
fn = os.path.join(base_output_dir, log_file)
with open(fn, 'a') as f:
f.write(msg)
return
#apply magnitude limit and remove nonsensical magnitude values
mag_lim = galaxy_catalog.get_quantities(limiting_band_name, {'zlo': self.zlo_mock, 'zhi': self.zhi_mock})
mask = (mag_lim<mrmax)
# # r-band apparent magnitude cut
# r_band_mag = galaxy_catalog.get_quantities('SDSS_r:observed:', {'zlo': self.zlo_mock, 'zhi': self.zhi_mock})
# mask = mask & (r_band_mag<17.77)
mag1 = mag1[mask]
mag2 = mag2[mask]
if np.sum(mask)==0:
msg = 'No object in the magnitude range!\n'
warn(msg)
#write to log file
fn = os.path.join(base_output_dir, log_file)
with open(fn, 'a') as f:
f.write(msg)
return
mmedian = np.median(mag1-mag2)
median_diff = mmedian - omedian
# Histogram
hist, bins = np.histogram(mag1-mag2, bins=np.linspace(*bin_args))
hist_shift, _ = np.histogram(mag1-mag2-median_diff, bins=np.linspace(*bin_args))
# normalize the histogram so that the sum of hist is 1
hist = hist/np.sum(hist)
hist_shift = hist_shift/np.sum(hist_shift)
binctr = (bins[1:] + bins[:-1])/2.
# Convert PDF to CDF
cdf = np.cumsum(hist)
cdf_shift = np.cumsum(hist_shift)
return len(mag1), binctr, hist, cdf, hist_shift, cdf_shift, median_diff
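# For reference, a minimal sketch of a two-sample Cramer-von Mises statistic on
# shared bins, in the spirit of the `CvM_statistic` call above (hypothetical;
# the project's actual implementation and threshold semantics may differ):
# def cvm_statistic_sketch(n1, n2, cdf1, cdf2, threshold=0.05):
#     h1 = np.diff(np.concatenate(([0.0], cdf1)))  # recover the binned PDFs
#     h2 = np.diff(np.concatenate(([0.0], cdf2)))
#     dH = (n1*h1 + n2*h2) / (n1 + n2)             # combined empirical measure
#     omega = (n1*n2)/(n1 + n2) * np.sum((cdf1 - cdf2)**2 * dH)
#     return omega, omega < threshold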
def plot_summary(self, output_file, catalog_list):
"""
make summary plot for validation test
Parameters
----------
output_file: string
filename for summary plot
catalog_list: list of tuple
list of (catalog, catalog_output_dir) used for each catalog comparison
"""
colors = self.colors
nrows = int(np.ceil(len(colors)/2.))
fig, axes = plt.subplots(nrows, 2, figsize=(8, 4.*nrows), sharex=True)
# Sort catalogs by kcorrect_z and names
catalog_names = [catalog_name for catalog_name, _ in catalog_list]
argsort = np.argsort(catalog_names, kind='mergesort')
catalog_list = [catalog_list[i] for i in argsort]
kcorrection_z_list = []
for catalog_name, catalog_dir in catalog_list:
fn = os.path.join(catalog_dir, kcorrection_z_file)
kcorrection_z_list.append(float(np.loadtxt(fn)))
argsort = np.argsort(kcorrection_z_list, kind='mergesort')
catalog_list = [catalog_list[i] for i in argsort]
# Load summary quantiles data
data = []
for _, catalog_dir in catalog_list:
fn = os.path.join(catalog_dir, catalog_output_file)
data.append(np.loadtxt(fn))
data = np.array(data)
data[~data.any(axis=-1)] = np.nan # to hide catalogs that do not have all colors
# loop over colors
for index, ax in enumerate(axes.flat):
if index >= len(colors):
ax.axis('off')
continue
# Mock catalog results
ax.boxplot(data[:,index].T, whis='range', medianprops=dict(color='k'))
ax.set_ylabel('${}$'.format(colors[index]), fontsize=16)
# Validation results
first_plot = True
for cat_index in range(len(catalog_list)):
_, catalog_dir = catalog_list[cat_index]
fn = os.path.join(catalog_dir, validation_output_file)
vquantiles = np.loadtxt(fn)[index]
if not np.all(vquantiles==0):
# xmin and xmax are relative coordinates in range of 0-1.
xmin, xmax = [cat_index/len(catalog_list), (cat_index+1)/len(catalog_list)]
if first_plot:
ax.axhline(vquantiles[2], xmin=xmin, xmax=xmax, lw=2, color='r', label='{} median'.format(data_name))
ax.axhspan(vquantiles[1], vquantiles[3], xmin=xmin, xmax=xmax, facecolor='r', alpha=0.3, lw=0, label=' [$Q_1$, $Q_3$]')
ax.axhspan(vquantiles[0], vquantiles[1], xmin=xmin, xmax=xmax, facecolor='grey', alpha=0.2, lw=0, label=' [2nd, 98th]')
ax.axhspan(vquantiles[3], vquantiles[4], xmin=xmin, xmax=xmax, facecolor='grey', alpha=0.2, lw=0)
first_plot = False
else:
ax.axhline(vquantiles[2], xmin=xmin, xmax=xmax, lw=2, color='r')
ax.axhspan(vquantiles[1], vquantiles[3], xmin=xmin, xmax=xmax, facecolor='r', alpha=0.3, lw=0)
ax.axhspan(vquantiles[0], vquantiles[1], xmin=xmin, xmax=xmax, facecolor='grey', alpha=0.2, lw=0)
ax.axhspan(vquantiles[3], vquantiles[4], xmin=xmin, xmax=xmax, facecolor='grey', alpha=0.2, lw=0)
x = np.arange(1, len(catalog_list)+1)
labels = [catalog_name for catalog_name, _ in catalog_list]
ax.set_xticks(x)
ax.set_xticks([], True)
if index >= (axes.size - 2):
ax.set_xticklabels(labels, rotation='vertical')
else:
ax.set_xticklabels(['' for _ in x])
ax.yaxis.grid(True)
#ymin = min(vquantiles[0], data[:,index,0].min())
#ymax = max(vquantiles[4], data[:,index,4].max())
#yrange = ymax - ymin
#ax.set_ylim(ymin-0.15*yrange, ymax+0.15*yrange)
if index==3:
ax.legend(fontsize='small', framealpha=0.4, loc='lower right')
plt.tight_layout()
plt.savefig(output_file)
plt.savefig(output_file+'.pdf')
plt.close()
| 21,099
| 43.514768
| 195
|
py
|
descqa
|
descqa-master/v1/descqa/ComputeColorDistribution.py
|
from __future__ import division, print_function
import numpy as np
from astropy.io import fits
from astropy.table import Table
import os
import kcorrect
from astropy.cosmology import FlatLambdaCDM
def load_SDSS(filename, colors, SDSS_kcorrection_z):
"""
Compute the CDF of SDSS colors for some redshift range.
Parameters
----------
colors : list of string, required
list of colors to be tested
e.g ['u-g','g-r','r-i','i-z']
zlo : float, requred
minimum redshift of the validation catalog
zhi : float, requred
maximum redshift of the validation catalog
"""
translate = {'u':'M_u', 'g':'M_g', 'r':'M_r', 'i':'M_i', 'z':'M_z'}
data_dir = os.path.dirname(filename)
kcorrect_magnitudes_path = os.path.join(data_dir, 'sdss_k_corrected_magnitudes_z_0.06_0.09_z_{:.3f}.fits'.format(SDSS_kcorrection_z))
if not os.path.exists(kcorrect_magnitudes_path):
kcorrect_maggies_path = os.path.join(data_dir, 'sdss_k_corrected_maggies_z_0.06_0.09_z_{:.3f}.dat'.format(SDSS_kcorrection_z))
# Load kcorrect templates and filters
kcorrect.load_templates()
kcorrect.load_filters()
kcorrect.reconstruct_maggies_from_file(filename, redshift=SDSS_kcorrection_z, outfile=kcorrect_maggies_path)
#----------Convert kcorrected maggies to magnitudes----------------
cat = Table.read(os.path.join(data_dir, kcorrect_maggies_path), format='ascii.no_header', names=('redshift', 'maggies_u', 'maggies_g', 'maggies_r', 'maggies_i', 'maggies_z'))
cat0 = Table.read(filename, format='ascii.no_header')
redshifts = cat0['col1']
u = -2.5*np.log10(cat['maggies_u'])
g = -2.5*np.log10(cat['maggies_g'])
r = -2.5*np.log10(cat['maggies_r'])
i = -2.5*np.log10(cat['maggies_i'])
z = -2.5*np.log10(cat['maggies_z'])
cat1 = Table()
cat1['redshift'] = redshifts
cat1['u'] = u
cat1['g'] = g
cat1['r'] = r
cat1['i'] = i
cat1['z'] = z
cat1.write(kcorrect_magnitudes_path)
cat = cat1.copy()
else:
cat = Table.read(kcorrect_magnitudes_path)
# distance modulus
##########################################
cosmo = FlatLambdaCDM(H0=70.2, Om0=0.275)
##########################################
dm = np.array(cosmo.distmod(cat['redshift']))
cat['M_u'] = cat['u'] - dm
cat['M_g'] = cat['g'] - dm
cat['M_r'] = cat['r'] - dm
cat['M_i'] = cat['i'] - dm
cat['M_z'] = cat['z'] - dm
# Calculate the absolute magnitude cut (85th percentile of M_r near z=0.09)
mask = (cat['redshift']>0.089) & (cat['redshift']<0.090)
mr = cat['M_r'][mask]
mr_sort = np.sort(mr)
mrmax = mr_sort[int(len(mr)*0.85)]
# Apply r-band absolute magnitude
mask = (cat['M_r'] < mrmax)
cat = cat[mask]
vsummary = []
# Histogram with small bins (for calculating CDF)
for index in range(len(colors)):
color = colors[index]
band1 = translate[color[0]]
band2 = translate[color[2]]
bins = np.linspace(-1, 4, 2000)
hist, bin_edges = np.histogram((cat[band1]-cat[band2]), bins=bins)
hist = hist/np.sum(hist)
binctr = (bin_edges[1:] + bin_edges[:-1])/2.
# Convert PDF to CDF
cdf = np.cumsum(hist)
vsummary.append((len(cat), binctr, hist, cdf))
return vsummary, mrmax
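# Example invocation (hypothetical file name; requires the kcorrect templates
# and filters to be installed):
# vsummary, mrmax = load_SDSS('sdss_z_0.06_0.09.dat', ['u-g', 'g-r', 'r-i', 'i-z'], 0.1)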
| 3,610
| 32.435185
| 182
|
py
|
descqa
|
descqa-master/v1/descqa/__init__.py
|
# Package initializer for the DESCQA validation tests
"""
DESCQA Validation Tests
"""
from .register import *
__version__ = '1.1.0'
| 142
| 19.428571
| 63
|
py
|
descqa
|
descqa-master/v1/descqa/StellarMassHaloMassTest.py
|
from __future__ import division, print_function
import os
import subprocess
import numpy as np
from .ValidationTest import *
def mean_y_in_x_bins(y, x, bins, sorter=None):
y = np.asanyarray(y)
k = np.searchsorted(x, bins, sorter=sorter)
res = []
for i, j in zip(k[:-1], k[1:]):
if j == i:
res.append(np.nan)
else:
s_this = slice(i, j) if sorter is None else sorter[i:j]
res.append(y[s_this].mean())
return np.array(res)
class StellarMassHaloMassTest(ValidationTest):
"""
validation test class object to compute stellar mass halo mass relation
"""
_plot_config = dict(\
xlabel=r'$M_{\rm halo} \; [{\rm M}_\odot]$',
ylabel=r'$\langle M_* \rangle \; [{\rm M}_\odot]$',
xlim=(1.0e8, 1.0e15),
ylim=(1.0e7, 1.0e13),
)
_required_quantities = {'mass', 'stellar_mass', 'parent_halo_id', 'positionX', 'positionY', 'positionZ'}
_available_observations = {'MassiveBlackII'}
_default_kwargs = {
'zlo': 0,
'zhi': 1000.0,
'jackknife_nside': 5,
}
def _subclass_init(self, **kwargs):
"""
load tabulated stellar mass halo mass function data
"""
#column 1: halo mass bin center
#column 2: mean stellar mass
#column 4: mean stellar mass - error (on mean)
#column 5: mean stellar mass + error (on mean)
#column 6: bin minimum
#column 7: bin maximum
#column 8: 1-sigma error
#column 9: 16th percentile
#column 11: 84th percentile
fn = os.path.join(self._base_data_dir, 'MASSIVEBLACKII/StellarMass_HaloMass/tab_new.txt')
self._validation_data = dict(zip(('x', 'y', 'y-', 'y+'), np.loadtxt(fn, unpack=True, usecols=(0,1,3,4))))
self._validation_data['cov'] = np.diag(((self._validation_data['y+']-self._validation_data['y-'])*0.5)**2.0)
self._validation_name = 'MBII (validation)'
def _get_quantities_from_catalog(self, galaxy_catalog):
"""
obtain the masses and mask from the galaxy catalog
Parameters
----------
galaxy_catalog : galaxy catalog reader object
"""
#get stellar masses from galaxy catalog
hm = galaxy_catalog.get_quantities("mass", self._zfilter)
sm = galaxy_catalog.get_quantities("stellar_mass", self._zfilter)
x = galaxy_catalog.get_quantities("positionX", self._zfilter)
y = galaxy_catalog.get_quantities("positionY", self._zfilter)
z = galaxy_catalog.get_quantities("positionZ", self._zfilter)
pid = galaxy_catalog.get_quantities("parent_halo_id", self._zfilter)
#remove non-finite or negative numbers
mask = np.isfinite(hm)
mask &= (hm > 0)
mask &= np.isfinite(sm)
mask &= (sm > 0)
mask &= np.isfinite(x)
mask &= np.isfinite(y)
mask &= np.isfinite(z)
mask &= (pid == -1)
return dict(hm=hm[mask], sm=sm[mask], x=x[mask], y=y[mask], z=z[mask])
def _calc_catalog_result(self, galaxy_catalog):
"""
calculate the stellar mass - halo mass relation in bins
Parameters
----------
galaxy_catalog : galaxy catalog reader object
"""
#get quantities from galaxy catalog
quantities = self._get_quantities_from_catalog(galaxy_catalog)
#sort halo mass
s = quantities['hm'].argsort()
for k in quantities:
quantities[k] = quantities[k][s]
del s
res = {'x': np.sqrt(self._bins[1:]*self._bins[:-1])}
#get errors from jackknife samples if requested
if self._jackknife_nside > 0:
masses = np.vstack((quantities['hm'], quantities['sm'])).T
jack_indices = CalcStats.get_subvolume_indices(quantities['x'], quantities['y'], quantities['z'], \
galaxy_catalog.box_size, self._jackknife_nside)
njack = self._jackknife_nside**3
res['y'], _, covariance = CalcStats.jackknife(masses, jack_indices, njack, \
lambda arr: mean_y_in_x_bins(arr[:,1], arr[:,0], self._bins))
yerr = np.sqrt(np.diag(covariance))
res.update({'y-':res['y']-yerr, 'y+':res['y']+yerr, 'cov':covariance})
else:
res['y'] = mean_y_in_x_bins(quantities['sm'], quantities['hm'], self._bins)
return res
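# For clarity, a minimal sketch of the delete-one-subvolume jackknife used via
# CalcStats.jackknife above (hypothetical helper; the real signature and
# bias-correction details may differ):
def _jackknife_sketch(data, jack_indices, njack, estimator):
full = estimator(data)
jacks = np.array([estimator(data[jack_indices != i]) for i in range(njack)])
mean = jacks.mean(axis=0)
diff = jacks - mean
cov = (njack - 1.0)/njack * np.einsum('ij,ik->jk', diff, diff)
return full, mean, cov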
| 4,421
| 33.015385
| 116
|
py
|
descqa
|
descqa-master/v1/descqa/register.py
|
import os
import importlib
import yaml
from .base import BaseValidationTest
__all__ = ['available_validations', 'load_validation', 'load_validation_from_config_dict']
def load_yaml(yaml_file):
"""
Load *yaml_file*. Return a dictionary.
"""
with open(yaml_file) as f:
config = yaml.safe_load(f)
return config
def import_subclass(subclass, package=None, required_base_class=None):
"""
Import and return a subclass.
"""
subclass = getattr(importlib.import_module('.'+subclass, package), subclass)
if required_base_class:
assert issubclass(subclass, required_base_class), "Provided class is not a subclass of *required_base_class*"
return subclass
def get_available_configs(config_dir, register=None):
"""
Return (or update) a dictionary *register* that contains all config files in *config_dir*.
"""
if register is None:
register = dict()
for config_file in os.listdir(config_dir):
if config_file.startswith('_') or not config_file.lower().endswith('.yaml'):
continue
name = os.path.splitext(config_file)[0]
config = load_yaml(os.path.join(config_dir, config_file))
config['test_name'] = name
config['base_data_dir'] = os.path.join(os.path.dirname(__file__), 'data')
register[name] = config
return register
def load_validation_from_config_dict(validation_config):
"""
Load a validation test using a config dictionary.
Parameters
----------
validation_config : dict
a dictionary of config options
Return
------
validation_test : instance of a subclass of BaseValidationTest
See also
--------
load_catalog()
"""
return import_subclass(validation_config['module'],
__package__,
BaseValidationTest)(**validation_config)
def load_validation(validation_name, config_overwrite=None):
"""
Load a validation test as specified in one of the yaml file in configs.
Parameters
----------
validation_name : str
name of the validation test (without '.yaml')
config_overwrite : dict, optional
a dictionary of config options to overwrite
Return
------
validation_test : instance of a subclass of BaseValidationTest
"""
if validation_name.lower().endswith('.yaml'):
validation_name = validation_name[:-5]
if validation_name not in available_validations:
raise KeyError("Validation `{}` does not exist in the register. See `available_validations`.".format(validation_name))
config = available_validations[validation_name]
if config_overwrite:
config = config.copy()
config.update(config_overwrite)
return load_validation_from_config_dict(config)
available_validations = get_available_configs(os.path.join(os.path.dirname(__file__), 'configs'))
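# Example (hypothetical test name and config override):
# test = load_validation('color_distribution', {'threshold': 0.05})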
| 2,922
| 27.656863
| 126
|
py
|
descqa
|
descqa-master/v1/descqa/BinnedStellarMassFunctionTest.py
|
from __future__ import division, print_function
import os
import numpy as np
from .ValidationTest import *
class BinnedStellarMassFunctionTest(ValidationTest):
"""
validation test class object to compute stellar mass function bins
"""
_plot_config = dict(\
xlabel=r'$M_* \; [{\rm M}_\odot]$',
ylabel=r'$dn\,/\,d\log M \; [{\rm Mpc}^{-3}\,{\rm dex}^{-1}]$',
xlim=(1.0e7, 2.0e12),
ylim=(1.0e-7, 10.0),
)
_required_quantities = {'stellar_mass', 'positionX', 'positionY', 'positionZ'}
_available_observations = {'LiWhite2009', 'MassiveBlackII'}
_default_kwargs = {
'observation': 'LiWhite2009',
'zlo': 0.0,
'zhi': 1000.0,
'jackknife_nside': 5,
}
def _subclass_init(self, **kwargs):
"""
load tabulated stellar mass function data
"""
columns = {'LiWhite2009': (0,5,6), 'MassiveBlackII': (0,1,2)}.get(self._observation)
fn = os.path.join(self._base_data_dir, 'LIWHITE/StellarMassFunction/massfunc_dataerr.txt')
#column 1: stellar mass bin center
#column 2: number density
#column 3: 1-sigma error
binctr, mhist, merr = np.loadtxt(fn, unpack=True, usecols=columns)
self._validation_data = {'x':binctr, 'y':mhist, 'y-':mhist-merr, 'y+':mhist+merr, 'cov':np.diag(merr*merr)}
def _get_quantities_from_catalog(self, galaxy_catalog):
"""
obtain the masses and mask from the galaxy catalog
Parameters
----------
galaxy_catalog : galaxy catalog reader object
"""
#get stellar masses from galaxy catalog
sm = galaxy_catalog.get_quantities("stellar_mass", self._zfilter)
x = galaxy_catalog.get_quantities("positionX", self._zfilter)
y = galaxy_catalog.get_quantities("positionY", self._zfilter)
z = galaxy_catalog.get_quantities("positionZ", self._zfilter)
#remove non-finite or negative numbers
mask = np.isfinite(sm)
mask &= (sm > 0)
mask &= np.isfinite(x)
mask &= np.isfinite(y)
mask &= np.isfinite(z)
return dict(mass=sm[mask], x=x[mask], y=y[mask], z=z[mask])
def _calc_catalog_result(self, galaxy_catalog):
"""
calculate the stellar mass function in bins
Parameters
----------
galaxy_catalog : galaxy catalog reader object
"""
#get stellar masses from galaxy catalog
quantities = self._get_quantities_from_catalog(galaxy_catalog)
#histogram points and compute bin positions
mhist = np.histogram(quantities['mass'], bins=self._bins)[0].astype(float)
summass = np.histogram(quantities['mass'], bins=self._bins, weights=quantities['mass'])[0]
binwid = np.log10(self._bins[1:]/self._bins[:-1])
binctr = np.sqrt(self._bins[1:]*self._bins[:-1])
has_mass = (mhist > 0)
binctr[has_mass] = (summass/mhist)[has_mass]
#count galaxies in log bins
#get errors from jackknife samples if requested
if self._jackknife_nside > 0:
jack_indices = CalcStats.get_subvolume_indices(quantities['x'], quantities['y'], quantities['z'], \
galaxy_catalog.box_size, self._jackknife_nside)
njack = self._jackknife_nside**3
mhist, _, covariance = CalcStats.jackknife(quantities['mass'], jack_indices, njack, \
lambda m, scale: np.histogram(m, bins=self._bins)[0]*scale, \
full_args=(1.0,), jack_args=(njack/(njack-1.0),))
else:
covariance = np.diag(mhist)
#calculate number differential density
vol = galaxy_catalog.box_size**3.0
mhist /= (binwid * vol)
covariance /= (vol*vol)
covariance /= np.outer(binwid, binwid)
merr = np.sqrt(np.diag(covariance))
return {'x':binctr, 'y':mhist, 'y-':mhist-merr, 'y+':mhist+merr, 'cov':covariance}
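# Sanity check (sketch): after the normalization above, integrating the mass
# function over the log-bins recovers the comoving number density of the
# selected galaxies, i.e. np.sum(mhist*binwid) ~ len(quantities['mass'])/vol
# (up to the jackknife rescaling when jackknife errors are enabled).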
def plot_summary(self, output_file, catalog_list, save_pdf=True):
super(self.__class__, self).plot_summary(output_file, catalog_list, save_pdf)
if self._observation == 'LiWhite2009':
self._plot_smf_validation_comparison(os.path.join(os.path.dirname(output_file), 'smf_validation_comparison.pdf'))
def _plot_smf_validation_comparison(self, save_path):
d = np.loadtxt(os.path.join(self._base_data_dir, 'LIWHITE/StellarMassFunction/massfunc_dataerr.txt'), unpack=True)
fig, ax = plt.subplots()
for i, ls, label in zip((1,3,5), ('--', '-', ':'), ('MBII', 'LiWhite2009', 'LiWhite2009 (corrected)')):
ax.errorbar(d[0], d[i], d[i+1], ls=ls, label=label)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_xlabel(self._plot_config.get('xlabel'))
ax.set_ylabel(self._plot_config.get('ylabel'))
ax.set_xlim(self._plot_config.get('xlim'))
ax.set_ylim(self._plot_config.get('ylim'))
ax.legend()
fig.tight_layout()
fig.savefig(save_path)
plt.close(fig)
| 5,060
| 37.930769
| 125
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/YaleCAMGalaxyCatalog.py
|
# Yale CAM galaxy catalogue class
# Duncan Campbell
# Yale University
# February, 2016
# load modules
import os
import re
import numpy as np
import h5py
from astropy.cosmology import FlatLambdaCDM
import astropy.units as u
from .GalaxyCatalogInterface import GalaxyCatalog
__all__ = ['YaleCAMGalaxyCatalog']
class YaleCAMGalaxyCatalog(GalaxyCatalog):
"""
Yale CAM galaxy catalog class.
Notes
-----
The Yale CAM galaxy mocks store all physical properties internally in units where h=1.
"""
def __init__(self, **kwargs):
"""
Initialize Yale CAM galaxy catalog class.
Parameters
----------
fn : string
filename of mock catalog
"""
fn = kwargs.get('fn')
# set file type and location
self.type_ext = 'hdf5'
# set fixed properties
self.lightcone = False
self.cosmology = FlatLambdaCDM(H0=70.2, Om0 = 0.275)
self.simulation = 'Massive Black'
self.box_size = 100.0 / self.cosmology.h
self.volume = self.box_size**3.0
self.SDSS_kcorrection_z = 0
# translates between desc keywords to those used in the stored mock
# note: all appropriate quantities are in h=1 units.
self.quantities = { 'stellar_mass': self._stored_property_wrapper('stellar_mass'),
'mass': self._stored_property_wrapper('halo_mvir'),
'ssfr': self._stored_property_wrapper('SSFR'),
'halo_id': self._stored_property_wrapper('halo_id'),
'positionX': self._stored_property_wrapper('x'),
'positionY': self._stored_property_wrapper('y'),
'positionZ': self._stored_property_wrapper('z'),
'velocityX': self._stored_property_wrapper('vx'),
'velocityY': self._stored_property_wrapper('vy'),
'velocityZ': self._stored_property_wrapper('vz'),
'SDSS_u:rest:': self._stored_property_wrapper('absmag_u'),
'SDSS_g:rest:': self._stored_property_wrapper('absmag_g'),
'SDSS_r:rest:': self._stored_property_wrapper('absmag_r'),
'SDSS_i:rest:': self._stored_property_wrapper('absmag_i'),
'SDSS_z:rest:': self._stored_property_wrapper('absmag_z'),
'SDSS_u:observed:': self._stored_property_wrapper('mag_u'),
'SDSS_g:observed:': self._stored_property_wrapper('mag_g'),
'SDSS_r:observed:': self._stored_property_wrapper('mag_r'),
'SDSS_i:observed:': self._stored_property_wrapper('mag_i'),
'SDSS_z:observed:': self._stored_property_wrapper('mag_z'),
'g-r': self._stored_property_wrapper('g-r'),
'parent_halo_id': self._stored_property_wrapper('halo_upid'),
}
return GalaxyCatalog.__init__(self, fn)
def load(self, fn='yale_cam_age_matching_LiWhite_2009_z0.0.hdf5'):
"""
load mock galaxy catalog
Parameters
----------
fn : string
filename of mock catalog
"""
#extract mock parameters from filename
nums = re.findall(r"[-+]?\d*\.\d+|\d+", fn)
self.redshift = float(nums[-2])
f = h5py.File(fn, 'r')
toplevel = list(f.keys())[0]  # list() for h5py/Python 3 compatibility
self._data = f.get(toplevel)
#convert quantities into physical units given the cosmology
#see 'notes' section of the Yale CAM class.
#see arXiv:1308.4150
#cast hdf5 data as numpy array to allow modification of data without modifying file contents
self.Xdata=np.array(self._data)
self.Xdata['stellar_mass'] = self._data['stellar_mass']/(self.cosmology.h)**2
self.Xdata['x'] = self._data['x']/(self.cosmology.h)
self.Xdata['y'] = self._data['y']/(self.cosmology.h)
self.Xdata['z'] = self._data['z']/(self.cosmology.h)
self.Xdata['halo_mvir'] = self._data['halo_mvir']/(self.cosmology.h)
self.Xdata['absmag_u'] = self._data['absmag_u'] + 5.0*np.log10(self.cosmology.h)
self.Xdata['absmag_g'] = self._data['absmag_g'] + 5.0*np.log10(self.cosmology.h)
self.Xdata['absmag_r'] = self._data['absmag_r'] + 5.0*np.log10(self.cosmology.h)
self.Xdata['absmag_i'] = self._data['absmag_i'] + 5.0*np.log10(self.cosmology.h)
self.Xdata['absmag_z'] = self._data['absmag_z'] + 5.0*np.log10(self.cosmology.h)
#I think this is the correct thing to do with apparent magnitudes
self.Xdata['mag_u'] = self._data['mag_u'] - 5.0*np.log10(self.cosmology.h)
self.Xdata['mag_g'] = self._data['mag_g'] - 5.0*np.log10(self.cosmology.h)
self.Xdata['mag_r'] = self._data['mag_r'] - 5.0*np.log10(self.cosmology.h)
self.Xdata['mag_i'] = self._data['mag_i'] - 5.0*np.log10(self.cosmology.h)
self.Xdata['mag_z'] = self._data['mag_z'] - 5.0*np.log10(self.cosmology.h)
#how many galaxies are in the catalog?
self.Ngals = len(self._data)
return self
def _construct_mask(self, filters):
"""
Construct a mask array for use in filtering the catalog.
Parameters
----------
filters: dict
dictionary of filter constraints
Returns
-------
mask : numpy.array
numpy array boolean mask
"""
#check that filters is of the correct type
if type(filters) is not dict:
msg = ('filters must be given as a dictionary type.')
raise TypeError(msg)
#initialize filter
mask = np.ones((self.Ngals), dtype=bool)
#generate boolean mask
for filter_name in filters.keys():
#place code here to create filter(s)
pass
return mask
def _get_stored_property(self, quantity, filters):
"""
Return the requested property of galaxies in the mock catalog.
Parameters
----------
quantity : string
key into mock galaxy catalogue of galaxy property
filters : dict
dictionary of filter constraints
Returns
-------
property : numpy.array
numpy array of requested property from the catalogue
"""
#build filter
filter_mask = self._construct_mask(filters)
#return requested data as an array
return self.Xdata[quantity][np.where(filter_mask)]
def _stored_property_wrapper(self, name):
"""
private function used to translate desc keywords into stored keywords in the mock
Parameters
----------
name : string
key into stored mock catalogue
"""
return (lambda quantity, filter : self._get_stored_property(name, filter))
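# Worked example of the h-unit conversions above (illustrative numbers):
# with h = 0.702, a stored stellar mass of 1e10 Msun*h^-2 becomes
# 1e10/0.702**2 ~ 2.03e10 Msun, and an absolute magnitude shifts by
# 5*log10(0.702) ~ -0.77 mag.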
| 7,150
| 36.835979
| 100
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/MB2GalaxyCatalog.py
|
# Massive Black 2 galaxy catalog class
import numpy as np
from astropy.table import Table
import astropy.units as u
import astropy.cosmology
from .GalaxyCatalogInterface import GalaxyCatalog
class MB2GalaxyCatalog(GalaxyCatalog):
"""
Massive Black 2 galaxy catalog class.
"""
def __init__(self, **kwargs):
fn = kwargs.get('fn')
self.type_ext = 'MB2'
self.filters = {
'zlo': True,
'zhi': True
}
self.h = 0.702
self.cosmology = astropy.cosmology.FlatLambdaCDM(H0=self.h*100.0, Om0 = 0.275)
self.quantities = {
'halo_id': self._get_stored_property,
'parent_halo_id': self._get_stored_property,
'redshift': self._get_stored_property,
'positionX': self._get_derived_property, # Position returned in Mpc, stored in kpc/h
'positionY': self._get_derived_property,
'positionZ': self._get_derived_property,
'velocityX': self._get_stored_property, # Velocity returned in km/sec
'velocityY': self._get_stored_property, # Velocity returned in km/sec
'velocityZ': self._get_stored_property, # Velocity returned in km/sec
'mass': self._get_derived_property, # Masses returned in Msun but stored in 1e10 Msun/h
'stellar_mass': self._get_derived_property,
'gas_mass': self._get_stored_property,
'sfr': self._get_stored_property,
'SDSS_u:observed:': self._get_derived_property,
'SDSS_g:observed:': self._get_derived_property,
'SDSS_r:observed:': self._get_derived_property,
'SDSS_i:observed:': self._get_derived_property,
'SDSS_z:observed:': self._get_derived_property,
'SDSS_u:rest:': self._get_derived_property,
'SDSS_g:rest:': self._get_derived_property,
'SDSS_r:rest:': self._get_derived_property,
'SDSS_i:rest:': self._get_derived_property,
'SDSS_z:rest:': self._get_derived_property,
}
self.derived = {
'mass': (('mass',), (1.e10 / self.h,), self._multiply),
'stellar_mass': (('stellar_mass',), (1.e10 / self.h,), self._multiply),
'positionX': (('x',), (1.e-3 / self.h,), self._multiply), # Position stored in kpc/h
'positionY': (('y',), (1.e-3 / self.h,), self._multiply),
'positionZ': (('z',), (1.e-3 / self.h,), self._multiply),
'SDSS_u:rest:': (('SDSS_u:rest:',), (), self._luminosity_to_magnitude),
'SDSS_g:rest:': (('SDSS_g:rest:',), (), self._luminosity_to_magnitude),
'SDSS_r:rest:': (('SDSS_r:rest:',), (), self._luminosity_to_magnitude),
'SDSS_i:rest:': (('SDSS_i:rest:',), (), self._luminosity_to_magnitude),
'SDSS_z:rest:': (('SDSS_z:rest:',), (), self._luminosity_to_magnitude),
'SDSS_u:observed:': (('SDSS_u:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_g:observed:': (('SDSS_g:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_r:observed:': (('SDSS_r:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_i:observed:': (('SDSS_i:rest:', 'redshift'), (), self._add_distance_modulus),
'SDSS_z:observed:': (('SDSS_z:rest:', 'redshift'), (), self._add_distance_modulus),
}
self.Ngals = 0
self.sky_area = 4.*np.pi*u.sr # all sky by default
self.lightcone = False
self.box_size = 100.0 / self.h
self.SDSS_kcorrection_z = 0.1
return GalaxyCatalog.__init__(self, fn)
def load(self, fn):
"""
Given a catalog path, attempt to read the catalog and set up its
internal data structures.
"""
self.catalog = Table.read(fn, path='data')
self.Ngals = len(self.catalog)
self.redshift = self.catalog['redshift'][0]
return self
def _construct_mask(self, filters):
"""
Given a dictionary of filter constraints, construct a mask array
for use in filtering the catalog.
"""
if type(filters) is not dict:
raise TypeError("construct_mask: filters must be given as dict")
mask = np.ones(self.Ngals, dtype=bool)
mask &= (np.isfinite(self.catalog['x'])) # filter out NaN positions from catalog
mask &= (np.isfinite(self.catalog['y']))
mask &= (np.isfinite(self.catalog['z']))
for filter_name in filters.keys():
if filter_name == 'zlo':
mask &= (filters[filter_name] < self.catalog['redshift'])
elif filter_name == 'zhi':
mask &= (filters[filter_name] > self.catalog['redshift'])
return mask
def _get_stored_property(self, quantity, filters):
"""
Return the requested property of galaxies in the catalog as a NumPy
array. This is for properties that are explicitly stored in the
catalog.
"""
filter_mask = self._construct_mask(filters)
if not filter_mask.any():
return np.array([])
return self.catalog[quantity][np.where(filter_mask)].data
def _get_derived_property(self, quantity, filters):
"""
Return a derived halo property. These properties aren't stored
in the catalog but can be computed from properties that are via
a simple function call.
"""
filter_mask = self._construct_mask(filters)
if not filter_mask.any():
return np.array([])
arrays_required, scalars, func = self.derived[quantity]
return func([self.catalog[name][np.where(filter_mask)].data for name in arrays_required], scalars)
# Functions for computing derived values
def _translate(self, propList):
"""
Translation routine -- a passthrough that accomplishes mapping of
derived quantity names to stored quantity names via the derived
property function mechanism.
"""
return propList
def _multiply(self, array_tuple, scalar_tuple):
"""
Multiplication routine -- derived quantity is equal to a stored
quantity times some factor. Additional args for the derived quantity
routines are passed in as a tuple, so extract the factor first.
"""
return array_tuple[0] * scalar_tuple[0]
def _add_distance_modulus(self, array_tuple, scalar_tuple):
return self._luminosity_to_magnitude(array_tuple,scalar_tuple) + self.cosmology.distmod(array_tuple[1]).value
def _luminosity_to_magnitude(self,array_tuple,scalar_tuple):
# stored band luminosities appear to be in units of 1e28 erg/s/Hz;
# convert to flux at 10 pc (10 pc = 3.08567758e19 cm, so 4*pi*d^2 carries
# the factor 1e38*3.08567758**2 cm^2), then to an AB magnitude (zero point -48.6)
bandlum = array_tuple[0]*1.0e28
bandflux = bandlum/(4*(np.pi)*(1.0e38)*(3.08567758**2))
return -2.5*(np.log10(bandflux)) - 48.6
| 7,881
| 51.198675
| 134
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/GalaxyCatalogInterface.py
|
# DESCQA galaxy catalog interface. This defines the GalaxyCatalog base class
# and, on import, registers all of the available catalog readers. Convenience
# functions are defined that enable automatic detection of the appropriate
# catalog type.
# Note: right now we are working with galaxy properties as floats, with
# expected return units listed in GalaxyCatalog.__init__ below. In the future
# we might move to expecting values as Astropy Quantity objects.
__all__ = ['GalaxyCatalog']
import os
import numpy as np
import astropy.units as u
# Galaxy catalog base class.
class GalaxyCatalog(object):
"""
Base class for galaxy catalog classes. Common internal data structures:
type_ext A string giving the file name extension, for catalogs that use
the default method for determining file type.
filters A dictionary whose keys are strings giving the names of
filters supported by the catalog class, and whose values are
the methods used to apply these constraints (or True if they
are supported but handled via a different mechanism). The
default implementation sets this dictionary to include keys
that should be supported by all catalogs.
quantities A dictionary whose keys are strings giving the names of
quantities that can be requested from the catalog, and whose
values are the methods used to request these quantities. The
methods should take two arguments: the name of the quantity
and a dictionary containing the filters to be applied and the
values for the filters. The default implementation sets this
dictionary to include keys that should be supported by all
catalogs.
sky_area The sky area covered by the catalog as an Astropy Quantity
object.
cosmology Should be set by load routines to an Astropy.cosmology object
encoding the cosmology used to generate the catalog. This
allows calling programs to compute things like comoving
volumes appropriately. None by default.
"""
type_ext = ''
filters = {'zlo' : None, # min redshift
'zhi' : None # max redshift
}
quantities = {'stellar_mass' : None # stellar mass in M_sun
}
sky_area = 4.*np.pi*u.sr # all sky by default
cosmology = None
def __init__(self, fn=None):
"""
Default GalaxyCatalog constructor takes one optional filename argument.
If present, the referenced catalog is checked for validity and loaded
if possible. If it is not valid, a ValueError is raised. If no argument
is given, an instance of the class is created without internal data.
Subclass __init__ methods that override this one should call this one
just before they return.
"""
if fn:
if self.is_valid(fn):
self.load(fn)
else:
raise ValueError('invalid catalog file')
def is_valid(self, fn):
"""
Given a catalog path, determine whether it is a valid catalog of this
type. The default implementation merely checks the filename extension
against the type_ext attribute of the class.
"""
base = os.path.basename(fn)
ext = base.split('.')[-1]
return (ext == self.type_ext)
def load(self, fn):
"""
Given a catalog path, attempt to read the catalog and set up its
internal data structures. Should return self if successful.
"""
return self
def get_cosmology(self):
"""
Return as an Astropy.cosmology object the cosmological parameter values
assumed in generating this catalog.
"""
return self.cosmology
def get_sky_area(self):
"""
Return the sky area covered by the catalog as an Astropy Quantity.
"""
return self.sky_area
def get_quantities(self, ids, filters):
"""
Given a list of string property names and optional filter arguments,
return as a list of NumPy arrays the selected values from the catalog.
A single property name can also be passed, in which case the result
is a single NumPy array. Filters are specified using a dictionary in
which the keys are string constraint names and the values are the
constraints.
"""
if type(ids) is list:
idList = ids
elif isinstance(ids, str):
idList = [ids]
else:
raise TypeError("get_quantities: ids must be list or str")
if type(filters) != dict:
raise TypeError("get_quantities: filters must be dict")
okQuantities = self.get_supp_quantities()
for quantity in idList:
if quantity not in okQuantities:
raise ValueError("get_quantities: quantity '%s' not supported" % quantity)
okFilters = self.get_supp_filters()
for filt in filters.keys():
if filt not in okFilters:
raise ValueError("get_quantities: filter '%s' not supported" % filt)
results = []
for quantity in idList:
quantityGetter = self.quantities[quantity]
results.append(quantityGetter(quantity, filters))
if type(ids) == list:
return results
else:
return results[0]
def get_supp_filters(self):
"""
Return a list containing the supported filter keywords for this
catalog.
"""
filterListOK = []
for filt in self.filters:
if self.filters[filt]:
filterListOK.append(filt)
return filterListOK
def get_supp_quantities(self):
"""
Return a list containing the supported quantities for this
catalog.
"""
quantityList = self.quantities.keys()
quantityListOK = []
for quantity in quantityList:
if self.quantities[quantity]:
quantityListOK.append(quantity)
return quantityListOK
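# Minimal usage sketch (hypothetical subclass and filter values; the available
# quantities and filters depend on the concrete reader):
# cat = SomeGalaxyCatalog(fn='catalog.ext')
# sm, x = cat.get_quantities(['stellar_mass', 'positionX'], {'zlo': 0.0, 'zhi': 0.1})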
| 6,439
| 38.268293
| 90
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/config.py
|
__all__ = ['base_catalog_dir']
base_catalog_dir = '/global/cfs/cdirs/lsst/groups/CS/descqa/catalog'
| 100
| 32.666667
| 68
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/GalacticusGalaxyCatalog.py
|
# Argonne galaxy catalog class.
import os
import re
import numpy as np
import h5py
import astropy.cosmology
import astropy.units as u
from .GalaxyCatalogInterface import GalaxyCatalog
class GalacticusGalaxyCatalog(GalaxyCatalog):
"""
Argonne galaxy catalog class. Uses generic quantity and filter mechanisms
defined by GalaxyCatalog base class. In addition, implements the use of
'stored' vs. 'derived' quantity getter methods. Additional data structures:
catalog A dictionary whose keys are halo names and whose values are
themselves dictionaries. The halo dictionaries have as keys
the names of the various stored properties, and as values
arrays containing the values of these quantities for each of
the galaxies in the halos.
derived A dictionary whose keys are the names of derived quantities
and whose values are tuples containing the string name of a
corresponding stored quantity (actually present in the file)
and a pointer to the function used to compute the derived
quantity from the stored one. Some catalogs may support
having the stored quantity be a tuple of stored quantity
names.
"""
Output='Output'
Outputs='Outputs'
z='z'
def __init__(self, **kwargs):
self.kwargs = kwargs
self.type_ext = 'galacticus'
self.filters = { 'zlo': True,
'zhi': True
}
self.quantities = { 'redshift': self._get_stored_property,
'ra': self._get_stored_property,
'dec': self._get_stored_property,
'v_pec': self._get_stored_property,
'mass': self._get_derived_property,
'age': self._get_stored_property,
'stellar_mass': self._get_derived_property,
'log_stellarmass': self._get_stored_property,
'log_halomass': self._get_stored_property,
'gas_mass': self._get_stored_property,
'metallicity': self._get_stored_property,
'sfr': self._get_stored_property,
'ellipticity': self._get_stored_property,
#'positionX': self._get_derived_property, #units are in physical Mpc
#'positionY': self._get_derived_property,
#'positionZ': self._get_derived_property,
'positionX': self._get_stored_property, #units are now in comoving Mpc
'positionY': self._get_stored_property,
'positionZ': self._get_stored_property,
'velocityX': self._get_stored_property,
'velocityY': self._get_stored_property,
'velocityZ': self._get_stored_property,
'parent_halo_id': self._get_stored_property,
'disk_ra': self._get_stored_property,
'disk_dec': self._get_stored_property,
'disk_sigma0': self._get_stored_property,
'disk_re': self._get_stored_property,
'disk_index': self._get_stored_property,
'disk_a': self._get_stored_property,
'disk_b': self._get_stored_property,
'disk_theta_los': self._get_stored_property,
'disk_phi': self._get_stored_property,
'disk_stellarmass': self._get_derived_property,
'log_disk_stellarmass': self._get_stored_property,
'disk_metallicity': self._get_stored_property,
'disk_age': self._get_stored_property,
'disk_sfr': self._get_stored_property,
'disk_ellipticity': self._get_stored_property,
'bulge_ra': self._get_stored_property,
'bulge_dec': self._get_stored_property,
'bulge_sigma0': self._get_stored_property,
'bulge_re': self._get_stored_property,
'bulge_index': self._get_stored_property,
'bulge_a': self._get_stored_property,
'bulge_b': self._get_stored_property,
'bulge_theta_los': self._get_stored_property,
'bulge_phi': self._get_stored_property,
'bulge_stellarmass': self._get_derived_property,
'log_bulge_stellarmass': self._get_stored_property,
'bulge_age': self._get_stored_property,
'bulge_sfr': self._get_stored_property,
'bulge_metallicity': self._get_stored_property,
'bulge_ellipticity': self._get_stored_property,
'agn_ra': self._get_stored_property,
'agn_dec': self._get_stored_property,
'agn_mass': self._get_stored_property,
'agn_accretnrate': self._get_stored_property,
'SDSS_u:rest:': self._get_stored_property,
'SDSS_g:rest:': self._get_stored_property,
'SDSS_r:rest:': self._get_stored_property,
'SDSS_i:rest:': self._get_stored_property,
'SDSS_z:rest:': self._get_stored_property,
'SDSS_u:observed:': self._get_stored_property,
'SDSS_g:observed:': self._get_stored_property,
'SDSS_r:observed:': self._get_stored_property,
'SDSS_i:observed:': self._get_stored_property,
'SDSS_z:observed:': self._get_stored_property,
'DES_g:rest:': None,
'DES_r:rest:': None,
'DES_i:rest:': None,
'DES_z:rest:': None,
'DES_Y:rest:': None,
'DES_g:observed:': None,
'DES_r:observed:': None,
'DES_i:observed:': None,
'DES_z:observed:': None,
'DES_Y:observed:': None,
'LSST_u:rest:': None,
'LSST_g:rest:': None,
'LSST_r:rest:': None,
'LSST_i:rest:': None,
'LSST_z:rest:': None,
'LSST_y4:rest:': None,
'LSST_u:observed:': None,
'LSST_g:observed:': None,
'LSST_r:observed:': None,
'LSST_i:observed:': None,
'LSST_z:observed:': None,
'LSST_y4:observed:': None,
'B': None,
'U': None,
'V': None,
'CFHTL_g:rest:': None,
'CFHTL_r:rest:': None,
'CFHTL_i:rest:': None,
'CFHTL_z:rest:': None,
'CFHTL_g:observed:': None,
'CFHTL_r:observed:': None,
'CFHTL_i:observed:': None,
'CFHTL_z:observed:': None,
}
self.derived = {'stellar_mass': ('log_stellarmass', self._unlog10),
'mass': ('log_halomass', self._unlog10),
'disk_stellarmass': ('log_disk_stellarmass', self._unlog10),
'bulge_stellarmass': ('log_bulge_stellarmass', self._unlog10)}
self.catalog = {}
self.sky_area = 4.*np.pi*u.sr # all sky by default
self.cosmology = None
self.lightcone = False
self.box_size = None
self.outkeys = []
self.zvalues = []
self.load_init() #get catalog output keys only
def load_init(self):
"""
Given a catalog path, attempt to read the catalog output groups and cosmological parameters
"""
fn = os.path.join(self.kwargs['base_catalog_dir'],self.kwargs['filename'])
hdfFile = h5py.File(fn, 'r')
hdfKeys, self.hdf5groups = self._gethdf5group(hdfFile)
self.outkeys=sorted([key for key in hdfKeys if key.find(self.Output)!=-1],key=self.stringSplitByIntegers)
self.zvalues=self.getzvalues(self.outkeys)
allowed_parkeys = ['parameters','Parameters']
for key in allowed_parkeys:
if (key in hdfKeys):
mydict = self._gethdf5attributes(hdfFile, key)
self.cosmology = astropy.cosmology.LambdaCDM(H0 = mydict['H_0'],
Om0 = mydict['Omega_Matter'],
Ode0 = mydict['Omega_DE'])
self.box_size=mydict['boxSize'] #already in Mpc
self.sigma_8=mydict['sigma_8']
self.n_s=mydict['N_s']
self.redshift = [] #empty until values requested by test
self.catalog = {} #init empty catalog
#kluge for color test on snapshot
self.SDSS_kcorrection_z=.05
# TODO: how to get sky area?
hdfFile.close()
def load(self,hdfKeys=[]):
"""
Given a catalog path, attempt to read the catalog and set up its
internal data structures.
"""
if len(self.outkeys)==0:
self.load_init()
#check for requested keys and use all keys as default
if(len(hdfKeys)==0):
hdfKeys=self.outkeys
fn = os.path.join(self.kwargs['base_catalog_dir'],self.kwargs['filename'])
hdfFile = h5py.File(fn, 'r')
#print "load using keys: ", hdfKeys
#loop over requested keys and fetch those not already loaded
for key in hdfKeys:
if 'Output' in key and not(key in self.catalog.keys()): #check key, check if already loaded
outgroup = hdfFile[key]
dataKeys, dataAttrs = self._gethdf5group(outgroup)
self.catalog[key] = self._gethdf5arrays(outgroup)
hdfFile.close()
return
# Functions for applying filters
#check this function to see if really necessary
def _check_halo(self, halo, filters):
"""
Apply the requested filters to a given halo and return True if it
passes them all, False if not.
"""
status = True
if type(filters) is not dict:
raise TypeError("check_halo: filters must be given as dict")
for filter_name in filters.keys():
if filter_name == 'zlo':
try:
zmax = max(halo['redshift'])
status = status and (zmax >= filters[filter_name])
except KeyError:
status = False
elif filter_name == 'zhi':
try:
zmin = min(halo['redshift'])
status = status and (zmin <= filters[filter_name])
except KeyError:
status = False
return status
# Functions for returning quantities from the catalog
def _getfiltered_outkeys(self,filters):
outkeys=[]
zvalues=[]
for z,outkey in zip(self.zvalues, self.outkeys):
if z > filters.get('zlo',-0.01) and z < filters.get('zhi',9999.):
outkeys.append(outkey)
zvalues.append(z)
return outkeys, zvalues
def _get_stored_property(self, quantity, filters):
"""
Return the requested property of galaxies in the catalog as a NumPy
array. This is for properties that are explicitly stored in the
catalog.
"""
props = []
outkeys, zvalues = self._getfiltered_outkeys(filters)
#get any data that hasn't been loaded
self.load(hdfKeys=outkeys)
if (len(outkeys)>0):
self.redshift=zvalues
for outkey in outkeys:
outdict = self.catalog[outkey]
if self._check_halo(outdict, filters):
if quantity in outdict.keys():
props.extend(outdict[quantity])
else:
raise ValueError('No catalog outputs available for redshifts requested')
return np.asarray(props)
def _get_derived_property(self, quantity, filters):
"""
Return a derived halo property. These properties aren't stored
in the catalog but can be computed from properties that are via
a simple function call.
"""
#print "in get_derived_property: ", quantity, filters
props = []
#if 'position' in quantity:
# return np.asarray()
stored_qty_rec = self.derived[quantity]
stored_qty_name = stored_qty_rec[0]
stored_qty_fctn = stored_qty_rec[1]
#print 'stored_qty:', stored_qty_name, stored_qty_fctn
outkeys, zvalues = self._getfiltered_outkeys(filters)
#get any data that hasn't been loaded
self.load(hdfKeys=outkeys)
if (len(outkeys)>0):
self.redshift=zvalues
for outkey in outkeys:
outdict = self.catalog[outkey]
if self._check_halo(outdict, filters):
#if type(stored_qty_name) is tuple and stored_qty_name[0] in outdict.keys():
# values = outdict[stored_qty_name[0]]
# props.extend(stored_qty_fctn(values, stored_qty_name[1:]))
#else:
if stored_qty_name in outdict.keys():
props.extend(stored_qty_fctn( outdict[stored_qty_name] ))
else:
print ('No catalog outputs available for redshifts requested')
return np.asarray(props)
# Functions for computing derived values
def _unlog10(self, propList):
"""
Take a list of numbers and return 10.**(the numbers).
"""
result = []
for value in propList:
result.append(10.**value)
return result
# HDF5 utility routines
def _gethdf5keys(self,id,*args):
keys=id.keys()
keylist=[str(x) for x in keys]
return keylist
def _gethdf5attributes(self,id,key,*args):
#Return dictionary with group attributes and values
group=id[key]
mydict={}
for item in group.attrs.items():
attribute=str(item[0])
mydict[attribute]=item[1]
#endfor
return mydict
def _gethdf5group(self,group,*args):
#return dictionary of (sub)group dictionaries
groupkeys=self._gethdf5keys(group)
groupdict={}
for key in groupkeys:
mydict=self._gethdf5attributes(group,key)
groupdict[str(key)]=mydict
#endfor
return groupkeys,groupdict
def _gethdf5arrays(self,group,*args):
groupkeys=self._gethdf5keys(group)
arraydict={}
oldlen=-1
for key in groupkeys:
array=np.array(group[key])
arraylen=len(array)
if(oldlen>-1): #check that array length is unchanged
if(oldlen!=arraylen):
print("Warning: hdf5 array length changed for key", key)
#endif
else:
oldlen=arraylen #set to first array length
#endif
arraydict[str(key)]=array
#endfor
return arraydict
def _multiply(self, propList, factor_tuple):
"""
Multiplication routine -- derived quantity is equal to a stored
quantity times some factor. Additional args for the derived quantity
routines are passed in as a tuple, so extract the factor first.
"""
#print "in _multiply: ", propList, " ; factor_tuple: ", factor_tuple
factor = factor_tuple[0]
return propList * factor
def _add(self, propList):
"""
Routine that returns element-wise addition of two arrays.
"""
x = sum(propList)
return x
def stringSplitByIntegers(self,x):
r = re.compile(r'(\d+)')
l = r.split(x)
return [int(y) if y.isdigit() else y for y in l]
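# Example: sorted(['Output10', 'Output2'], key=self.stringSplitByIntegers)
# -> ['Output2', 'Output10'] (numeric ordering of the snapshot group names)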
def getzvalues(self,outkeys,hdf5groups=None):
myname=self.getzvalues.__name__
zvalues=[]
if(type(outkeys)==str): #create list if necessary
outkeys=[outkeys]
#endif
if(hdf5groups is None):
hdf5groups=self.hdf5groups
#endif
for outkey in outkeys:
if(outkey in hdf5groups):
if(outkey.find(self.Output)!=-1):
if (self.z in hdf5groups[outkey]):
outputz=hdf5groups[outkey][self.z]
elif("z" in hdf5groups[outkey]):
outputz=hdf5groups[outkey]["z"]
else:
print("Missing attribute",self.z)
#elif (hdf5groups.has_key(self.Outputs)):
# outputz=hdf5groups[self.Outputs][outkey][self.outputRedshift]
else:
print("Unknown catalog key",outkey)
#endif
zvalues.append(outputz)
#endfor
return np.asarray(zvalues)
| 19,154
| 44.176887
| 113
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/SHAMGalaxyCatalog.py
|
# SHAM galaxy catalog class
# Contact: Yao-Yuan Mao <yymao.astro@gmail.com>
import os
import numpy as np
from astropy.cosmology import FlatLambdaCDM
#from GalaxyCatalogInterface import GalaxyCatalog
GalaxyCatalog = object
class SHAMGalaxyCatalog(GalaxyCatalog):
"""
SHAM galaxy catalog class.
"""
def __init__(self, redshift=0.062496, match_to='LiWhite', **kwargs):
if match_to not in ('LiWhite', 'MBII'):
raise ValueError('`match_to` must be "LiWhite" or "MBII"')
self.match_to = match_to
self.redshift = redshift
self.scale = 1.0/(1.0+self.redshift)
self.base_catalog_dir = kwargs['base_catalog_dir']
self.filename = os.path.join(self.base_catalog_dir, 'SHAM_{:.5f}_{}.npz'.format(self.scale, self.match_to))
if not os.path.isfile(self.filename):
raise ValueError('{} does not exist!'.format(self.filename))
self.npz_file = np.load(self.filename)
self.data_cache = {}
self.cosmology = FlatLambdaCDM(H0=70.2, Om0=0.275, Ob0=0.046)
self._h = self.cosmology.H0.value / 100.0
self._distmod = self.cosmology.distmod(self.redshift).value
self.box_size = (100.0/self._h)
self.overdensity = 97.7
self.lightcone = False
self.SDSS_kcorrection_z = 0.0
self.quantities = { 'redshift': ('redshift', None),
'stellar_mass': ('sm', None),
'halo_id': ('id', None),
'parent_halo_id': ('upid', None),
'positionX': ('x', lambda x: x/self._h),
'positionY': ('y', lambda x: x/self._h),
'positionZ': ('z', lambda x: x/self._h),
'velocityX': ('vx', None),
'velocityY': ('vy', None),
'velocityZ': ('vz', None),
'mass': ('mvir', lambda x: x/self._h),
'SDSS_u:observed:': ('AMAG[0]', lambda x: x+self._distmod),
'SDSS_g:observed:': ('AMAG[1]', lambda x: x+self._distmod),
'SDSS_r:observed:': ('AMAG[2]', lambda x: x+self._distmod),
'SDSS_i:observed:': ('AMAG[3]', lambda x: x+self._distmod),
'SDSS_z:observed:': ('AMAG[4]', lambda x: x+self._distmod),
'SDSS_u:rest:': ('AMAG[0]', None),
'SDSS_g:rest:': ('AMAG[1]', None),
'SDSS_r:rest:': ('AMAG[2]', None),
'SDSS_i:rest:': ('AMAG[3]', None),
'SDSS_z:rest:': ('AMAG[4]', None),
}
def get_quantities(self, quantities, filters={}):
if isinstance(quantities, str):
quantities = [quantities]
if not quantities:
raise ValueError('quantities cannot be empty')
if not all(q in self.quantities for q in quantities):
raise ValueError('Some quantities are not available in this catalog')
if self.redshift < filters.get('zlo', -np.inf) or self.redshift > filters.get('zhi', np.inf):
result = [np.array([]) for _ in quantities]
else:
result = []
for q in quantities:
if q in self.data_cache:
result.append(self.data_cache[q])
else:
key, func = self.quantities[q]
d = func(self.npz_file[key]) if callable(func) else self.npz_file[key]
self.data_cache[q] = d
result.append(d)
return result if len(result) > 1 else result[0]
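# Example (hypothetical base directory):
# cat = SHAMGalaxyCatalog(match_to='LiWhite', base_catalog_dir='/path/to/catalogs')
# sm = cat.get_quantities('stellar_mass', {'zlo': 0.0, 'zhi': 0.1})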
| 3,934
| 44.755814
| 115
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/SAGGalaxyCatalog.py
|
#! /usr/bin/env python
import os
import h5py
import numpy as np
import astropy.cosmology
import astropy.units as u
from .GalaxyCatalogInterface import GalaxyCatalog
class SAGGalaxyCatalog(GalaxyCatalog):
"""
Semi-Analytic Galaxies (SAG) model galaxy catalog class. Uses generic
quantity and filter mechanisms defined by GalaxyCatalog base class. In
addition, implements the use of 'stored' vs. 'derived' quantity getter
methods.
Additional data structures:
catalog A dictionary whose keys are the names of the various stored
properties, and whose values are arrays containing the values
of these quantities for each of the galaxies in the catalog.
derived A dictionary whose keys are the names of derived quantities
and whose values are tuples containing the string name of a
corresponding stored quantity (actually present in the file)
and a pointer to the function used to compute the derived
quantity from the stored one. Some catalogs may support
having the stored quantity be a tuple of stored quantity
names.
"""
def __init__(self, **kwargs):
fn = kwargs.get('fn')
self.type_ext = 'sag'
self.filters = { 'zlo': True,
'zhi': True
}
self.quantities = {
'positionX' : self._get_derived_property,
'positionY' : self._get_derived_property,
'positionZ' : self._get_derived_property,
'velocityX' : self._stored_property_wrapper('Vx'),
'velocityY' : self._stored_property_wrapper('Vy'),
'velocityZ' : self._stored_property_wrapper('Vz'),
'stellar_mass' : self._get_derived_property,
'mass' : self._get_derived_property,
'parent_halo_id' : self._get_derived_property,
'LSST_u:rest:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id224_AB_tot_r'),
'LSST_g:rest:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id225_AB_tot_r'),
'LSST_r:rest:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id226_AB_tot_r'),
'LSST_i:rest:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id227_AB_tot_r'),
'LSST_z:rest:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id228_AB_tot_r'),
'LSST_y:rest:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id229_AB_tot_r'),
'LSST_u:observed:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id224_AB_tot_o'),
'LSST_g:observed:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id225_AB_tot_o'),
'LSST_r:observed:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id226_AB_tot_o'),
'LSST_i:observed:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id227_AB_tot_o'),
'LSST_z:observed:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id228_AB_tot_o'),
'LSST_y:observed:' : self._stored_property_wrapper('SED/Magnitudes/Mag_id229_AB_tot_o'),
'SDSS_u:rest:' : self._quantity_alias('LSST_u:rest:'),
'SDSS_g:rest:' : self._quantity_alias('LSST_g:rest:'),
'SDSS_r:rest:' : self._quantity_alias('LSST_r:rest:'),
'SDSS_i:rest:' : self._quantity_alias('LSST_i:rest:'),
'SDSS_z:rest:' : self._quantity_alias('LSST_z:rest:'),
'SDSS_u:observed:' : self._quantity_alias('LSST_u:observed:'),
'SDSS_g:observed:' : self._quantity_alias('LSST_g:observed:'),
'SDSS_r:observed:' : self._quantity_alias('LSST_r:observed:'),
'SDSS_i:observed:' : self._quantity_alias('LSST_i:observed:'),
'SDSS_z:observed:' : self._quantity_alias('LSST_z:observed:'),
}
self.SDSS_kcorrection_z = 0.0
self.derived = {
'positionX' : (('X',), lambda x: x*1.0e-3, -1.0),
'positionY' : (('Y',), lambda x: x*1.0e-3, -1.0),
'positionZ' : (('Z',), lambda x: x*1.0e-3, -1.0),
'stellar_mass' : (('M_star_disk', 'M_star_bulge'), np.add, -1.0),
'mass' : (('Halo/M200c',), None, -1.0),
'parent_halo_id' : (('Galaxy_Type',), lambda x: np.where(x==0, -1, 100), None),
}
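        # How _get_derived_property reads an entry above (illustrative):
        # (('M_star_disk', 'M_star_bulge'), np.add, -1.0) for 'stellar_mass'
        # means np.add(M_star_disk, M_star_bulge) * self.h**-1.0, i.e. stored
        # keys, a combining function (non-callable means identity), and an
        # h-power factor (falsy means no scaling).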
self.catalog = None
self.sky_area = 4.0 * np.pi * u.sr # All sky by default
self.h = None
self.cosmology = None
self.lightcone = False
return GalaxyCatalog.__init__(self, fn)
def load(self, fn):
"""
Given a catalog path, attempt to read the catalog and set up its
internal data structures.
"""
self.catalog = SAGcollection(fn)
self.h = self.catalog.readAttr('Hubble_h')[0]
self.box_size = float(self.catalog.boxSizeMpc)/self.h
self.cosmology = astropy.cosmology.LambdaCDM(H0 = self.h*100.0,
Om0 = self.catalog.readAttr('Omega')[0],
Ode0 = self.catalog.readAttr('OmegaLambda')[0])
# turam added: use first redshift
self.redshift = self.catalog.redshift[0]
return self
def _get_stored_property(self, quantity, filters):
"""
Return the requested property of galaxies in the catalog as a NumPy
array. This is for properties that are explicitly stored in the
catalog.
"""
#zrange = [filters['zlo'], filters['zhi']] if 'zlo' in filters and 'zhi' in filters else None
zrange = None #TODO: should fix this
return self.catalog.readDataset(dsname=quantity, zrange=zrange).flatten()
def _get_derived_property(self, quantity, filters):
"""
Return a derived halo property. These properties aren't stored
in the catalog but can be computed from properties that are via
a simple function call.
"""
stored_keys, convert_func, h_factor = self.derived[quantity]
if not hasattr(convert_func, '__call__'):
convert_func = lambda x: x
output = convert_func(*(self._get_stored_property(key, filters) for key in stored_keys))
if h_factor:
output *= (self.h**h_factor)
return output
def _stored_property_wrapper(self, name):
"""
        Private function used to translate DESC keywords into stored keywords in the mock
Parameters
----------
name : string
key into stored mock catalogue
"""
return (lambda quantity, filter : self._get_stored_property(name, filter))
def _quantity_alias(self, name):
"""
        Private function used to alias a DESC keyword to another existing quantity keyword
Parameters
----------
name : string
name to alias
"""
return (lambda quantity, filter : self.quantities[name](quantity, filter))
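# Minimal usage sketch (hypothetical path; assumes the generic get_quantities
# mechanism of the GalaxyCatalog base class):
#
#     cat = SAGGalaxyCatalog(fn='/path/to/sag_directory')
#     mstar = cat.get_quantities('stellar_mass', {'zlo': 0.0, 'zhi': 1.0})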
class SAGcollection():
"""
    A collection of SAGdata objects, one per snapshot/redshift. It assumes the
    SAG outputs are organized in separate directories (one per snapshot/redshift).
"""
def __init__(self, filename, boxSizeMpc=0):
"""
It creates a new catalog collection from a specified directory
"""
self.dataList = []
self.snaptag = []
self.nfiles = []
self.redshift = []
self.nz = 0
self.boxSizeMpc = 0
self.zminidx = -1
# turam : Disable this path munging: for DESCQA we are passing in the
# directory with a ".sag" extension to trigger the reader instead
# of the name of an individual hdf5 file; for example:
# filename was: sag_directory/snapshot/file.hdf5
# filename under DESCQA: sag_directory
#filename = os.path.split(os.path.split(filename)[0])[0]
print(filename)
if 0 != boxSizeMpc:
simname = 'SAG_sim'
self.boxSizeMpc = boxSizeMpc
else:
# This file must exist!
simdat = open(filename+"/simdata.txt")
simname = simdat.readline()
self.boxSizeMpc = simdat.readline()
simdat.close()
ls = os.listdir(filename)
ls.sort()
for name in ls:
if name.split("_")[0] == "snapshot":
snap = name.split("_")[1]
lss = os.listdir(filename+"/"+name)
lss.sort()
filesindir = 0
for h5name in lss:
if h5name.split(".")[-1] in ['hdf5','h5'] and \
h5name.split("_")[0] == 'gal':
# This is a SAG file!:
filesindir += 1
if filesindir == 1: ## add a new redshift to the collection:
self.dataList.append(SAGdata(simname, self.boxSizeMpc))
self.snaptag.append(snap)
self.nfiles.append(0)
self.nz += 1
sagname = filename+'/'+name+'/'+h5name
print('Opening file '+sagname)
self.dataList[self.nz-1].addFile(sagname)
self.nfiles[self.nz-1] += 1
# and the corresponding redshifts:
for i in range(self.nz):
self.redshift.append(float(self.dataList[i].readAttr('Redshift')))
# If the outputs are not ordered in subfolders, then they are mixed up:
filesindir = 0
if 0 == self.nz:
for name in ls:
if name.split(".")[-1] in ['hdf5','h5'] and \
name.split("_")[0] == 'gal':
filesindir += 1
snap = name.split("_")[1]
if snap == 'itf':
snap = name.split("_")[2]
try:
idx = self.snaptag.index(snap)
except ValueError:
self.dataList.append(SAGdata(simname, self.boxSizeMpc))
self.snaptag.append(snap)
self.nfiles.append(0)
idx = self.nz
self.nz += 1
sagname = filename+'/'+name
print('Opening file '+sagname)
self.dataList[idx].addFile(sagname)
self.nfiles[idx] += 1
# and the redshift:
if 1 == self.nfiles[idx]:
self.redshift.append(float(self.dataList[idx].readAttr('Redshift')))
self.zminidx = self.redshift.index(min(self.redshift))
self.reduced = self.dataList[0].reduced
def clear(self):
del self.snaptag[:]
del self.nfiles[:]
del self.redshift[:]
self.nz = 0
self.boxSizeMpc = 0
self.zminidx = -1
for sagd in self.dataList:
sagd.clear()
def _lookup_z(self, zlow, zhigh):
"""
It returns a list with the redshifts of the collection that are in the
range zlow <= Z <= zhigh.
"""
zl = []
for z in self.redshift:
if zlow <= z <= zhigh:
zl.append(z)
return zl
def readDataset(self, dsname, multiSnaps=False, zrange=None, **kwargs):
"""
        It searches for a single redshift or a set of redshifts and returns the
        requested datasets, concatenated across the matching snapshots.
"""
for key in kwargs.keys():
if key not in ['idxfilter']:
raise KeyError(key)
if multiSnaps: print("Warning: requesting multiple snaps!")
if not zrange:
if not multiSnaps:
# the lowest redshift (hopefully z=0):
iarr = [self.zminidx]
else:
iarr = [i for i in range(self.nz)]
else:
zl = zrange[0]
zh = zrange[1]
if multiSnaps:
iarr = []
for z in self._lookup_z(zl, zh):
iarr.append(self.redshift.index(z))
else:
# search for the lowest match:
zarr = self._lookup_z(zl, zh)
if not zarr:
return np.array([])
iarr = [self.redshift.index(min(zarr))]
if 'idxfilter' in kwargs.keys():
flt = kwargs['idxfilter']
else:
flt = []
# Now we have the list of redshifts we are going to use, let's concatenate the
# datasets:
for k, i in enumerate(iarr):
dsarr = self.dataList[i].readDataset(dsname)
if 0 == k:
nparr = dsarr
else:
nparr = np.concatenate([nparr, dsarr])
if 0 != len(flt):
tmp = nparr[flt]
del nparr
nparr = tmp
return nparr
# All the files should have the same attributes and units, so these return
# the ones found in the first file at the lowest redshift.
def readAttr(self, attname):
"""
It returns a requested attribute.
"""
return self.dataList[self.zminidx].readAttr(attname)
def readUnits(self):
"""
        It returns a 'Units' object with the unit conversions of the catalog.
"""
return self.dataList[self.zminidx].readUnits()
def datasetList(self):
"""
It returns the dataset list of the files, following the groups recursively.
"""
return self.dataList[self.zminidx].datasetList()
def getGalaxies_by_ids(self, ids, dslist='all', multiSnaps=False, zrange=None):
"""
It returns a dictionary with the different datasets for all the requested
galaxies.
"""
if multiSnaps: print("Warning: requesting multiple snaps!")
if not zrange:
if not multiSnaps:
# the lowest redshift (hopefully z=0):
iarr = [self.zminidx]
else:
iarr = [i for i in range(self.nz)]
else:
zl = zrange[0]
zh = zrange[1]
if multiSnaps:
iarr = []
for z in self._lookup_z(zl, zh):
iarr.append(self.redshift.index(z))
else:
# search for the lowest match:
zarr = self._lookup_z(zl, zh)
iarr = [self.redshift.index(min(zarr))]
if dslist == 'all':
dslist = self.dataList[iarr[0]].datasetList()
if 'Histories/DeltaT_List' in dslist:
dslist.remove('Histories/DeltaT_List')
# here we need to verify the datasets are present in all the snaps.
if multiSnaps:
if len(iarr) > 1:
ds1 = set(dslist)
ds2 = set(self.dataList[iarr[1]].datasetList())
dslist = list(ds1.intersection(ds2))
# now we collect the galaxies:
gal = { 'ngal': np.array([0]) }
for i in iarr:
gtmp = self.dataList[i].getGalaxies_by_ids(ids, dslist)
if 0 == gal['ngal']:
gal.update(gtmp)
gal['ngal'] += len(gal[dslist[0]])
if len(iarr) > 1:
gal['redshift'] = np.repeat(self.redshift[i],len(gal[dslist[0]]))
else:
for ds in dslist:
gal[ds] = np.concatenate([gal[ds], gtmp[ds]])
gal['ngal'] += len(gtmp[dslist[0]])
tmp = np.repeat(self.redshift[i],len(gtmp[dslist[0]]))
gal['redshift'] = np.concatenate([gal['redshift'], tmp])
return gal
class SAGdata():
"""
The class 'SAGdata' stores a collection of hdf5 output files
created by the SAG code. It can extract a particular array from
all the stored files and returns a unique np array with the
requested data.
"""
def __init__(self, simname, boxSizeMpc):
"""
It creates an empty collection of files.
"""
self.simname = str(simname)
self.filenames = []
self.dataList = []
self.nfiles = 0
self.boxSizeMpc = boxSizeMpc
self.reduced = False
def clear(self):
self.simname = ""
del self.filenames[:]
self.nfiles = self.boxSizeMpc = 0
self.reduced = False
for fsag in self.dataList:
fsag.close()
def addFile(self, filename):
"""
It adds an hdf5 file to the object.
"""
try:
sag = h5py.File(filename, "r")
except IOError:
print("Cannot load file: '"+filename+"'")
return
self.filenames.append(filename)
self.dataList.append(sag)
self.nfiles += 1
if 1 == self.nfiles:
try:
attr = self.dataList[0].attrs['REDUCED_HDF5']
if attr == 'YES':
self.reduced = True
except KeyError:
pass
def readDataset(self, dsname, idxfilter=[]):
"""
It returns a unique np array of the requested dataset only
if it exists in all loaded SAG files.
The idxfilter can be created with np.where(condition), for example:
>>> types = d.readDataset("Type")
>>> row, col = np.where(types == 0)
>>> discMass = d.readDataset("DiscMass", idxfilter=row)
>>> pos = d.readDataset("Pos", idxfilter=row)
"""
        for i, sag in enumerate(self.dataList):
            ds = sag.get(dsname)
            if ds is None:
                print("Dataset '"+dsname+"' not present in "+self.filenames[i])
                return None
            dsarr = np.array(ds)
if 0 == i:
nparr = dsarr
else:
nparr = np.concatenate([nparr, dsarr])
if 0 != len(idxfilter):
tmp = nparr[idxfilter]
del nparr
nparr = tmp
return nparr
def readAttr(self, attname, fnum=0):
"""
It returns the value of the requested attribute from a particular
file of the list.
"""
try:
attr = self.dataList[fnum].attrs[attname]
return attr
except KeyError:
print("Attribute '"+attname+"' not present in "
+self.filenames[fnum])
return None
def readUnits(self, fnum=0):
"""
It returns an instance of the 'Units' class, with all the unit conversions
        of the data found in the first hdf5 file of the list.
"""
if 0 < self.nfiles:
if not self.reduced:
m_in_g = float(self.dataList[fnum].attrs["UnitMass_in_g"])
l_in_cm = float(self.dataList[fnum].attrs["UnitLength_in_cm"])
vel_in_cm_s = float(self.dataList[fnum].attrs["UnitVelocity_in_cm_per_s"])
else:
m_in_g = 1.989e33 # Msun
l_in_cm = 3.085678e21 # kpc
vel_in_cm_s = 1e5 # km/s
h = float(self.readAttr('Hubble_h'))
units = Units(l_in_cm, m_in_g, vel_in_cm_s, h)
return units
else:
return None
def datasetList(self, fnum=0, group="/"):
ks = []
for tag in self.dataList[fnum][group].keys():
if type(self.dataList[fnum][group+tag]) is h5py._hl.dataset.Dataset:
ks.append(group+tag)
elif type(self.dataList[fnum][group+tag]) is h5py._hl.group.Group:
tmp = self.datasetList(fnum, group=group+tag+"/")
ks += tmp
return ks
def _gal_idxs(self, ids, dsname):
if type(ids) != list: ids = [ids]
idxs = []
boxes = []
for i in range(self.nfiles):
dset = self.dataList[i][dsname]
tmp = np.where(np.in1d(dset, ids, assume_unique=True))[0]
idxs += tmp.tolist()
for _ in range(len(tmp)): boxes.append(i)
return np.array(idxs), np.array(boxes)
def getGalaxies(self, dslist='all'):
if dslist == 'all':
dslist = self.datasetList()
gal = {}
for dstag in dslist:
if type(self.dataList[0][dstag]) is h5py._hl.dataset.Dataset:
gal[dstag] = self.readDataset(dstag)
return gal
def getGalaxies_by_ids(self, ids, dslist='all'):
"""
It returns a dictionary with the different datasets for all the requested
galaxies.
"""
        if dslist == 'all':
            dslist = self.datasetList()
            if 'Histories/DeltaT_List' in dslist:
                dslist.remove('Histories/DeltaT_List')
# retrieve indexes of the galaxies:
idname = 'GalaxyID' if self.reduced else 'UID'
idxs, boxes = self._gal_idxs(ids, idname)
gal = {}
for dstag in dslist:
if type(self.dataList[0][dstag]) is h5py._hl.dataset.Dataset:
dims = self.dataList[0][dstag].shape[1]
l = np.zeros((len(idxs),dims), dtype=self.dataList[0][dstag].dtype)
for i in range(self.nfiles):
l_idx = (boxes == i)
l[l_idx] = self.dataList[i][dstag][:][idxs[l_idx]]
gal[dstag] = l
return gal
| 22,166
| 37.753497
| 120
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/__init__.py
|
"""
DESCQA v1 reader interface
"""
from .register import *
__version__ = '1.1.1'
| 81
| 12.666667
| 26
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/iHODGalaxyCatalog.py
|
# Massive Black 2 galaxy catalog class
import numpy as np
import astropy.cosmology
import h5py
import astropy.units as u
from .GalaxyCatalogInterface import GalaxyCatalog
class iHODGalaxyCatalog(GalaxyCatalog):
"""
iHOD galaxy catalog class.
"""
def __init__(self, **kwargs):
fn = kwargs.get('fn')
self.type_ext = 'iHOD'
self.filters = {
'zlo': True,
'zhi': True
}
self.quantities = {
'positionX': self._get_stored_property,
'positionY': self._get_stored_property,
'positionZ': self._get_stored_property,
'velocityX': self._get_stored_property,
'velocityY': self._get_stored_property,
'velocityZ': self._get_stored_property,
'stellar_mass': self._get_stored_property,
'mass': self._get_stored_property,
'halo_id': self._get_stored_property,
'parent_halo_id': self._get_stored_property,
'SDSS_g:rest:': self._get_stored_property,
'SDSS_r:rest:': self._get_stored_property,
}
self.Ngals = 0
self.sky_area = 4.*np.pi*u.sr # all sky by default
self.lightcone = False
self.redshift = (1.0 / 0.941176) - 1.0
self.cosmology = astropy.cosmology.FlatLambdaCDM(H0=70.2, Om0=0.275, Ob0=0.046)
self.box_size = 100.0 / self.cosmology.h
self.SDSS_kcorrection_z = 0.1
return GalaxyCatalog.__init__(self, fn)
def load(self, fn):
"""
Given a catalog path, attempt to read the catalog and set up its
internal data structures.
"""
self.catalog = self._read_rec_from_hdf5(fn, group='galaxy')
self.Ngals = len(self.catalog)
return self
def _construct_mask(self, filters):
"""
Given a dictionary of filter constraints, construct a mask array
for use in filtering the catalog.
"""
if type(filters) is not dict:
raise TypeError("construct_mask: filters must be given as dict")
mask = np.ones((self.Ngals), dtype=np.bool_)
for filter_name in filters.keys():
if filter_name == 'zlo':
mask = mask & (filters[filter_name] < self.redshift)
elif filter_name == 'zhi':
mask = mask & (filters[filter_name] > self.redshift)
return mask
def _get_stored_property(self, quantity, filters):
"""
Return the requested property of galaxies in the catalog as a NumPy
array. This is for properties that are explicitly stored in the
catalog.
"""
filter_mask = self._construct_mask(filters)
return self.catalog[quantity][np.where(filter_mask)]
def _get_derived_property(self, quantity, filters):
"""
Return a derived halo property. These properties aren't stored
in the catalog but can be computed from properties that are via
a simple function call.
"""
filter_mask = self._construct_mask(filters)
stored_qty_rec = self.derived[quantity]
stored_qty_name = stored_qty_rec[0]
stored_qty_fctn = stored_qty_rec[1]
if type(stored_qty_name) is tuple:
values = self.catalog[stored_qty_name[0]][np.where(filter_mask)]
return stored_qty_fctn(values, stored_qty_name[1:])
else:
values = self.catalog[stored_qty_name][np.where(filter_mask)]
return stored_qty_fctn(values)
def _read_rec_from_hdf5(self, h5file, group="galaxy"):
recdict = self._read_recdict_from_hdf5(h5file)
return(recdict[group])
def _read_recdict_from_hdf5(self, h5file):
""" read catalog as a dictionary of record arrays.
"""
f = h5py.File(h5file, "r")
recdict = {}
        for grp, val in f.items():
            print(grp)
datasets = []
dtypes = []
for key in f[grp].keys():
dset = f[grp][key][:]
dtypename = f[grp][key].dtype.name
dtype = (str(key), dtypename)
datasets.append(dset)
dtypes.append(dtype)
recdict[str(grp)] = np.rec.fromarrays(tuple(datasets), dtype=dtypes)
f.close()
return(recdict)
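    # The returned dict maps each HDF5 group name to a numpy record array whose
    # fields are that group's datasets, e.g. recdict['galaxy']['stellar_mass']
    # (field names here are a sketch; they depend on the input file).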
# Functions for computing derived values
def _translate(self, propList):
"""
Translation routine -- a passthrough that accomplishes mapping of
derived quantity names to stored quantity names via the derived
property function mechanism.
"""
return propList
def _multiply(self, propList, factor_tuple):
"""
Multiplication routine -- derived quantity is equal to a stored
quantity times some factor. Additional args for the derived quantity
routines are passed in as a tuple, so extract the factor first.
"""
factor = factor_tuple[0]
return propList * factor
def _power(self, propList, base_tuple):
return base_tuple[0] ** propList
| 5,592
| 39.528986
| 90
|
py
|
descqa
|
descqa-master/v1/GCRCatalogs/register.py
|
import os
import importlib
import yaml
from .config import base_catalog_dir
__all__ = ['available_catalogs', 'get_catalog_config', 'get_available_catalogs', 'load_catalog']
def load_yaml(yaml_file):
"""
    Load *yaml_file*. Return a dictionary.
"""
with open(yaml_file) as f:
config = yaml.safe_load(f)
return config
def import_subclass(subclass, package=None, required_base_class=None):
"""
Import and return a subclass.
    *subclass* must be the name of a class defined in a module of the same name within *package*.
"""
subclass = getattr(importlib.import_module('.'+subclass, package), subclass)
if required_base_class:
assert issubclass(subclass, required_base_class), "Provided class is not a subclass of *required_base_class*"
return subclass
def get_catalog_config(catalog):
"""
get the config dict of *catalog*
"""
return available_catalogs[catalog]
def get_available_configs(config_dir, register=None):
"""
Return (or update) a dictionary *register* that contains all config files in *config_dir*.
"""
if register is None:
register = dict()
for config_file in os.listdir(config_dir):
if config_file.startswith('_') or not config_file.lower().endswith('.yaml'):
continue
name = os.path.splitext(config_file)[0]
config = load_yaml(os.path.join(config_dir, config_file))
config['base_catalog_dir'] = base_catalog_dir
if 'fn' in config:
config['fn'] = os.path.join(base_catalog_dir, config['fn'])
register[name] = config
return register
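# A hypothetical config file picked up by get_available_configs might read:
#
#     reader: SAGGalaxyCatalog
#     fn: path/to/catalog_directory.sag
#     included_by_default: true
#
# 'reader' names the catalog class to instantiate and 'fn' is joined onto
# base_catalog_dir above.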
def get_available_catalogs(include_default_only=False): # pylint: disable=unused-argument
"""
Return *available_catalogs* as a dictionary
"""
return available_catalogs
def load_catalog_from_config_dict(catalog_config):
"""
Load a galaxy catalog using a config dictionary.
Parameters
----------
catalog_config : dict
a dictionary of config options
Return
------
galaxy_catalog : instance of a subclass of BaseGalaxyCatalog
See also
--------
load_catalog()
"""
return import_subclass(catalog_config['reader'],
__package__,
object)(**catalog_config)
def load_catalog(catalog_name, config_overwrite=None):
"""
Load a galaxy catalog as specified in one of the yaml file in catalog_configs.
Parameters
----------
catalog_name : str
name of the catalog (without '.yaml')
config_overwrite : dict, optional
a dictionary of config options to overwrite
Return
------
galaxy_catalog : instance of a subclass of BaseGalaxyCatalog
"""
if catalog_name.lower().endswith('.yaml'):
catalog_name = catalog_name[:-5]
if catalog_name not in available_catalogs:
raise KeyError("Catalog `{}` does not exist in the register. See `available_catalogs`.".format(catalog_name))
config = available_catalogs[catalog_name]
if config_overwrite:
config = config.copy()
config.update(config_overwrite)
return load_catalog_from_config_dict(config)
available_catalogs = get_available_configs(os.path.join(os.path.dirname(__file__), 'configs'))
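# Usage sketch (the catalog name and overwrite below are hypothetical):
#
#     import GCRCatalogs
#     catalog = GCRCatalogs.load_catalog('some_catalog',
#                                        config_overwrite={'fn': '/alternative/path.sag'})
#
# load_catalog looks the name up in available_catalogs and instantiates the
# class named by the config's 'reader' entry.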
| 3,261
| 26.644068
| 117
|
py
|
descqa
|
descqa-master/descqarun/master.py
|
from __future__ import print_function, unicode_literals, absolute_import
import os
import sys
import shutil
import time
import json
import logging
import traceback
import importlib
import argparse
import collections
import fnmatch
import subprocess
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import yaml
from . import config
__all__ = ['main']
_horizontal_rule = '-'*50
pjoin = os.path.join
def make_path_absolute(path):
return os.path.abspath(os.path.expanduser(path))
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
class CatchExceptionAndStdStream():
def __init__(self, filenames=None, logger=None, during=None):
self._logger = logger
self._filenames = [filenames] if _is_string_like(filenames) else filenames
self._during = ' when {}'.format(during) if during else ''
self._stream = StringIO()
self._stdout = self._stderr = None
def __enter__(self):
self._stdout = sys.stdout
self._stdout.flush()
sys.stdout = self._stream
self._stderr = sys.stderr
self._stderr.flush()
sys.stderr = self._stream
def __exit__(self, exc_type, exc_value, exc_tb):
self._stream.flush()
has_exception = False
if exc_type:
traceback.print_exception(exc_type, exc_value, exc_tb)
has_exception = True
output = self._stream.getvalue().strip()
self._stream.close()
sys.stdout = self._stdout
sys.stderr = self._stderr
if self._logger:
if has_exception:
self._logger.error('Exception occurred{}. Below are stdout/stderr and traceback:\n{}'.format(self._during, output))
elif output:
self._logger.debug('Below are stdout/stderr{}:\n{}'.format(self._during, output))
if self._filenames and output:
for filename in self._filenames:
with open(filename, 'a') as f:
f.write(output)
f.write('\n')
return True
def create_logger(verbose=False):
logger = logging.getLogger()
logger.setLevel(logging.DEBUG if verbose else logging.INFO)
logFormatter = logging.Formatter('[%(levelname)-5.5s][%(asctime)s] %(message)s')
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
logger.addHandler(consoleHandler)
return logger
def record_version(module, version, record_dict=None, logger=None):
if record_dict is None:
record_dict = dict()
record_dict[module] = version
if logger:
logger.info('Using {} {}'.format(module, version))
return record_dict
def check_copy(src, dst):
if os.path.exists(dst):
raise OSError('{} already exists'.format(dst))
if os.path.isdir(src):
shutil.copytree(src, dst, ignore=shutil.ignore_patterns('.*', '*~', '#*'))
elif os.path.isfile(src):
shutil.copy(src, dst)
else:
raise OSError('{} does not exist'.format(src))
return dst
def make_output_dir(root_output_dir):
root_output_dir = make_path_absolute(root_output_dir)
if not os.path.isdir(root_output_dir):
raise OSError('{} does not exist'.format(root_output_dir))
new_dir_name = time.strftime('%Y-%m-%d')
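    # output runs are grouped by month: <root>/YYYY-MM/YYYY-MM-DD[_N], where the
    # _N suffix is appended below when a run for the same date already exists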
parent_dir = pjoin(root_output_dir, new_dir_name.rpartition('-')[0])
if not os.path.exists(parent_dir):
os.mkdir(parent_dir)
subprocess.check_call(['chmod', 'a+rx,g+ws,o-w', parent_dir])
output_dir = pjoin(parent_dir, new_dir_name)
if os.path.exists(output_dir):
i = max((int(s.partition('_')[-1] or 0) for s in os.listdir(parent_dir) if s.startswith(new_dir_name)))
output_dir += '_{}'.format(i+1)
os.mkdir(output_dir)
return output_dir
def get_username():
for k in ('LOGNAME', 'USER', 'LNAME', 'USERNAME'):
user = os.getenv(k)
if user:
return user
return 'UNKNOWN'
def print_available_and_exit(catalogs, validations):
print(_horizontal_rule)
print('Available catalogs')
print(_horizontal_rule)
for c in sorted(catalogs):
print(c, '*' if catalogs[c].get('included_by_default') or catalogs[c].get('include_in_default_catalog_list') else '')
print()
print(_horizontal_rule)
print('Available validations')
print(_horizontal_rule)
for v in sorted(validations):
print(v, '*' if validations[v].get('included_by_default') else '')
print()
sys.exit(0)
class DescqaTask(object):
logfile_basename = 'traceback.log'
config_basename = 'config.yaml'
status_basename = 'STATUS'
def __init__(self, output_dir, validations_to_run, catalogs_to_run, logger):
self.output_dir = output_dir
self.logger = logger
self.validations_to_run = self.select_subset(descqa.available_validations, validations_to_run)
self.catalogs_to_run = self.select_subset(GCRCatalogs.get_available_catalogs(False), catalogs_to_run)
if not self.validations_to_run or not self.catalogs_to_run:
raise RuntimeError('Nothing to run... Aborted!')
self._validation_instance_cache = dict()
self._results = dict()
@staticmethod
def select_subset(available, wanted=None):
if wanted is None:
available_default = None
if isinstance(available, dict):
available_default = [k for k, v in available.items() if v.get('included_by_default') or v.get('include_in_default_catalog_list')]
return set(available_default) if available_default else set(available)
wanted = set(wanted)
output = set()
for item in wanted:
matched = fnmatch.filter(available, item)
if not matched:
raise KeyError("{} does not match any available names: {}".format(item, ', '.join(sorted(available))))
output.update(matched)
return tuple(sorted(output))
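    # e.g. select_subset(('tri_color', 'tri_shape', 'mag'), ['tri_*']) returns
    # ('tri_color', 'tri_shape'); wanted names are shell-style fnmatch patterns
    # (values here are illustrative)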
def get_path(self, validation, catalog=None):
return pjoin(self.output_dir, validation, catalog) if catalog else pjoin(self.output_dir, validation)
def make_all_subdirs(self):
for validation in self.validations_to_run:
os.mkdir(self.get_path(validation))
for catalog in self.catalogs_to_run:
os.mkdir(self.get_path(validation, catalog))
with open(pjoin(self.get_path(validation), self.config_basename), 'w') as f:
f.write(yaml.dump(descqa.available_validations[validation], default_flow_style=False))
f.write('\n')
def get_description(self, description_key='description'):
dv = {v: descqa.available_validations[v].get(description_key) for v in self.validations_to_run}
dc = {c: GCRCatalogs.get_catalog_config(c).get(description_key) for c in self.catalogs_to_run}
return {'validation_{}'.format(description_key): dv, 'catalog_{}'.format(description_key): dc}
def get_validation_instance(self, validation):
if validation not in self._validation_instance_cache:
logfile = pjoin(self.get_path(validation), self.logfile_basename)
instance = None
with CatchExceptionAndStdStream(logfile, self.logger, 'loading validation `{}`'.format(validation)):
instance = descqa.load_validation(validation)
if instance is None:
self.set_result('VALIDATION_TEST_MODULE_ERROR', validation=validation)
self._validation_instance_cache[validation] = instance
return self._validation_instance_cache[validation]
def get_catalog_instance(self, catalog):
logfile = [pjoin(self.get_path(validation, catalog), self.logfile_basename) for validation in self.validations_to_run]
instance = None
with CatchExceptionAndStdStream(logfile, self.logger, 'loading catalog `{}`'.format(catalog)):
instance = GCRCatalogs.load_catalog(catalog)
if instance is None:
self.set_result('LOAD_CATALOG_ERROR', catalog=catalog)
return instance
def set_result(self, test_result, validation=None, catalog=None):
if validation and catalog:
key = (validation, catalog)
elif validation:
for c in self.catalogs_to_run:
self.set_result(test_result, validation, c)
return
elif catalog:
for v in self.validations_to_run:
self.set_result(test_result, v, catalog)
return
else:
raise ValueError('Must specify *validation* and/or *catalog*')
if key in self._results:
self.logger.debug('Warning: result of {} has been set already!'.format(key))
return
if _is_string_like(test_result):
status = test_result
test_result = None
elif hasattr(test_result, 'status_code'):
status = test_result.status_code
else:
status = 'VALIDATION_TEST_{}'.format('SKIPPED' if test_result.skipped else ('PASSED' if test_result.passed else 'FAILED'))
self._results[key] = (status, test_result)
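        # STATUS file layout: line 1 = status code, line 2 = summary (optional),
        # line 3 = score (optional); the web interface parses these lines back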
with open(pjoin(self.get_path(*key), self.status_basename), 'w') as f:
if hasattr(test_result, 'status_full'):
f.write(test_result.status_full + '\n')
else:
f.write(status + '\n')
if getattr(test_result, 'summary', None):
f.write(test_result.summary + '\n')
if getattr(test_result, 'score', None):
f.write('{:.3g}'.format(test_result.score) + '\n')
def get_status(self, validation=None, catalog=None, return_test_result=False):
if validation and catalog:
return self._results.get((validation, catalog), (None, None))[int(return_test_result)]
elif validation:
return {c: self.get_status(validation, c, return_test_result) for c in self.catalogs_to_run}
elif catalog:
return {v: self.get_status(v, catalog, return_test_result) for v in self.validations_to_run}
else:
return {v: self.get_status(v, None, return_test_result) for v in self.validations_to_run}
def check_status(self):
msg = 'hmmm, something is wrong with the test results!'
if not all((v, c) in self._results for v in self.validations_to_run for c in self.catalogs_to_run):
self.logger.error(msg)
def count_status(self):
count_by_validation = {v: collections.Counter(self.get_status(validation=v).values()) for v in self.validations_to_run}
count_by_catalog = {c: collections.Counter(self.get_status(catalog=c).values()) for c in self.catalogs_to_run}
return count_by_validation, count_by_catalog
def get_status_report(self):
report = StringIO()
for validation in self.validations_to_run:
report.write(_horizontal_rule + '\n')
report.write(validation + '\n')
report.write(_horizontal_rule + '\n')
l = max(len(catalog) for catalog in self.catalogs_to_run)
l += 3
for catalog in self.catalogs_to_run:
s = self.get_status(validation, catalog)
report.write('{{:{}}}{{}}\n'.format(l).format(catalog, s))
report.write(_horizontal_rule + '\n')
report_content = report.getvalue()
report.close()
return report_content
def run_tests(self):
run_at_least_one_catalog = False
for catalog in self.catalogs_to_run:
catalog_instance = self.get_catalog_instance(catalog)
if catalog_instance is None:
continue
run_at_least_one_catalog = True
for validation in self.validations_to_run:
validation_instance = self.get_validation_instance(validation)
if validation_instance is None:
continue
output_dir_this = self.get_path(validation, catalog)
logfile = pjoin(output_dir_this, self.logfile_basename)
msg = 'running validation `{}` on catalog `{}`'.format(validation, catalog)
self.logger.debug(msg)
test_result = None
with CatchExceptionAndStdStream(logfile, self.logger, msg):
test_result = validation_instance.run_on_single_catalog(catalog_instance, catalog, output_dir_this)
self.set_result(test_result or 'RUN_VALIDATION_TEST_ERROR', validation, catalog)
if not run_at_least_one_catalog:
msg = 'No valid catalog to run! Abort!'
self.logger.error(msg)
raise RuntimeError(msg)
def conclude_tests(self):
for validation in self.validations_to_run:
validation_instance = self.get_validation_instance(validation)
if validation_instance is None:
continue
output_dir_this = self.get_path(validation)
logfile = pjoin(output_dir_this, self.logfile_basename)
msg = 'concluding validation test `{}`'.format(validation)
self.logger.debug(msg)
with CatchExceptionAndStdStream(logfile, self.logger, msg):
validation_instance.conclude_test(output_dir_this)
def run(self):
self.logger.debug('creating subdirectories in output_dir...')
self.make_all_subdirs()
if all(self.get_validation_instance(validation) is None for validation in self.validations_to_run):
self.logger.info('No valid validation tests. End program.')
self.check_status()
return
self.logger.debug('starting to run all validation tests...')
self.run_tests()
self.check_status()
self.logger.debug('starting to conclude all validation tests...')
self.conclude_tests()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('root_output_dir',
help='Output directory (where the web interface runs on). A sub directory named with the current date will be created within.')
parser.add_argument('-v', '--verbose', action='store_true',
help='Display all debug messages')
parser.add_argument('-m', '--comment',
help='Attach a comment to this run')
parser.add_argument('-l', '--list', action='store_true',
help='Just list available catalogs and validations. Runs nothing!')
parser.add_argument('-t', '--validations-to-run', dest='validations_to_run', metavar='VALIDATION', nargs='+',
help='Run only a subset of validations')
parser.add_argument('-c', '--catalogs-to-run', dest='catalogs_to_run', metavar='CATALOG', nargs='+',
help='run only a subset of catalogs')
parser.add_argument('-p', '--insert-sys-path', dest='paths', metavar='PATH', nargs='+',
help='Insert path(s) to sys.path')
parser.add_argument('-w', '--web-base-url', metavar='URL', default=config.base_url,
help='Web interface base URL')
args = parser.parse_args()
logger = create_logger(verbose=args.verbose)
master_status = dict()
master_status['user'] = get_username()
master_status['start_time'] = time.time()
if args.comment:
master_status['comment'] = args.comment
master_status['versions'] = dict()
logger.debug('Importing DESCQA and GCR Catalogs...')
if args.paths:
sys.path = [make_path_absolute(path) for path in args.paths] + sys.path
global GCRCatalogs #pylint: disable=W0601
GCRCatalogs = importlib.import_module('GCRCatalogs')
global descqa #pylint: disable=W0601
descqa = importlib.import_module('descqa')
record_version('DESCQA', descqa.__version__, master_status['versions'], logger=logger)
record_version('GCRCatalogs', GCRCatalogs.__version__, master_status['versions'], logger=logger)
if hasattr(GCRCatalogs, 'GCR'):
record_version('GCR', GCRCatalogs.GCR.__version__, master_status['versions'], logger=logger)
if args.list:
print_available_and_exit(GCRCatalogs.get_available_catalogs(False), descqa.available_validations)
logger.debug('creating root output directory...')
output_dir = make_output_dir(args.root_output_dir)
open(pjoin(output_dir, '.lock'), 'w').close()
try: # we want to remove ".lock" file even if anything went wrong
logger.info('output of this run is stored in %s', output_dir)
logger.debug('creating code snapshot...')
snapshot_dir = pjoin(output_dir, '_snapshot')
os.mkdir(snapshot_dir)
check_copy(descqa.__path__[0], pjoin(snapshot_dir, 'descqa'))
check_copy(GCRCatalogs.__path__[0], pjoin(snapshot_dir, 'GCRCatalogs'))
if hasattr(GCRCatalogs, 'GCR'):
if getattr(GCRCatalogs.GCR, '__path__', None):
check_copy(GCRCatalogs.GCR.__path__[0], pjoin(snapshot_dir, 'GCR'))
else:
check_copy(GCRCatalogs.GCR.__file__, pjoin(snapshot_dir, 'GCR.py'))
logger.debug('preparing to run validation tests...')
descqa_task = DescqaTask(output_dir, args.validations_to_run, args.catalogs_to_run, logger)
master_status.update(descqa_task.get_description())
logger.info('running validation tests...')
descqa_task.run()
logger.debug('finishing up...')
master_status['status_count'], master_status['status_count_group_by_catalog'] = descqa_task.count_status()
master_status['end_time'] = time.time()
with open(pjoin(output_dir, 'STATUS.json'), 'w') as f:
json.dump(master_status, f, indent=True)
logger.info('All done! Status report:\n%s', descqa_task.get_status_report())
finally:
os.unlink(pjoin(output_dir, '.lock'))
subprocess.check_call(['chmod', '-R', 'a+rX,o-w', output_dir])
logger.info('Web output: %s?run=%s', args.web_base_url, os.path.basename(output_dir))
if __name__ == '__main__':
main()
| 18,221
| 36.64876
| 145
|
py
|
descqa
|
descqa-master/descqarun/config.py
|
__all__ = ['base_url']
base_url = 'https://portal.nersc.gov/cfs/lsst/descqa/v2/'
| 81
| 26.333333
| 57
|
py
|
descqa
|
descqa-master/descqarun/__init__.py
|
"""
DESCQA Execution Script
"""
from .master import main
__version__ = '2.1.1'
| 79
| 12.333333
| 24
|
py
|
descqa
|
descqa-master/descqaweb/twopanels.py
|
from __future__ import unicode_literals, print_function
import os
import sys
from . import config
from .interface import DescqaRun, b64encode
__all__ = ['prepare_leftpanel', 'print_file']
def prepare_leftpanel(run, test=None, catalog=None, right=None):
try:
descqa_run = DescqaRun(run, config.root_dir)
except AssertionError:
raise ValueError('Invalid run "{}"'.format(run))
if test is None and catalog is None:
raise ValueError('`test` and `catalog` cannot both be `None`')
if test and test not in descqa_run.tests:
raise ValueError('Invalid test {} for run {}'.format(test, run))
if catalog and catalog not in descqa_run.catalogs:
raise ValueError('Invalid catalog {} for run {}'.format(catalog, run))
data = dict()
data['run'] = descqa_run.name
data['test'] = test
data['catalog'] = catalog
data['right'] = right
if test:
data['group'] = [descqa_run[test, c] for c in descqa_run.catalogs]
for item in data['group']:
item.name = item.catalog
data['summary'] = descqa_run[test]
data['title'] = test
data['is_group_by_catalog'] = False
else:
data['group'] = [descqa_run[t, catalog] for t in descqa_run.tests]
for item in data['group']:
item.name = item.test
data['title'] = catalog
data['summary'] = None
data['is_group_by_catalog'] = True
return data
def print_file(target_file, root_dir=config.root_dir):
try:
        full_path = os.path.realpath(os.path.join(root_dir, target_file))
        assert full_path.startswith(os.path.realpath(root_dir) + os.sep)
        with open(full_path, 'rb') as f:
file_content = f.read()
except (OSError, IOError, AssertionError):
print('Content-Type: text/plain; charset=utf-8')
print()
sys.stdout.flush()
print('[Error] Cannot open/read file {}'.format(target_file))
else:
if target_file.lower().endswith('.png'):
print('Content-Type: text/html; charset=utf-8')
print()
sys.stdout.flush()
print('<!DOCTYPE html>')
print('<html><body>')
print('<img src="data:image/png;base64,{}" width="100%">'.format(b64encode(file_content)))
print('</body></html>')
elif target_file.lower().endswith('.pdf'):
print('Content-Type: application/pdf')
print('Content-Length: {}'.format(len(file_content)))
print('Content-Disposition: inline; filename="{}"'.format(os.path.basename(target_file)))
print()
sys.stdout.flush()
try:
sys.stdout.buffer.write(file_content)
except AttributeError:
print(file_content)
elif target_file.lower().endswith('.html'):
print('Content-Type: text/html; charset=utf-8')
file_content = file_content.decode('utf-8')
print('Content-Length: {}'.format(len(file_content)))
print()
sys.stdout.flush()
print(file_content)
else:
print('Content-Type: text/plain; charset=utf-8')
file_content = file_content.decode('utf-8')
print('Content-Length: {}'.format(len(file_content)))
print('Content-Disposition: inline; filename="{}"'.format(os.path.basename(target_file)))
print()
sys.stdout.flush()
print(file_content)
| 3,473
| 34.814433
| 102
|
py
|
descqa
|
descqa-master/descqaweb/main.py
|
from __future__ import print_function, unicode_literals
import sys
import cgi
from jinja2 import Environment, PackageLoader
from . import config
from .bigtable import prepare_bigtable
from .twopanels import prepare_leftpanel, print_file
from .matrix import prepare_matrix
__all__ = ['run']
env = Environment(loader=PackageLoader('descqaweb', 'templates'))
def _convert_to_integer(value, default=0):
try:
return int(value)
except (ValueError, TypeError):
return default
def run():
form = cgi.FieldStorage()
if form.getfirst('file'):
print_file(form.getfirst('file'))
return
print('Content-Type: text/html; charset=utf-8')
print()
sys.stdout.flush()
if form.getfirst('header'):
print(env.get_template('header.html').render(full_header=True, header_page=True, config=config))
return
_run = form.getfirst('run', '')
if _run.lower() == 'all':
page = _convert_to_integer(form.getfirst('page'), 1)
months = _convert_to_integer(form.getfirst('months'), config.months_to_search)
search = {item: form.getfirst(item) for item in ('users', 'tests', 'catalogs') if form.getfirst(item)}
print(env.get_template('header.html').render(full_header=True, please_wait=True, config=config))
sys.stdout.flush()
print(env.get_template('bigtable.html').render(**prepare_bigtable(page, months, search)))
return
elif _run:
catalog = form.getfirst('catalog')
test = form.getfirst('test')
if catalog or test:
if form.getfirst('left'):
print(env.get_template('header.html').render(please_wait=True, config=config))
sys.stdout.flush()
print(env.get_template('leftpanel.html').render(**prepare_leftpanel(_run, test, catalog)))
else:
print(env.get_template('twopanels.html').render(run=_run, catalog=catalog, test=test, right=form.getfirst('right')))
return
print(env.get_template('header.html').render(full_header=True, please_wait=True, config=config))
sys.stdout.flush()
if _run or getattr(config, 'use_latest_run_as_home', True):
print(env.get_template('matrix.html').render(**prepare_matrix(
run=_run,
catalog_prefix=form.getfirst('catalog_prefix'),
test_prefix=form.getfirst('test_prefix'),
)))
else:
print(env.get_template('home.html').render(general_info=config.general_info))
| 2,516
| 33.958333
| 132
|
py
|
descqa
|
descqa-master/descqaweb/config.py
|
from __future__ import unicode_literals
__all__ = ['site_title', 'root_dir', 'general_info', 'static_dir', 'run_per_page', 'logo_filename', 'github_url', 'months_to_search']
root_dir = '/global/cfs/cdirs/lsst/groups/CS/descqa/run/v2'
site_title = 'DESCQA (v2): LSST DESC Quality Assurance for Galaxy Catalogs'
run_per_page = 20
months_to_search = 3
static_dir = 'web-static'
logo_filename = 'desc-logo-small.png'
github_url = 'https://github.com/lsstdesc/descqa'
general_info = '''
This is DESCQA v2. You can also visit the previous version, <a class="everblue" href="https://portal.nersc.gov/cfs/lsst/descqa/v1/">DESCQA v1</a>.
<br><br>
The DESCQA framework executes validation tests on mock galaxy catalogs.
These tests and catalogs are contributed by LSST DESC collaborators.
See <a href="https://arxiv.org/abs/1709.09665" target="_blank">the DESCQA paper</a> for more information.
Full details about the catalogs and tests, and how to contribute, are available <a href="https://confluence.slac.stanford.edu/x/Z0uKDQ" target="_blank">here</a> (collaborators only).
The source code of DESCQA is hosted in <a href="https://github.com/LSSTDESC/descqa/" target="_blank">this GitHub repo</a>.
'''
use_latest_run_as_home = False
| 1,232
| 46.423077
| 182
|
py
|
descqa
|
descqa-master/descqaweb/bigtable.py
|
from __future__ import unicode_literals
import html
from .interface import iter_all_runs, DescqaRun
from . import config
__all__ = ['prepare_bigtable']
try:
unicode # pylint: disable=used-before-assignment
except NameError:
unicode = str # pylint: disable=redefined-builtin
def format_status_count(status_count):
output = []
try:
for name, d in status_count.items():
total = sum(d.values())
output.append(name + ' - ' + '; '.join(('{}/{} {}'.format(d[k], total, html.escape(k)) for k in d)))
except AttributeError:
if isinstance(status_count, unicode):
output = [html.escape(l) for l in status_count.splitlines()]
return '<br>'.join(output)
def format_bigtable_row(descqa_run):
user = descqa_run.status.get('user', '')
user = ' ({})'.format(user) if user else ''
comment = descqa_run.status.get('comment', '')
if len(comment) > 20:
comment = comment[:20] + '...'
if comment:
comment = '<br> <i>{}</i>'.format(comment)
test_status = format_status_count(descqa_run.status.get('status_count', {}))
light = 'green'
if not test_status:
light = 'red'
test_status = 'status file "STATUS.json" not found or cannot be read!'
elif '_ERROR' in test_status:
light = 'yellow'
catalog_status = format_status_count(descqa_run.status.get('status_count_group_by_catalog', {}))
output = []
main_link = ' <a href="?run={}" onMouseOver="appear(\'{}\', \'{}\');" onMouseOut="disappear();">{}</a>'.format(\
descqa_run.name, test_status, catalog_status, descqa_run.name)
output.append('<td>{}{}{}</td>'.format(main_link, user, comment))
output.append('<td><img src="{}/{}.gif"></td>'.format(config.static_dir, light))
test_links = ' | '.join(('<a href="?run={0}&test={1}">{1}</a>'.format(descqa_run.name, t) for t in descqa_run.tests))
catalog_links = ' | '.join(('<a href="?run={0}&catalog={1}">{1}</a>'.format(descqa_run.name, c) for c in descqa_run.catalogs))
output.append('<td>TESTS: {}<br>{}{} </td>'.format(test_links, 'CATALOGS: ' if catalog_links else '', catalog_links))
return '\n'.join(output)
def filter_search_results(descqa_run, search):
if 'users' in search and descqa_run.status.get('user') not in search['users'].split():
return False
if 'tests' in search and not all(any(t.startswith(ts) for t in descqa_run.tests) for ts in search['tests'].split()):
return False
if 'catalogs' in search and not all(any(c.startswith(cs) for c in descqa_run.catalogs) for cs in search['catalogs'].split()):
return False
return True
def prepare_bigtable(page=1, months=3, search=None):
all_runs = list(iter_all_runs(config.root_dir, months_to_search=months))
if search:
all_runs = [descqa_run
for descqa_run in (DescqaRun(run, config.root_dir, validated=True) for run in all_runs)
if filter_search_results(descqa_run, search)]
n_per_page = config.run_per_page
npages = ((len(all_runs) - 1) // n_per_page) + 1
if page > npages:
page = npages
all_runs = all_runs[n_per_page*(page-1):n_per_page*page]
if not search:
all_runs = [DescqaRun(run, config.root_dir, validated=True) for run in all_runs]
table_out = []
table_out.append('<table class="bigboard" border="0" width="100%" cellspacing="0">')
for run in all_runs:
table_out.append('<tr>{}</tr>'.format(format_bigtable_row(run)))
table_out.append('</table>')
return dict(table='\n'.join(table_out), page=page, npages=npages, static_dir=config.static_dir, search=search)
| 3,735
| 40.054945
| 140
|
py
|
descqa
|
descqa-master/descqaweb/__init__.py
|
"""
DESCQA Web Interface
"""
from .main import run
__version__ = '2.1.0'
| 73
| 11.333333
| 21
|
py
|
descqa
|
descqa-master/descqaweb/interface.py
|
from __future__ import unicode_literals
import os
import re
import json
import datetime
import base64
__all__ = ['b64encode', 'iter_all_runs', 'DescqaRun']
ALLOWED_EXT = {'txt', 'dat', 'csv', 'log', 'json', 'yaml', 'pdf', 'png', 'html'}
STATUS_COLORS = {'PASSED': 'green', 'SKIPPED': 'gold', 'INSPECT': 'blue', 'FAILED': 'orangered', 'ERROR': 'darkred'}
def b64encode(content):
return base64.b64encode(content).decode('ascii')
class File(object):
'''
encapsulates the data needed to locate any files
'''
def __init__(self, filename, dir_path=None, rel_dir_path=None):
if dir_path is None:
self.path = filename
self.filename = os.path.basename(filename)
else:
self.path = os.path.join(dir_path, filename)
self.filename = filename
if rel_dir_path is not None:
self.relpath = os.path.join(rel_dir_path, filename)
self._data = None
self.is_png = self.filename.lower().endswith('.png')
@property
def data(self):
if self.is_png and self._data is None:
self._data = b64encode(open(self.path, 'rb').read())
return self._data
class DescqaItem(object):
def __init__(self, test, catalog, run, base_dir):
self.path = os.path.join(base_dir, run, test)
self.relpath = os.path.join(os.path.basename(os.path.normpath(base_dir)), run, test)
self.is_test_summary = True
if catalog is not None:
self.path = os.path.join(self.path, catalog)
self.relpath = os.path.join(self.relpath, catalog)
self.is_test_summary = False
self.test = test
self.catalog = catalog
self.run = run
self.name = ''
self._status = None
self._summary = None
self._score = None
self._status_color = None
self._files = None
def _parse_status(self):
if self.is_test_summary:
return
try:
with open(os.path.join(self.path, 'STATUS')) as f:
lines = f.readlines()
except (OSError, IOError):
lines = []
while len(lines) < 3:
lines.append('')
self._status = lines[0].strip().upper() or 'NO_STATUS_FILE_ERROR'
self._summary = lines[1].strip()
self._score = lines[2].strip()
for status, color in STATUS_COLORS.items():
if self._status.endswith(status):
self._status_color = color
break
else:
self._status_color = 'darkred'
@property
def status(self):
if self._status is None:
self._parse_status()
return self._status
@property
def summary(self):
if self._summary is None:
self._parse_status()
return self._summary
@property
def score(self):
if self._score is None:
self._parse_status()
return self._score
@property
def status_color(self):
if self._status_color is None:
self._parse_status()
return self._status_color
def _get_files(self):
files = []
for item in sorted((f for f in os.listdir(self.path) if os.path.isfile(os.path.join(self.path, f)))):
if item.rpartition('.')[-1].lower() in ALLOWED_EXT:
files.append(File(item, self.path, self.relpath))
return tuple(files)
@property
def files(self):
if self._files is None:
self._files = self._get_files()
return self._files
def validate_descqa_run_name(run_name, sub_base_dir):
path = os.path.join(sub_base_dir, run_name)
if not os.path.isdir(path):
return
if not os.access(path, os.R_OK + os.X_OK):
return
if os.path.exists(os.path.join(path, '.lock')):
return
m = re.match(r'(2[01]\d{2}-[01]\d-[0123]\d)(?:_(\d+))?', run_name)
if not m:
return
m = m.groups()
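    # e.g. '2018-05-01_2' -> datetime(2018, 5, 1, microsecond=2); the optional
    # '_N' suffix is encoded as microseconds so same-day runs sort in run order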
t = datetime.datetime(*(int(i) for i in m[0].split('-')), microsecond=int(m[1] or 0))
return t
class DescqaRun(object):
def __init__(self, run_name, base_dir, validated=False):
if not run_name.startswith(os.path.basename(os.path.normpath(base_dir))):
base_dir = os.path.join(base_dir, run_name.rpartition('-')[0])
if not validated:
assert validate_descqa_run_name(run_name, base_dir) is not None
self.base_dir = base_dir
self.name = run_name
self.path = os.path.join(base_dir, run_name)
self._tests = None
self._catalogs = None
self._test_prefixes = None
self._catalog_prefixes = None
self._status = None
self._data = dict()
@staticmethod
def _find_subdirs(path):
return tuple(sorted((d for d in os.listdir(path) if os.path.isdir(os.path.join(path, d)) and os.access(os.path.join(path, d), os.R_OK + os.X_OK) and not d.startswith('_'))))
def _find_tests(self):
return self._find_subdirs(self.path)
def _find_catalogs(self):
return self._find_subdirs(os.path.join(self.path, self.tests[0])) if len(self.tests) else tuple()
@staticmethod
def _find_prefixes(items):
prefixes = set()
for item in items:
prefixes.add(item.partition('_')[0])
return tuple(sorted(prefixes))
@property
def tests(self):
if self._tests is None:
self._tests = self._find_tests()
return self._tests
@property
def catalogs(self):
if self._catalogs is None:
self._catalogs = self._find_catalogs()
return self._catalogs
@property
def test_prefixes(self):
if self._test_prefixes is None:
self._test_prefixes = self._find_prefixes(self.tests)
return self._test_prefixes
@property
def catalog_prefixes(self):
if self._catalog_prefixes is None:
self._catalog_prefixes = self._find_prefixes(self.catalogs)
return self._catalog_prefixes
@staticmethod
def _get_things(things, prefix=None, return_iter=False):
it = (t for t in things if prefix is None or t.startswith(prefix))
return it if return_iter else tuple(it)
def get_tests(self, prefix=None, return_iter=False):
return self._get_things(self.tests, prefix, return_iter)
def get_catalogs(self, prefix=None, return_iter=False):
return self._get_things(self.catalogs, prefix, return_iter)
def __getitem__(self, key):
if key not in self._data:
try:
test, catalog = key
except ValueError:
test = key
catalog = None
if test in self.tests and (catalog in self.catalogs or catalog is None):
self._data[key] = DescqaItem(test, catalog, self.name, self.base_dir)
else:
raise KeyError('(test, catalog) = {} does not exist'.format(key))
return self._data[key]
@property
def status(self):
if self._status is None:
try:
with open(os.path.join(self.path, 'STATUS.json')) as f:
self._status = json.load(f)
except (IOError, OSError, ValueError):
self._status = dict()
return self._status
def iter_all_runs_unsorted(base_dir):
for run_name in os.listdir(base_dir):
run_key = validate_descqa_run_name(run_name, base_dir)
if run_key:
yield (run_name, run_key)
def iter_all_runs(base_dir, months_to_search=None):
for i, month_dir in enumerate(sorted((d for d in os.listdir(base_dir)
if re.match(r'\d{4}-[01]\d$', d) and os.path.isdir(os.path.join(base_dir, d))), reverse=True)):
if months_to_search is not None and i >= int(months_to_search):
break
sub_base_dir = os.path.join(base_dir, month_dir)
for run_name, _ in sorted(iter_all_runs_unsorted(sub_base_dir), key=lambda r: r[1], reverse=True):
yield run_name
| 8,045
| 31.443548
| 181
|
py
|
descqa
|
descqa-master/descqaweb/matrix.py
|
from __future__ import unicode_literals
import time
import html
from . import config
from .interface import iter_all_runs, DescqaRun
__all__ = ['prepare_matrix']
def find_last_descqa_run():
last_run = None
for run in iter_all_runs(config.root_dir):
descqa_run = DescqaRun(run, config.root_dir, validated=True)
if last_run is None:
last_run = descqa_run
if descqa_run.status.get('comment', '').strip().lower() == 'full run':
last_run = descqa_run
break
return last_run
def format_filter_link(targetDir, istest, new_test_prefix, new_catalog_prefix, current_test_prefix, current_catalog_prefix):
text = (new_test_prefix if istest else new_catalog_prefix) or 'CLEAR'
if new_test_prefix == current_test_prefix and new_catalog_prefix == current_catalog_prefix:
return '<span style="color:gray">{}</span>'.format(text)
new_test_prefix_str = '&test_prefix={}'.format(new_test_prefix) if new_test_prefix else ''
new_catalog_prefix_str = '&catalog_prefix={}'.format(new_catalog_prefix) if new_catalog_prefix else ''
return '<a href="?run={}{}{}">{}</a>'.format(targetDir, new_test_prefix_str, new_catalog_prefix_str, text)
def format_description(description_dict):
output = []
for k in sorted(description_dict):
v = description_dict.get(k)
if v:
output.append('<tr><td>{}</td><td>{}</td></tr>'.format(k, html.escape(v)))
if output:
return '\n'.join(output)
def get_short_status(status):
short_status = status.rpartition('_')[-1]
if short_status == 'FAILED':
short_status = 'NOT QUITE'
return short_status
def prepare_matrix(run=None, catalog_prefix=None, test_prefix=None):
if run:
try:
descqa_run = DescqaRun(run, config.root_dir)
except AssertionError:
raise ValueError('Invalid run "{}"'.format(run))
else:
descqa_run = find_last_descqa_run()
data = dict()
data['general_info'] = config.general_info
data['run'] = descqa_run.name
data['comment'] = html.escape(descqa_run.status.get('comment', ''))
data['user'] = descqa_run.status.get('user', 'UNKNOWN')
data['versions'] = ' | '.join(('{}: {}'.format(k, v) for k, v in descqa_run.status.get('versions', dict()).items()))
if 'start_time' in descqa_run.status:
data['start_time'] = time.strftime('at %Y/%m/%d %H:%M:%S PT', time.localtime(descqa_run.status.get('start_time')))
data['time_used'] = (descqa_run.status.get('end_time', -1.0) - descqa_run.status.get('start_time', 0.0))/60.0
else:
data['start_time'] = None
data['time_used'] = -1.0
    links = ' | '.join((format_filter_link(descqa_run.name, True, p, catalog_prefix, test_prefix, catalog_prefix) \
for p in ('',) + descqa_run.test_prefixes))
data['test_links'] = '[ Test prefix: {} ]<br>'.format(links)
    links = ' | '.join((format_filter_link(descqa_run.name, False, test_prefix, p, test_prefix, catalog_prefix) \
for p in ('',) + descqa_run.catalog_prefixes))
data['catalog_links'] = '[ Catalog prefix: {} ]'.format(links)
catalogs_this = descqa_run.get_catalogs(catalog_prefix)
table_width = len(catalogs_this)*120 + 200
if table_width > 1280:
data['table_width'] = "100%"
else:
data['table_width'] = "{}px".format(table_width)
matrix = list()
matrix.append('<tr><td> </td>')
for catalog in catalogs_this:
matrix.append('<td><a href="?run={1}&catalog={0}">{0}</a></td>'.format(catalog, descqa_run.name))
matrix.append('</tr>')
for test in descqa_run.get_tests(test_prefix, True):
matrix.append('<tr>')
matrix.append('<td><a href="?run={0}&test={1}">{1}</a></td>'.format(descqa_run.name, test))
for catalog in catalogs_this:
item = descqa_run[test, catalog]
matrix.append('<td class="{}"><a class="celllink" href="?run={}&test={}&catalog={}">{}<br>{}</a></td>'.format(\
item.status_color, descqa_run.name, test, catalog, get_short_status(item.status), item.score))
matrix.append('</tr>')
data['matrix'] = '\n'.join(matrix)
for type_this in ('Validation', 'Catalog'):
key = '{}_description'.format(type_this.lower())
if key in descqa_run.status:
table = format_description(descqa_run.status[key])
if table:
data[key] = '<table><thead><tr><td>{}</td><td>Description</td></tr></thead>\n<tbody>\n{}\n</tbody></table>'.format(type_this, table)
return data
| 4,624
| 40.294643
| 148
|
py
|
descqa
|
descqa-master/tests/test_descqa_subclasses.py
|
import descqa
def test_subclass_name_in_config():
for validation_name, validation_config in descqa.available_validations.items():
assert 'subclass_name' in validation_config, "{}.yaml has no `subclass_name`".format(validation_name)
def test_subclass_importable():
validations = set(v['subclass_name'] for v in descqa.available_validations.values())
for validation in validations:
descqa.register.import_subclass(validation, 'descqa', descqa.BaseValidationTest)
| 491
| 43.727273
| 109
|
py
|
descqa
|
descqa-master/tests/test_descqa_utils.py
|
from __future__ import division
import numpy as np
from scipy.stats import chi2
import healpy as hp
from descqa.utils import *
def check_ra_dec_basic(ra, dec, n):
assert ra.size == n
assert dec.size == n
assert (ra <= 360.0).all()
assert (ra >= 0.0).all()
assert (dec <= 90.0).all()
assert (dec >= -90.0).all()
def check_ra_dec_uniform(ra, dec, nside=2, footprint=None):
pixels = hp.ang2pix(nside, ra, dec, lonlat=True)
npix = hp.nside2npix(nside) if footprint is None else footprint.size
pixels, counts = np.unique(pixels, return_counts=True)
assert pixels.size == npix
mean = ra.size / npix
assert chi2.sf(((counts - mean)**2.0 / mean).sum(), df=npix-1) > 1e-5
def test_generate_uniform_random_ra_dec_basic():
n = 10000
ra, dec = generate_uniform_random_ra_dec(n)
check_ra_dec_basic(ra, dec, n)
check_ra_dec_uniform(ra, dec)
def test_generate_uniform_random_ra_dec_footprint():
n = 10000
nside = 2
npix = hp.nside2npix(nside)
footprint = np.arange(npix)[np.random.randint(2, size=npix).astype(bool)]
ra, dec = generate_uniform_random_ra_dec_footprint(n, footprint, nside)
check_ra_dec_basic(ra, dec, n)
check_ra_dec_uniform(ra, dec, nside, footprint)
assert (get_healpixel_footprint(ra, dec, nside) == footprint).all()
| 1,327
| 29.181818
| 77
|
py
|
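A minimal sketch of the uniformity statistic used in check_ra_dec_uniform above, run on synthetic counts rather than real sky positions (assumes numpy/scipy; npix = 48 matches healpix nside=2):
import numpy as np
from scipy.stats import chi2

npix = 48                                    # hp.nside2npix(2)
counts = np.random.multinomial(10000, np.full(npix, 1.0 / npix))
mean = counts.sum() / npix
stat = ((counts - mean) ** 2 / mean).sum()   # ~ chi2 with npix-1 dof under uniformity
print(chi2.sf(stat, df=npix - 1))            # large survival probability => consistent with uniform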
descqa
|
descqa-master/descqagen/app_mag_func_test/data/__init__.py
| 0
| 0
| 0
|
py
|
|
Transformers-From-Optimization
|
Transformers-From-Optimization-main/combination_energy.py
|
import torch as tc
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import set_random_seed
import pdb
import sklearn
import sklearn.decomposition
from matplotlib.patches import ConnectionPatch
set_random_seed(233)
def F_norm(Y):
return (Y ** 2).sum()
n = 500
d = 128
W1 = tc.randn(d,d) * 0.1
W2 = tc.randn(n,n) * 0.1
num_epochs = 200
YB1 = tc.rand(n,d)
YB2 = tc.rand(n,d)
class Model(nn.Module):
def __init__(self , Y0):
super().__init__()
self.Y = nn.Parameter( Y0 )
Y0 = tc.rand(n,d) * 2
model = Model( Y0 )
model_h = Model( Y0.detach().clone() )
optimizer_1 = tc.optim.SGD(model.parameters() , lr = 0.05)
optimizer_2 = tc.optim.SGD(model.parameters() , lr = 0.05)
optimizer_h = tc.optim.SGD(model_h.parameters() , lr = 0.01) # used to find the minimizer of h
def ener_1(Y):
return F_norm(Y @ W1) + F_norm( Y - YB1 )
def ener_2(Y):
return F_norm(W2 @ Y) + F_norm( Y - YB2 )
mapW = tc.randn(2 , n*d)
def mapsto(Y):
Y = Y.view(-1,1)
Y = mapW @ Y
return Y
tot_losses = []
Y_trace = []
for epoch_id in tqdm( range(num_epochs) ):
Y = model.Y
if epoch_id % 2 == 0:
loss = ener_1(Y)
optimizer = optimizer_1
elif epoch_id % 2 == 1:
loss = ener_2(Y)
optimizer = optimizer_2
optimizer.zero_grad()
loss.backward()
optimizer.step()
with tc.no_grad():
tot_loss = ener_1(Y) + ener_2(Y)
tot_losses.append( tot_loss )
Y_trace.append( Y.data.clone().view(1,-1) )
# find the optimal solution of h
_pbar = tqdm(range(600))
for _ in _pbar:
loss_h = ener_1(model_h.Y) + ener_2(model_h.Y)
optimizer_h.zero_grad()
loss_h.backward()
optimizer_h.step()
_pbar.set_description("loss = %.4f" % loss_h)
Y_trace = tc.cat( Y_trace , dim = 0 ) # (num_epoch , n*d)
pca = sklearn.decomposition.PCA(2)
mapedYs = pca.fit_transform(Y_trace)
mapedh = tc.Tensor( pca.transform(model_h.Y.detach().view(1,-1)) ).view(-1)
tot_losses = tc.Tensor(tot_losses)
tot_losses = tc.log(tot_losses)
tot_losses = tot_losses - tot_losses.min()
# ---- plot the trajectory ----
# small zoom-in box
xl , xr = mapedh[0]-4,mapedh[0]+4
yb , yt = mapedh[1]-4,mapedh[1]+4
fig = plt.figure(figsize=(12,5) , dpi=512)
p1 = plt.subplot(121)
p2 = plt.subplot(122)
p1.plot( mapedYs[:,0] , mapedYs[:,1] , zorder = 1 , label = "trace of $Y^{(t)}$")
p1.scatter( mapedh[0] , mapedh[1] , color = (1,0.4,0.1) , s = 40 , zorder = 2 , marker = "^" , label = "$Y_h^*$")
p1.scatter( mapedYs[0][0] , mapedYs[0][1] , color = (0.7,0.4,0.4) , s = 40 , zorder = 3 , marker = "*" , label = "$Y^{(0)}$")
p1.plot( [xl,xr,xr,xl,xl] , [yt,yt,yb,yb,yt] , color = (0.2,0.0,0.2,0.7))
p1.legend()
# p1.set_xlabel("x[0]")
# p1.set_ylabel("x[1]")
p2.plot( mapedYs[:,0] , mapedYs[:,1] , zorder = 1 , label = "trace of $Y^{(t)}$")
p2.scatter( mapedh[0] , mapedh[1] , color = (1,0.4,0.1) , s = 40 , zorder = 2 , marker = "^" , label = "$Y_h^*$")
p2.set_xlim(xl , xr)
p2.set_ylim(yb , yt)
p2.legend()
# p2.set_xlabel("x[0]")
# p2.set_ylabel("x[1]")
# connect p1 and p2
con1 = ConnectionPatch(
xyA = [xr,yt] , xyB = [xl,yt] ,
coordsA = "data" , coordsB = "data" ,
axesA = p1 , axesB = p2 ,
color = (0.2,0.0,0.2,0.7) , linestyle = "dashed"
)
con2 = ConnectionPatch(
xyA = [xr,yb] , xyB = [xl,yb] ,
coordsA = "data" , coordsB = "data" ,
axesA = p1 , axesB = p2 ,
color = (0.2,0.0,0.2,0.7) , linestyle = "dashed"
)
p1.add_artist(con1)
p1.add_artist(con2)
fig.tight_layout()
plt.savefig("generated_figures/alternate_trace.png")
# ---- plot the energy curve ----
fig = plt.figure(figsize=(6,4) , dpi=512)
plt.plot( range(num_epochs) , tot_losses )
plt.xlabel("$t$" , fontsize = 15)
plt.ylabel("$\log E\\left(Y^{(t)}\\right) - \log E_{\\min}$" , fontsize = 15)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
fig.tight_layout()
plt.savefig("generated_figures/alternate_energy.png")
| 3,850
| 24.335526
| 125
|
py
|
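A hedged cross-check for combination_energy.py above: setting the gradient of the combined energy E(Y) = ||Y W1||_F^2 + ||Y - YB1||_F^2 + ||W2 Y||_F^2 + ||Y - YB2||_F^2 to zero yields a Sylvester equation, which scipy solves in closed form. This sketch assumes it runs at the end of that script (W1, W2, YB1, YB2, d, model_h, tc in scope) and that scipy is installed:
import numpy as np
from scipy.linalg import solve_sylvester

# grad E = 0  <=>  (W2^T W2) Y + Y (W1 W1^T + 2 I) = YB1 + YB2
A = (W2.t() @ W2).numpy()
B = (W1 @ W1.t() + 2 * tc.eye(d)).numpy()
Q = (YB1 + YB2).numpy()
Y_star = solve_sylvester(A, B, Q)
# should be small if the SGD loop for model_h has converged
print(np.abs(Y_star - model_h.Y.detach().numpy()).max())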
Transformers-From-Optimization
|
Transformers-From-Optimization-main/divergence.py
|
import torch as tc
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import set_random_seed
import pdb
import sklearn
import sklearn.decomposition
import scipy.spatial as spt
import itertools
set_random_seed(23333)
def norm(x):
return (x ** 2).sum(-1) ** 0.5
W = tc.randn(2,2)
W = W @ W.t()
B = tc.randn(1,2)
def h(x):
return 0.5 * x.t() @ W @ x + 0.5 * norm(x)**2
def alphah(X):
alpha = 0.25
return alpha * X @ W + alpha * X - alpha * B
def div(X):
    '''X: (n,2); per-point criterion comparing ||alphah(X)|| with ||X||.'''
    xi1 = alphah(X) # (n,2)
    xi2 = X # (n,2)
    r = xi2**2 - xi1**2
    r[r >= 0] = 0 # keep only the coordinates where alphah(X) dominates X
    return r.sum(-1) / norm(xi1)**2
fig = plt.figure(figsize = (24,6) , dpi = 512 )
p1 = plt.subplot(141)
p2 = plt.subplot(142)
p3 = plt.subplot(143)
p4 = plt.subplot(144)
for thres , pl in zip([0.2 , 0.4 , 0.6, 0.8] , [p1,p2,p3,p4]):
X = tc.arange(-5,5 , 0.02)
Y = tc.arange(-5,5 , 0.02)
xys = tc.Tensor( list(itertools.product(X.numpy(),Y.numpy())) )
idx = div(xys) >= -thres
xys = xys[idx]
pl.scatter(xys[:,0] , xys[:,1] , s = 4)
pl.set_xlim(-5,5)
pl.set_ylim(-5,5)
for tick in pl.xaxis.get_major_ticks():
tick.label.set_fontsize(15)
for tick in pl.yaxis.get_major_ticks():
tick.label.set_fontsize(15)
# pl.set_xticks(fontsize=15)
# pl.set_yticks(fontsize=15)
pl.set_title("$\kappa={0}$".format(thres) , fontsize = 18)
fig.tight_layout()
# plt.show()
plt.savefig("generated_figures/divergence.png")
| 1,504
| 19.902778
| 67
|
py
|
Transformers-From-Optimization
|
Transformers-From-Optimization-main/utils.py
|
import random
import torch as tc
import numpy as np
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
tc.manual_seed(seed)
tc.cuda.manual_seed_all(seed)
tc.backends.cudnn.deterministic = True
tc.backends.cudnn.benchmark = False
| 269
| 21.5
| 42
|
py
|
Transformers-From-Optimization
|
Transformers-From-Optimization-main/apollo_circle.py
|
import torch as tc
import torch.nn as nn
import matplotlib.pyplot as plt
from tqdm import tqdm
from utils import set_random_seed
import pdb
import sklearn
import sklearn.decomposition
import scipy.spatial as spt
import itertools
set_random_seed(23333)
def norm(x):
return (x ** 2).sum(-1) ** 0.5
def paint(xf,xh,C):
fig = plt.figure(figsize = (6,6) , dpi=512)
X = tc.arange(-3.5,3.5 , 0.02)
Y = tc.arange(-3.5,3.5 , 0.02)
xys = tc.Tensor( list(itertools.product(X.numpy(),Y.numpy())) )
idx = norm( xys - xf ) / norm( xys - xh ) <= C
xys = xys[idx]
plt.scatter(xys[:,0] , xys[:,1] , s = 4)
plt.scatter(xf[0,0] , xf[0,1] , s = 122 , color = (0.4,0,0) , marker = "^")
plt.scatter(xh[0,0] , xh[0,1] , s = 122 , color = (0,0.4,0) , marker = "^")
plt.xlim(-3.5,3.5)
plt.ylim(-3.5,3.5)
# plt.xlabel("",fontsize = 15)
# plt.ylabel("",fontsize = 15)
plt.xticks(fontsize = 15)
plt.yticks(fontsize = 15)
plt.text( xf[0,0]-0.3 , xf[0,1]-0.5 , s = "$\mathbf{y}_f^*$" , fontsize = 18)
plt.text( xh[0,0]-0.3 , xh[0,1]-0.5 , s = "$\mathbf{y}_h^*$" , fontsize = 18)
fig.tight_layout()
plt.savefig("generated_figures/apollo_C={0}.png".format(C))
# plt.show()
xf = tc.randn(2).view(1,-1)
xh = tc.randn(2).view(1,-1)
paint(xf,xh,0.7)
paint(xf,xh,1.5)
print ("xf = {0}, xh = {1}".format(xf,xh))
| 1,370
| 25.365385
| 81
|
py
|
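For apollo_circle.py above, the boundary of the painted region { x : |x - xf| / |x - xh| <= C } is the classical Apollonius circle, which has a closed form for C != 1: center (xf - C^2 xh)/(1 - C^2) and radius C |xf - xh| / |1 - C^2|. A small sketch using the script's variables (xf, xh, norm assumed in scope):
def apollonius(xf, xh, C):
    # boundary of { x : |x - xf| / |x - xh| = C }, valid for C != 1
    center = (xf - C**2 * xh) / (1 - C**2)
    radius = C * norm(xf - xh) / abs(1 - C**2)
    return center, radius

center, radius = apollonius(xf, xh, 0.7)
print(center, radius)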
Transformers-From-Optimization
|
Transformers-From-Optimization-main/energy_curve/main.py
|
from model import Transformer
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.io import IMDBLoader
from fastNLP.io import IMDBPipe
from fastNLP.embeddings import StaticEmbedding
import pdb
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
from load_data import load_data
from paint import paint
from tqdm import tqdm
import numpy as np
import random
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
tc.manual_seed(seed)
tc.cuda.manual_seed_all(seed)
tc.backends.cudnn.deterministic = True
tc.backends.cudnn.benchmark = False
set_random_seed(2333)
dataset_names = ["imdb" , "sst2"]
for dataset_name in dataset_names:
norelu_name = ""
relu_name = ""
if dataset_name == "imdb":
norelu_name = "norelu"
relu_name = "relu"
else:
norelu_name = "norelu_%s" % dataset_name
relu_name = "relu_%s" % dataset_name
data_bundle , word2vec_embed = load_data(dataset_name)
train_data = data_bundle.get_dataset("train")
d = word2vec_embed.embedding_dim
num_layers = 12
model_1 = Transformer(d , num_layers , 2)
model_1.normalize_weight()
paint(model_1 , "../generated_figures/%s.png" % norelu_name)
model_2 = Transformer(d , num_layers , 2 , relu = True)  # relu variant; the flag was not being passed
model_2.normalize_weight()
paint(model_2 , "../generated_figures/%s.png" % relu_name)
| 1,428
| 24.517857
| 64
|
py
|
Transformers-From-Optimization
|
Transformers-From-Optimization-main/energy_curve/paint.py
|
from model import Transformer
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.io import IMDBLoader
from fastNLP.io import IMDBPipe
from fastNLP.embeddings import StaticEmbedding
import pdb
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
from load_data import load_data
from tqdm import tqdm
import random
num_test_epoch = 200
def paint(model , savepath = None):
model = model.eval()
    data_bundle , word2vec_embed = load_data()  # note: always loads the IMDB split, regardless of the caller's dataset
datas = data_bundle.get_dataset("test")
eners_tot = [ ]
for data_idx in tqdm( random.sample( list(range(len(datas))) , num_test_epoch) ):
Y = word2vec_embed(datas[data_idx]["words"])
eners = model(Y)["ener"]
eners = tc.Tensor(eners).view(-1)
eners_tot.append(eners)
eners_tot = tc.cat( [e.view(-1 , 1) for e in eners_tot] , dim = -1 )
eners_tot = eners_tot - eners_tot.min()
# eners_tot = eners_tot / eners_tot.max()
fig = plt.figure(figsize=(8,4))
plt.plot( range(model.num_layers + 1) , eners_tot.mean(dim = -1) )
    # draw a throwaway boxplot first, only to read off its lowest whisker;
    # the energies are then shifted so that whisker sits at zero and redrawn
    pre_bp = plt.boxplot(
eners_tot ,
positions = list(range(model.num_layers + 1)),
showfliers = False
)
res = {key : [v.get_data() for v in value] for key, value in pre_bp.items()}
whiskers = res["whiskers"]
whisker_min = min( [ whiskers[i][1].min() for i in range(len(whiskers))] )
# plt.cla()
plt.close()
fig = plt.figure(figsize=(8,4))
eners_tot = eners_tot - float( whisker_min )
plt.plot( range(model.num_layers + 1) , eners_tot.mean(dim = -1) )
plt.boxplot(
eners_tot ,
positions = list(range(model.num_layers + 1)),
showfliers = False
)
plt.xlabel("$t$ (layer index)" , fontsize = 15)
plt.ylabel("$E\\left(Y^{(t)}\\right) - E_{\\min}$" , fontsize = 15)
plt.xticks(fontsize=12)
plt.yticks(fontsize=12)
plt.ticklabel_format(style='sci', scilimits=(-1,2), axis='y')
fig.tight_layout()
if savepath is None:
plt.show()
else:
plt.savefig(savepath)
| 2,092
| 28.069444
| 85
|
py
|
Transformers-From-Optimization
|
Transformers-From-Optimization-main/energy_curve/model.py
|
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
import pdb
alpha_1 = 1
alpha_2 = 1
def norm2(X):
return (X ** 2).sum()
def inner(x,y):
return (x.view(-1) * y.view(-1)).sum()
idxs_cache = {}
class Attention(nn.Module):
def __init__(self , d):
super().__init__()
self.d = d
self._W = nn.Parameter( tc.zeros(d,d) )
self.reset_params()
def reset_params(self):
with tc.no_grad():
nn.init.xavier_normal_(self._W.data)
def normalize_weight(self):
pass
@property
def W(self):
return self._W.t() @ self._W
def get_energy(self , Y):
Y = Y @ self._W
n = Y.size(0)
rho = lambda x: - tc.exp( - x )
with tc.no_grad():
ener = 0
            # the naive double loop below is far too slow
# for i in range(n):
# for j in range(i):
# ener = ener + rho( 0.5 * norm2(Y[i] - Y[j]) )
if idxs_cache.get(n) is None:
idxs_cache[n] = {
"idxs_i": tc.LongTensor( [ i for i in range(n) for j in range(i)] ) ,
"idxs_j": tc.LongTensor( [ j for i in range(n) for j in range(i)] ) ,
}
idxs_i = idxs_cache[n]["idxs_i"]
idxs_j = idxs_cache[n]["idxs_j"]
ener_rho = rho( 0.5 * ((Y[idxs_i] - Y[idxs_j])**2).sum(-1) ).sum()
ener = ener_rho + 0.5 * norm2(Y)
return ener
def forward(self , Y):
'''
Y: (n,d)
'''
n , d = Y.size(0) , Y.size(1)
beta = -0.5 * ((Y @ self._W) ** 2).sum(-1)
# if not self.training:
# pdb.set_trace()
A = tc.softmax( Y @ self.W @ Y.t() , -1 )
Z = (1-alpha_1) * Y + alpha_1 * A @ Y
return Z
class FFN(nn.Module):
def __init__(self , d):
super().__init__()
self.d = d
self._Wf = nn.Parameter( tc.zeros(d,d) )
self.B = nn.Parameter( tc.zeros(1,d) )
self.reset_params()
def reset_params(self):
with tc.no_grad():
nn.init.xavier_normal_(self._Wf.data)
nn.init.xavier_normal_(self.B.data)
def normalize_weight(self):
with tc.no_grad():
W = self._Wf.data
W = W @ W.t()
L , U = tc.linalg.eigh(W) # W = U @ L.diag() @ U.t()
L[L > 0.95] = 0.95
L[L < -0.95] = -0.95
W = U @ L.diag() @ U.t()
self._Wf.data = W
@property
def Wf(self):
return - 0.5 * alpha_2 * (self._Wf + self._Wf.t()) + (1-alpha_2) * tc.eye(self.d)
def get_energy(self , Y):
with tc.no_grad():
return 0.5 * tc.trace(Y @ self._Wf @ Y.t()) + 0.5 * norm2(Y - self.B)
def forward(self , Y):
'''
Y: (n,d)
'''
Y = Y @ self.Wf + self.B
return Y
class TransformerLayer(nn.Module):
def __init__(self , d , relu):
super().__init__()
self.d = d
self.relu = relu
self.attn = Attention(self.d)
self.ffn = FFN(self.d)
def get_energy(self , Y):
ener = 0
ener = ener + self.attn.get_energy(Y)
ener = ener + self.ffn.get_energy(Y)
return ener
def normalize_weight(self):
self.attn.normalize_weight()
self.ffn.normalize_weight()
def forward(self , Y):
Y = self.attn(Y)
Y = self.ffn(Y)
if self.relu:
Y = F.relu(Y)
return Y
class Transformer(nn.Module):
def __init__(self , d , num_layers , output_size = 2 , relu = False):
super().__init__()
self.d = d
self.num_layers = num_layers
self.relu = relu
self.rec_layer = TransformerLayer(self.d , self.relu)
self.output = nn.Linear(d , output_size)
def normalize_weight(self):
self.rec_layer.normalize_weight()
def get_energy(self , Y ):
return self.rec_layer.get_energy(Y)
def forward(self , Y):
# if not self.training:
# pdb.set_trace()
energies = [ self.get_energy(Y) ]
for layer_idx in range(self.num_layers):
Y = self.rec_layer(Y)
energies.append( self.get_energy(Y) )
output = self.output(Y)
return {
"repr": Y ,
"ener": energies ,
"pred": output ,
}
| 4,459
| 21.989691
| 90
|
py
|
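A hedged alternative to the hand-built idxs_cache in Attention.get_energy above: torch.triu_indices produces the same unordered index pairs without the Python list comprehensions. Sketch only; it reproduces just the rho(...) pair sum, so as in get_energy it should be applied to Y @ self._W with 0.5 * norm2(Y) added afterwards:
import torch as tc

def pairwise_energy(Y):
    n = Y.size(0)
    i, j = tc.triu_indices(n, n, offset=1)  # all index pairs with i < j
    return -tc.exp(-0.5 * ((Y[i] - Y[j]) ** 2).sum(-1)).sum()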
Transformers-From-Optimization
|
Transformers-From-Optimization-main/energy_curve/load_data.py
|
from model import Transformer
import torch as tc
import torch.nn as nn
import torch.nn.functional as F
from fastNLP.io import IMDBLoader , SST2Loader
from fastNLP.io import IMDBPipe , SST2Pipe
from fastNLP.embeddings import StaticEmbedding
import pdb
import pickle
from pathlib import Path
import matplotlib.pyplot as plt
def load_data(data_name = "imdb"):
if data_name == "imdb":
save_path = Path("./chach_datas.pkl")
if not save_path.exists():
loader = IMDBLoader()
pipe = IMDBPipe()
data_bundle = pipe.process( loader.load(loader.download()) )
word_vocab = data_bundle.get_vocab("words")
word2vec_embed = StaticEmbedding(word_vocab, model_dir_or_name = "en")
with open(save_path , "wb") as fil:
pickle.dump([data_bundle , word2vec_embed] , fil)
else:
with open(save_path , "rb") as fil:
data_bundle , word2vec_embed = pickle.load(fil)
return data_bundle , word2vec_embed
save_path = Path("./chach_datas_sst2.pkl")
if not save_path.exists():
loader = SST2Loader()
pipe = SST2Pipe()
data_bundle = pipe.process( loader.load(loader.download()) )
word_vocab = data_bundle.get_vocab("words")
word2vec_embed = StaticEmbedding(word_vocab, model_dir_or_name = "en")
with open(save_path , "wb") as fil:
pickle.dump([data_bundle , word2vec_embed] , fil)
else:
with open(save_path , "rb") as fil:
data_bundle , word2vec_embed = pickle.load(fil)
return data_bundle , word2vec_embed
| 1,638
| 29.351852
| 82
|
py
|
pybullet-gym
|
pybullet-gym-master/setup.py
|
from setuptools import setup, find_packages
import sys, os.path
# Don't import gym module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'pybulletgym'))
VERSION = 0.1
setup_py_dir = os.path.dirname(os.path.realpath(__file__))
need_files = []
datadir = "pybulletgym/envs/assets"
hh = setup_py_dir + "/" + datadir
for root, dirs, files in os.walk(hh):
for fn in files:
ext = os.path.splitext(fn)[1][1:]
if ext and ext in 'png gif jpg urdf sdf obj mtl dae off stl STL xml '.split():
fn = root + "/" + fn
need_files.append(fn[1+len(hh):])
setup(name='pybulletgym',
version=VERSION,
description='PyBullet Gym is an open-source implementation of the OpenAI Gym MuJoCo environments for use with the OpenAI Gym Reinforcement Learning Research Platform in support of open research.',
url='https://github.com/benelot/pybullet-gym',
author='Benjamin Ellenberger',
author_email='be.ellenberger@gmail.com',
license='',
packages=[package for package in find_packages()
if package.startswith('pybulletgym')],
zip_safe=False,
install_requires=[
'pybullet>=1.7.8',
],
package_data={'pybulletgym': need_files},
)
| 1,288
| 32.921053
| 202
|
py
|
pybullet-gym
|
pybullet-gym-master/__init__.py
| 0
| 0
| 0
|
py
|
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/__init__.py
|
from pybulletgym.envs import register # this is included to trigger env loading
| 80
| 80
| 80
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/gym_utils.py
|
import inspect
import os
from pybulletgym.envs.roboschool.robots.robot_bases import BodyPart
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0, parentdir)
import pybullet_data
def get_cube(p, x, y, z):
body = p.loadURDF(os.path.join(pybullet_data.getDataPath(), "cube_small.urdf"), [x, y, z])
p.changeDynamics(body, -1, mass=1.2) # match Roboschool
part_name, _ = p.getBodyInfo(body)
part_name = part_name.decode("utf8")
bodies = [body]
return BodyPart(p, part_name, bodies, 0, -1)
def get_sphere(p, x, y, z):
body = p.loadURDF(os.path.join(pybullet_data.getDataPath(), "sphere2red_nocol.urdf"), [x, y, z])
part_name, _ = p.getBodyInfo(body)
part_name = part_name.decode("utf8")
bodies = [body]
return BodyPart(p, part_name, bodies, 0, -1)
| 889
| 31.962963
| 100
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/__init__.py
|
import gym
from gym.envs.registration import register
# roboschool envs
## pendula
register(
id='InvertedPendulumPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.pendulum.inverted_pendulum_env:InvertedPendulumBulletEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulumPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.pendulum.inverted_double_pendulum_env:InvertedDoublePendulumBulletEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='InvertedPendulumSwingupPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.pendulum.inverted_pendulum_env:InvertedPendulumSwingupBulletEnv',
max_episode_steps=1000,
reward_threshold=800.0,
)
## manipulators
register(
id='ReacherPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.manipulation.reacher_env:ReacherBulletEnv',
max_episode_steps=150,
reward_threshold=18.0,
)
register(
id='PusherPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.manipulation.pusher_env:PusherBulletEnv',
max_episode_steps=150,
reward_threshold=18.0,
)
register(
id='ThrowerPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.manipulation.thrower_env:ThrowerBulletEnv',
max_episode_steps=100,
reward_threshold=18.0,
)
register(
id='StrikerPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.manipulation.striker_env:StrikerBulletEnv',
max_episode_steps=100,
reward_threshold=18.0,
)
## locomotors
register(
id='Walker2DPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.walker2d_env:Walker2DBulletEnv',
max_episode_steps=1000,
reward_threshold=2500.0
)
register(
id='HalfCheetahPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.half_cheetah_env:HalfCheetahBulletEnv',
max_episode_steps=1000,
reward_threshold=3000.0
)
register(
id='AntPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.ant_env:AntBulletEnv',
max_episode_steps=1000,
reward_threshold=2500.0
)
register(
id='HopperPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.hopper_env:HopperBulletEnv',
max_episode_steps=1000,
reward_threshold=2500.0
)
register(
id='HumanoidPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.humanoid_env:HumanoidBulletEnv',
max_episode_steps=1000
)
register(
id='HumanoidFlagrunPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.humanoid_flagrun_env:HumanoidFlagrunBulletEnv',
max_episode_steps=1000,
reward_threshold=2000.0
)
register(
id='HumanoidFlagrunHarderPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.humanoid_flagrun_env:HumanoidFlagrunHarderBulletEnv',
max_episode_steps=1000
)
register(
id='AtlasPyBulletEnv-v0',
entry_point='pybulletgym.envs.roboschool.envs.locomotion.atlas_env:AtlasBulletEnv',
max_episode_steps=1000
)
# mujoco envs
register(
id='InvertedPendulumMuJoCoEnv-v0',
entry_point='pybulletgym.envs.mujoco.envs.pendulum.inverted_pendulum_env:InvertedPendulumMuJoCoEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulumMuJoCoEnv-v0',
entry_point='pybulletgym.envs.mujoco.envs.pendulum.inverted_double_pendulum_env:InvertedDoublePendulumMuJoCoEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='Walker2DMuJoCoEnv-v0',
entry_point='pybulletgym.envs.mujoco.envs.locomotion.walker2d_env:Walker2DMuJoCoEnv',
max_episode_steps=1000,
reward_threshold=2500.0
)
register(
id='HalfCheetahMuJoCoEnv-v0',
entry_point='pybulletgym.envs.mujoco.envs.locomotion.half_cheetah_env:HalfCheetahMuJoCoEnv',
max_episode_steps=1000,
reward_threshold=3000.0
)
register(
id='AntMuJoCoEnv-v0',
entry_point='pybulletgym.envs.mujoco.envs.locomotion.ant_env:AntMuJoCoEnv',
max_episode_steps=1000,
reward_threshold=2500.0
)
register(
id='HopperMuJoCoEnv-v0',
entry_point='pybulletgym.envs.mujoco.envs.locomotion.hopper_env:HopperMuJoCoEnv',
max_episode_steps=1000,
reward_threshold=2500.0
)
register(
id='HumanoidMuJoCoEnv-v0',
entry_point='pybulletgym.envs.mujoco.envs.locomotion.humanoid_env:HumanoidMuJoCoEnv',
max_episode_steps=1000
)
def get_list():
    envs = ['- ' + spec.id for spec in gym.envs.registry.all() if spec.id.find('Bullet') >= 0 or spec.id.find('MuJoCo') >= 0]
    return envs
| 4,367
| 26.130435
| 127
|
py
|
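A short, hedged usage sketch for the registrations above (old-style Gym API, as used throughout this repo):
import gym
import pybulletgym  # noqa: F401  -- importing triggers the register() calls above

env = gym.make('HopperPyBulletEnv-v0')
obs = env.reset()
obs, reward, done, info = env.step(env.action_space.sample())
env.close()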
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/assets/__init__.py
| 0
| 0
| 0
|
py
|
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/__init__.py
| 0
| 0
| 0
|
py
|
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/env_bases.py
|
import gym, gym.spaces, gym.utils, gym.utils.seeding
import numpy as np
import pybullet
from pybullet_utils import bullet_client
from pkg_resources import parse_version
class BaseBulletEnv(gym.Env):
"""
Base class for Bullet physics simulation environments in a Scene.
These environments create single-player scenes and behave like normal Gym environments, if
you don't use multiplayer.
"""
metadata = {
'render.modes': ['human', 'rgb_array'],
'video.frames_per_second': 60
}
def __init__(self, robot, render=False):
self.scene = None
self.physicsClientId = -1
self.ownsPhysicsClient = 0
self.camera = Camera()
self.isRender = render
self.robot = robot
self._seed()
self._cam_dist = 3
self._cam_yaw = 0
self._cam_pitch = -30
self._render_width = 320
self._render_height = 240
self.action_space = robot.action_space
self.observation_space = robot.observation_space
def configure(self, args):
self.robot.args = args
def _seed(self, seed=None):
self.np_random, seed = gym.utils.seeding.np_random(seed)
self.robot.np_random = self.np_random # use the same np_randomizer for robot as for env
return [seed]
def _reset(self):
if self.physicsClientId < 0:
self.ownsPhysicsClient = True
if self.isRender:
self._p = bullet_client.BulletClient(connection_mode=pybullet.GUI)
else:
self._p = bullet_client.BulletClient()
self.physicsClientId = self._p._client
self._p.configureDebugVisualizer(pybullet.COV_ENABLE_GUI,0)
if self.scene is None:
self.scene = self.create_single_player_scene(self._p)
if not self.scene.multiplayer and self.ownsPhysicsClient:
self.scene.episode_restart(self._p)
self.robot.scene = self.scene
self.frame = 0
self.done = 0
self.reward = 0
dump = 0
s = self.robot.reset(self._p)
self.potential = self.robot.calc_potential()
return s
def _render(self, mode, close=False):
if mode == "human":
self.isRender = True
if mode != "rgb_array":
return np.array([])
base_pos = [0, 0, 0]
if hasattr(self,'robot'):
if hasattr(self.robot,'body_xyz'):
base_pos = self.robot.body_xyz
view_matrix = self._p.computeViewMatrixFromYawPitchRoll(
cameraTargetPosition=base_pos,
distance=self._cam_dist,
yaw=self._cam_yaw,
pitch=self._cam_pitch,
roll=0,
upAxisIndex=2)
proj_matrix = self._p.computeProjectionMatrixFOV(
fov=60, aspect=float(self._render_width)/self._render_height,
nearVal=0.1, farVal=100.0)
(_, _, px, _, _) = self._p.getCameraImage(
width = self._render_width, height=self._render_height, viewMatrix=view_matrix,
projectionMatrix=proj_matrix,
renderer=pybullet.ER_BULLET_HARDWARE_OPENGL
)
rgb_array = np.array(px)
rgb_array = rgb_array[:, :, :3]
return rgb_array
def _close(self):
if self.ownsPhysicsClient:
if self.physicsClientId >= 0:
self._p.disconnect()
self.physicsClientId = -1
def HUD(self, state, a, done):
pass
# backwards compatibility for gym >= v0.9.x
# for extension of this class.
def step(self, *args, **kwargs):
return self._step(*args, **kwargs)
if parse_version(gym.__version__)>=parse_version('0.9.6'):
close = _close
render = _render
reset = _reset
seed = _seed
class Camera:
def __init__(self):
pass
    def move_and_look_at(self,i,j,k,x,y,z):
        lookat = [x,y,z]
        distance = 10
        yaw = 10
        # NOTE: self._p is never assigned on Camera, so this call only works
        # if a bullet client is attached to the instance externally
        self._p.resetDebugVisualizerCamera(distance, yaw, -20, lookat)
| 3,411
| 24.654135
| 91
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/__init__.py
| 0
| 0
| 0
|
py
|
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/manipulation/pusher_env.py
|
from pybulletgym.envs.mujoco.envs.env_bases import BaseBulletEnv
from pybulletgym.envs.mujoco.robots.manipulators.pusher import Pusher
from pybulletgym.envs.mujoco.scenes.scene_bases import SingleRobotEmptyScene
import numpy as np
class PusherBulletEnv(BaseBulletEnv):
def __init__(self):
self.robot = Pusher()
BaseBulletEnv.__init__(self, self.robot)
def create_single_player_scene(self, bullet_client):
return SingleRobotEmptyScene(bullet_client, gravity=9.81, timestep=0.0020, frame_skip=5)
def _step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
potential_old = self.potential
self.potential = self.robot.calc_potential()
joint_vel = np.array([
self.robot.shoulder_pan_joint.get_velocity(),
self.robot.shoulder_lift_joint.get_velocity(),
self.robot.upper_arm_roll_joint.get_velocity(),
self.robot.elbow_flex_joint.get_velocity(),
self.robot.forearm_roll_joint.get_velocity(),
self.robot.wrist_flex_joint.get_velocity(),
self.robot.wrist_roll_joint.get_velocity()
])
action_product = np.matmul(np.abs(a), np.abs(joint_vel))
action_sum = np.sum(a)
electricity_cost = (
-0.10 * action_product # work torque*angular_velocity
- 0.01 * action_sum # stall torque require some energy
)
stuck_joint_cost = 0
for j in self.robot.ordered_joints:
if np.abs(j.current_relative_position()[0]) - 1 < 0.01:
stuck_joint_cost += -0.1
self.rewards = [float(self.potential - potential_old), float(electricity_cost), float(stuck_joint_cost)]
self.HUD(state, a, False)
return state, sum(self.rewards), False, {}
def calc_potential(self):
return -100 * np.linalg.norm(self.to_target_vec)
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
| 2,136
| 35.844828
| 112
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/manipulation/reacher_env.py
|
from pybulletgym.envs.mujoco.envs.env_bases import BaseBulletEnv
from pybulletgym.envs.mujoco.robots.manipulators.reacher import Reacher
from pybulletgym.envs.mujoco.scenes.scene_bases import SingleRobotEmptyScene
import numpy as np
class ReacherBulletEnv(BaseBulletEnv):
def __init__(self):
self.robot = Reacher()
BaseBulletEnv.__init__(self, self.robot)
def create_single_player_scene(self, bullet_client):
return SingleRobotEmptyScene(bullet_client, gravity=0.0, timestep=0.0165, frame_skip=1)
def _step(self, a):
assert (not self.scene.multiplayer)
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
potential_old = self.potential
self.potential = self.robot.calc_potential()
electricity_cost = (
-0.10 * (np.abs(a[0] * self.robot.theta_dot) + np.abs(a[1] * self.robot.gamma_dot)) # work torque*angular_velocity
- 0.01 * (np.abs(a[0]) + np.abs(a[1])) # stall torque require some energy
)
stuck_joint_cost = -0.1 if np.abs(np.abs(self.robot.gamma) - 1) < 0.01 else 0.0
self.rewards = [float(self.potential - potential_old), float(electricity_cost), float(stuck_joint_cost)]
self.HUD(state, a, False)
return state, sum(self.rewards), False, {}
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
| 1,556
| 38.923077
| 131
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/manipulation/striker_env.py
|
from pybulletgym.envs.mujoco.envs.env_bases import BaseBulletEnv
from pybulletgym.envs.mujoco.robots.manipulators.striker import Striker
from pybulletgym.envs.mujoco.scenes.scene_bases import SingleRobotEmptyScene
import numpy as np
class StrikerBulletEnv(BaseBulletEnv):
def __init__(self):
self.robot = Striker()
BaseBulletEnv.__init__(self, self.robot)
self._striked = False
self._min_strike_dist = np.inf
self.strike_threshold = 0.1
def create_single_player_scene(self, bullet_client):
return SingleRobotEmptyScene(bullet_client, gravity=9.81, timestep=0.0020, frame_skip=5)
def _step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
potential_old = self.potential
self.potential = self.robot.calc_potential()
joint_vel = np.array([
self.robot.shoulder_pan_joint.get_velocity(),
self.robot.shoulder_lift_joint.get_velocity(),
self.robot.upper_arm_roll_joint.get_velocity(),
self.robot.elbow_flex_joint.get_velocity(),
            self.robot.forearm_roll_joint.get_velocity(),  # fifth joint of the arm; was a duplicated upper_arm_roll_joint, matching the pusher's joint list
self.robot.wrist_flex_joint.get_velocity(),
self.robot.wrist_roll_joint.get_velocity()
])
action_product = np.matmul(np.abs(a), np.abs(joint_vel))
action_sum = np.sum(a)
electricity_cost = (
-0.10 * action_product # work torque*angular_velocity
- 0.01 * action_sum # stall torque require some energy
)
stuck_joint_cost = 0
for j in self.robot.ordered_joints:
if np.abs(j.current_relative_position()[0]) - 1 < 0.01:
stuck_joint_cost += -0.1
dist_object_finger = self.robot.object.pose().xyz() - self.robot.fingertip.pose().xyz()
reward_dist_vec = self.robot.object.pose().xyz() - self.robot.target.pose().xyz() # TODO: Should the object and target really belong to the robot? Maybe split this off
self._min_strike_dist = min(self._min_strike_dist, np.linalg.norm(reward_dist_vec))
if np.linalg.norm(dist_object_finger) < self.strike_threshold:
self._striked = True
self._strike_pos = self.robot.fingertip.pose().xyz()
if self._striked:
reward_near_vec = self.robot.object.pose().xyz() - self._strike_pos
else:
reward_near_vec = self.robot.object.pose().xyz() - self.robot.fingertip.pose().xyz()
reward_near = - np.linalg.norm(reward_near_vec)
reward_dist = - np.linalg.norm(self._min_strike_dist)
reward_ctrl = - np.square(a).sum()
self.rewards = [float(self.potential - potential_old), float(electricity_cost), float(stuck_joint_cost),
3 * reward_dist, 0.1 * reward_ctrl, 0.5 * reward_near]
self.HUD(state, a, False)
return state, sum(self.rewards), False, {}
def calc_potential(self):
return -100 * np.linalg.norm(self.to_target_vec)
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
| 3,262
| 39.283951
| 176
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/manipulation/__init__.py
| 0
| 0
| 0
|
py
|
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/manipulation/thrower_env.py
|
from pybulletgym.envs.mujoco.envs.env_bases import BaseBulletEnv
from pybulletgym.envs.mujoco.robots.manipulators.thrower import Thrower
from pybulletgym.envs.mujoco.scenes.scene_bases import SingleRobotEmptyScene
import numpy as np
class ThrowerBulletEnv(BaseBulletEnv):
def __init__(self):
self.robot = Thrower()
BaseBulletEnv.__init__(self, self.robot)
def create_single_player_scene(self, bullet_client):
return SingleRobotEmptyScene(bullet_client, gravity=0.0, timestep=0.0020, frame_skip=5)
def _step(self, a):
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # sets self.to_target_vec
potential_old = self.potential
self.potential = self.robot.calc_potential()
joint_vel = np.array([
self.robot.shoulder_pan_joint.get_velocity(),
self.robot.shoulder_lift_joint.get_velocity(),
self.robot.upper_arm_roll_joint.get_velocity(),
self.robot.elbow_flex_joint.get_velocity(),
            self.robot.forearm_roll_joint.get_velocity(),  # fifth joint of the arm; was a duplicated upper_arm_roll_joint, matching the pusher's joint list
self.robot.wrist_flex_joint.get_velocity(),
self.robot.wrist_roll_joint.get_velocity()
])
action_product = np.matmul(np.abs(a), np.abs(joint_vel))
action_sum = np.sum(a)
electricity_cost = (
-0.10 * action_product # work torque*angular_velocity
- 0.01 * action_sum # stall torque require some energy
)
stuck_joint_cost = 0
for j in self.robot.ordered_joints:
if np.abs(j.current_relative_position()[0]) - 1 < 0.01:
stuck_joint_cost += -0.1
object_xy = self.robot.object.pose().xyz()[:2]
target_xy = self.robot.target.pose().xyz()[:2]
if not self.robot._object_hit_ground and self.robot.object.pose().xyz()[2] < -0.25: # TODO: Should the object and target really belong to the robot? Maybe split this off
self.robot._object_hit_ground = True
self.robot._object_hit_location = self.robot.object.pose().xyz()
if self.robot._object_hit_ground:
object_hit_xy = self.robot._object_hit_location[:2]
reward_dist = -np.linalg.norm(object_hit_xy - target_xy)
else:
reward_dist = -np.linalg.norm(object_xy - target_xy)
reward_ctrl = - np.square(a).sum()
self.rewards = [float(self.potential - potential_old), float(electricity_cost), float(stuck_joint_cost),
reward_dist, 0.002 * reward_ctrl]
self.HUD(state, a, False)
return state, sum(self.rewards), False, {}
def camera_adjust(self):
x, y, z = self.robot.fingertip.pose().xyz()
x *= 0.5
y *= 0.5
self.camera.move_and_look_at(0.3, 0.3, 0.3, x, y, z)
| 2,849
| 39.140845
| 182
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/locomotion/ant_env.py
|
from pybulletgym.envs.mujoco.envs.locomotion.walker_base_env import WalkerBaseMuJoCoEnv
from pybulletgym.envs.mujoco.robots.locomotors.ant import Ant
class AntMuJoCoEnv(WalkerBaseMuJoCoEnv):
def __init__(self):
self.robot = Ant()
WalkerBaseMuJoCoEnv.__init__(self, self.robot)
| 299
| 32.333333
| 87
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/locomotion/walker_base_env.py
|
from pybulletgym.envs.mujoco.envs.env_bases import BaseBulletEnv
from pybulletgym.envs.roboschool.scenes import StadiumScene
import pybullet as p
import numpy as np
class WalkerBaseMuJoCoEnv(BaseBulletEnv):
def __init__(self, robot, render=False):
print("WalkerBase::__init__")
BaseBulletEnv.__init__(self, robot, render)
self.camera_x = 0
self.walk_target_x = 1e3 # kilometer away
self.walk_target_y = 0
self.stateId=-1
def create_single_player_scene(self, bullet_client):
self.stadium_scene = StadiumScene(bullet_client, gravity=9.8, timestep=0.0165/4, frame_skip=4)
return self.stadium_scene
def reset(self):
if self.stateId >= 0:
# print("restoreState self.stateId:",self.stateId)
self._p.restoreState(self.stateId)
r = BaseBulletEnv._reset(self)
self._p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,0)
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.robot.addToScene(self._p,
self.stadium_scene.ground_plane_mjcf)
self.ground_ids = set([(self.parts[f].bodies[self.parts[f].bodyIndex], self.parts[f].bodyPartIndex) for f in
self.foot_ground_object_names])
self._p.configureDebugVisualizer(p.COV_ENABLE_RENDERING, 1)
if self.stateId < 0:
self.stateId=self._p.saveState()
#print("saving state self.stateId:",self.stateId)
return r
def move_robot(self, init_x, init_y, init_z):
"Used by multiplayer stadium to move sideways, to another running lane."
self.cpp_robot.query_position()
pose = self.cpp_robot.root_part.pose()
pose.move_xyz(init_x, init_y, init_z) # Works because robot loads around (0,0,0), and some robots have z != 0 that is left intact
self.cpp_robot.set_pose(pose)
    electricity_cost = -2.0 # cost for using motors -- this parameter should be carefully tuned against reward for making progress, other values less important
stall_torque_cost = -0.1 # cost for running electric current through a motor even at zero rotational speed, small
foot_collision_cost = -1.0 # touches another leg, or other objects, that cost makes robot avoid smashing feet into itself
foot_ground_object_names = set(["floor"]) # to distinguish ground and other objects
joints_at_limit_cost = -0.1 # discourage stuck joints
def step(self, a):
if not self.scene.multiplayer: # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
self.robot.apply_action(a)
self.scene.global_step()
state = self.robot.calc_state() # also calculates self.joints_at_limit
alive = float(self.robot.alive_bonus(state[0]+self.robot.initial_z, self.robot.body_rpy[1])) # state[0] is body height above ground, body_rpy[1] is pitch
done = alive < 0
if not np.isfinite(state).all():
print("~INF~", state)
done = True
potential_old = self.potential
self.potential = self.robot.calc_potential()
progress = float(self.potential - potential_old)
feet_collision_cost = 0.0
for i,f in enumerate(self.robot.feet): # TODO: Maybe calculating feet contacts could be done within the robot code
contact_ids = set((x[2], x[4]) for x in f.contact_list())
# print("CONTACT OF '%d' WITH %d" % (contact_ids, ",".join(contact_names)) )
if self.ground_ids & contact_ids:
# see Issue 63: https://github.com/openai/roboschool/issues/63
# feet_collision_cost += self.foot_collision_cost
self.robot.feet_contact[i] = 1.0
else:
self.robot.feet_contact[i] = 0.0
#electricity_cost = self.electricity_cost * float(np.abs(a*self.robot.joint_speeds).mean()) # let's assume we have DC motor with controller, and reverse current braking
#electricity_cost += self.stall_torque_cost * float(np.square(a).mean())
joints_at_limit_cost = float(self.joints_at_limit_cost * self.robot.joints_at_limit)
debugmode = 0
if debugmode:
print("alive=")
print(alive)
print("progress")
print(progress)
#print("electricity_cost")
#print(electricity_cost)
print("joints_at_limit_cost")
print(joints_at_limit_cost)
print("feet_collision_cost")
print(feet_collision_cost)
self.rewards = [
alive,
progress,
#electricity_cost,
joints_at_limit_cost,
feet_collision_cost
]
if debugmode:
print("rewards=")
print(self.rewards)
print("sum rewards")
print(sum(self.rewards))
self.HUD(state, a, done)
self.reward += sum(self.rewards)
return state, sum(self.rewards), bool(done), {}
def camera_adjust(self):
x, y, z = self.robot.body_xyz
self.camera_x = 0.98*self.camera_x + (1-0.98)*x
self.camera.move_and_look_at(self.camera_x, y-2.0, 1.4, x, y, 1.0)
| 5,349
| 44.338983
| 179
|
py
|
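Illustrative-only arithmetic for the per-step reward composed in WalkerBaseMuJoCoEnv.step above (the electricity terms are commented out in this fork; the numbers below are made up):
alive = 1.0                        # alive_bonus while upright
progress = 0.8                     # potential - potential_old
joints_at_limit_cost = -0.1 * 2.0  # two joints at their limits
feet_collision_cost = 0.0          # disabled (see the Issue 63 note above)
reward = alive + progress + joints_at_limit_cost + feet_collision_cost
print(reward)  # 1.6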
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/locomotion/half_cheetah_env.py
|
from pybulletgym.envs.mujoco.envs.locomotion.walker_base_env import WalkerBaseMuJoCoEnv
from pybulletgym.envs.mujoco.robots.locomotors.half_cheetah import HalfCheetah
import numpy as np
class HalfCheetahMuJoCoEnv(WalkerBaseMuJoCoEnv):
def __init__(self):
self.robot = HalfCheetah()
WalkerBaseMuJoCoEnv.__init__(self, self.robot)
def step(self, a):
if not self.scene.multiplayer: # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
self.robot.apply_action(a)
self.scene.global_step()
potential = self.robot.calc_potential()
power_cost = -0.1 * np.square(a).sum()
state = self.robot.calc_state()
done = False
debugmode = 0
if debugmode:
print("potential=")
print(potential)
print("power_cost=")
print(power_cost)
self.rewards = [
potential,
power_cost
]
if debugmode:
print("rewards=")
print(self.rewards)
print("sum rewards")
print(sum(self.rewards))
self.HUD(state, a, done)
self.reward += sum(self.rewards)
return state, sum(self.rewards), bool(done), {}
| 1,316
| 30.357143
| 170
|
py
|
pybullet-gym
|
pybullet-gym-master/pybulletgym/envs/mujoco/envs/locomotion/hopper_env.py
|
from pybulletgym.envs.mujoco.envs.locomotion.walker_base_env import WalkerBaseMuJoCoEnv
from pybulletgym.envs.mujoco.robots.locomotors.hopper import Hopper
import numpy as np
class HopperMuJoCoEnv(WalkerBaseMuJoCoEnv):
def __init__(self):
self.robot = Hopper()
WalkerBaseMuJoCoEnv.__init__(self, self.robot)
def step(self, a):
if not self.scene.multiplayer: # if multiplayer, action first applied to all robots, then global step() called, then _step() for all robots with the same actions
self.robot.apply_action(a)
self.scene.global_step()
alive_bonus = 1.0
potential = self.robot.calc_potential()
power_cost = -1e-3 * np.square(a).sum()
state = self.robot.calc_state()
height, ang = state[0], state[1]
done = not (np.isfinite(state).all() and
(np.abs(state[2:]) < 100).all() and
(height > -0.3) and # height starts at 0 in pybullet
(abs(ang) < .2))
debugmode = 0
if debugmode:
print("potential=")
print(potential)
print("power_cost=")
print(power_cost)
self.rewards = [
potential,
alive_bonus,
power_cost
]
if debugmode:
print("rewards=")
print(self.rewards)
print("sum rewards")
print(sum(self.rewards))
self.HUD(state, a, done)
self.reward += sum(self.rewards)
return state, sum(self.rewards), bool(done), {}
| 1,583
| 31.326531
| 170
|
py
|