| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
PROTES
|
PROTES-main/calc/opti/opti_optimatt.py
|
import numpy as np
from opti import Opti

try:
    import teneva
    with_teneva = True
except Exception:
    with_teneva = False


class OptiOptimatt(Opti):
    def __init__(self, name='optimatt', *args, **kwargs):
        super().__init__(name, *args, **kwargs)

    def opts(self, dr_max=2):
        self.opts_dr_max = dr_max

    def _init(self):
        if not with_teneva:
            self.err = 'Need "teneva" module'
            return

    def _optimize(self):
        Y = teneva.rand(self.n, r=1)
        Y = teneva.cross(self.f_batch, Y, e=1.E-16, m=self.m_max,
                         dr_max=self.opts_dr_max)
        Y = teneva.truncate(Y, e=1.E-16)
        i_min, y_min, i_max, y_max = teneva.optima_tt(Y)
        self.f(i_max if self.is_max else i_min)
| 763
| 22.875
| 65
|
py
|
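The `_optimize` method above is a standard teneva cross-approximation pipeline: start from a random rank-1 TT tensor, refine it with `teneva.cross` under a sample budget, round the ranks, and take the TT optimum. A self-contained sketch of the same calls outside the wrapper; the toy target, grid sizes, and budget here are illustrative assumptions, not values from the repo:

import numpy as np
import teneva

d, n_grid, m_max = 4, 10, 10000
f_batch = lambda I: np.sum((np.asarray(I) - n_grid // 2)**2, axis=1)  # toy target on index space

Y = teneva.rand([n_grid] * d, r=1)                     # random rank-1 TT start
Y = teneva.cross(f_batch, Y, e=1.E-16, m=m_max, dr_max=2)
Y = teneva.truncate(Y, e=1.E-16)                       # round the TT ranks
i_min, y_min, i_max, y_max = teneva.optima_tt(Y)       # approximate argmin / argmax
print(i_min, y_min)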
PROTES
|
PROTES-main/calc/opti/opti_opo.py
|
import numpy as np
from opti import Opti

try:
    import nevergrad as ng
    with_ng = True
except Exception:
    with_ng = False


class OptiOPO(Opti):
    def __init__(self, name='opo', *args, **kwargs):
        super().__init__(name, *args, **kwargs)

    def _init(self):
        if not with_ng:
            self.err = 'Need "nevergrad" module'
            return

    def _optimize(self):
        self._optimize_ng(ng.optimizers.OnePlusOne)
| 453
| 18.73913
| 52
|
py
|
PROTES
|
PROTES-main/calc/opti/__init__.py
|
from .opti import Opti
from .opti_nb import OptiNB
from .opti_opo import OptiOPO
from .opti_optimatt import OptiOptimatt
from .opti_portfolio import OptiPortfolio
from .opti_pso import OptiPSO
from .opti_protes import OptiProtes
from .opti_spsa import OptiSPSA
from .opti_ttopt import OptiTTOpt
| 295
| 28.6
| 41
|
py
|
PROTES
|
PROTES-main/calc/opti/opti_spsa.py
|
import numpy as np
from opti import Opti

try:
    import nevergrad as ng
    with_ng = True
except Exception:
    with_ng = False


class OptiSPSA(Opti):
    def __init__(self, name='spsa', *args, **kwargs):
        super().__init__(name, *args, **kwargs)

    def _init(self):
        if not with_ng:
            self.err = 'Need "nevergrad" module'
            return

    def _optimize(self):
        self._optimize_ng(ng.optimizers.SPSA)
| 449
| 18.565217
| 53
|
py
|
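`OptiOPO` and `OptiSPSA` above only select the nevergrad optimizer class; the shared `_optimize_ng` helper lives in the `Opti` base class, which is not part of this dump. A sketch of what such a delegate typically looks like with nevergrad's ask/tell interface; the parametrization, toy target, and loop below are assumptions, not the repo's code:

import numpy as np
import nevergrad as ng

def f(i):                       # toy discrete target on a d-dimensional grid
    return float(np.sum((np.array(i) - 5)**2))

d, n_grid, budget = 4, 10, 200
param = ng.p.TransitionChoice(range(n_grid), repetitions=d)
opt = ng.optimizers.OnePlusOne(parametrization=param, budget=budget)   # or ng.optimizers.SPSA
for _ in range(budget):
    cand = opt.ask()
    opt.tell(cand, f(cand.value))
print(opt.provide_recommendation().value)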
PROTES
|
PROTES-main/animation/animate.py
|
import numpy as np
import os
import sys

from protes import animation


def func_build_ackley(n):
    """Ackley function. See https://www.sfu.ca/~ssurjano/ackley.html."""
    d = 2          # Dimension
    a = -32.768    # Grid lower bound
    b = +32.768    # Grid upper bound

    par_a = 20.    # Standard parameter values for Ackley function
    par_b = 0.2
    par_c = 2.*np.pi

    def func(I):
        """Target function: y=f(I); [samples,d] -> [samples]."""
        n_ext = np.repeat(n.reshape((1, -1)), I.shape[0], axis=0)
        X = I / (n_ext - 1) * (b - a) + a

        y1 = np.sqrt(np.sum(X**2, axis=1) / d)
        y1 = - par_a * np.exp(-par_b * y1)

        y2 = np.sum(np.cos(par_c * X), axis=1)
        y2 = - np.exp(y2 / d)

        y3 = par_a + np.exp(1.)

        return y1 + y2 + y3

    i_opt_real = np.array([int(n[0]/2), int(n[1]/2)])

    return func, a, b, i_opt_real


def func_build_simple(n):
    d = 2      # Dimension
    a = -2.    # Grid lower bound
    b = +2.    # Grid upper bound

    i_opt_real = np.array([int(n[0] * 0.5), int(n[1] * 0.5)])
    x_opt_real = i_opt_real / (n - 1) * (b - a) + a

    def func(I):
        """Target function: y=f(I); [samples,d] -> [samples]."""
        n_ext = np.repeat(n.reshape((1, -1)), I.shape[0], axis=0)
        X = I / (n_ext - 1) * (b - a) + a

        y0 = +0.
        y1 = -(X[:, 0] - x_opt_real[0])**2 - 2.0 * np.sin(X[:, 0])**2
        y2 = -(X[:, 1] - x_opt_real[1])**2 - 2.5 * np.sin(X[:, 1])**2

        return y0 + y1 + y2

    return func, a, b, i_opt_real


def func_build_two_optima(n, s=0.1, x0=4.):
    d = 2      # Dimension
    a = -5.    # Grid lower bound
    b = +5.    # Grid upper bound

    i_opt_real = None

    def func(I):
        """Target function: y=f(I); [samples,d] -> [samples]."""
        n_ext = np.repeat(n.reshape((1, -1)), I.shape[0], axis=0)
        X = I / (n_ext - 1) * (b - a) + a

        y1 = np.exp(-s*np.sum((X - x0)**2, axis=1))
        y2 = np.exp(-s*np.sum((X + x0)**2, axis=1))

        return y1 + y2

    return func, a, b, i_opt_real


def animate(task):
    """Animation of the PROTES work for the 2D case."""
    fpath = os.path.dirname(__file__) + f'/protes_{task}.gif'

    if task == 'ackley':
        n = 101
        f, a, b, i_opt_real = func_build_ackley(np.array([n, n]))
        animation(f, a, b, n, m=int(2.E+2), k=25, k_top=5, k_gd=10, lr=1.E-2,
                  i_opt_real=i_opt_real, fpath=fpath)

    elif task == 'simple':
        n = 101
        f, a, b, i_opt_real = func_build_simple(np.array([n, n]))
        animation(f, a, b, n, m=int(1.1E+3), k=100, k_top=10, k_gd=1, lr=5.E-2,
                  i_opt_real=i_opt_real, fpath=fpath, is_max=True)

    elif task == 'two_optima':
        n = 101
        f, a, b, i_opt_real = func_build_two_optima(np.array([n, n]))
        animation(f, a, b, n, m=int(5.E+2), k=25, k_top=1, k_gd=1, lr=1.E-1,
                  i_opt_real=i_opt_real, fpath=fpath, is_max=True)

    else:
        raise NotImplementedError(f'Task name "{task}" is not supported')


if __name__ == '__main__':
    task = sys.argv[1] if len(sys.argv) > 1 else 'simple'
    animate(task)
| 3,182
| 29.028302
| 79
|
py
|
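animate.py above is meant to be run as a script; a minimal usage sketch (assuming the animation/ directory is the working directory and protes is installed):

# From a shell:
#   python animate.py ackley
#   python animate.py            # defaults to 'simple'
# Or programmatically:
from animate import animate
animate('two_optima')            # writes protes_two_optima.gif next to the script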
DeblendingStarfields
|
DeblendingStarfields-master/setup.py
|
import setuptools

setuptools.setup(
    name='DeblendingStarfieldsDevRunjingLiu120',
    version='0.0.1',
    author='Runjing Liu',
    author_email='runjing_liu@berkeley.edu',
    packages=['deblending_runjingdev'],
    url='https://github.com/Runjing-Liu120/DeblendingStarfields',
    description='A package to reproduce experiment results in our deblending starfields paper',
    python_requires='>=3.6',
    install_requires=['astropy', 'numpy', 'pandas', 'matplotlib', 'scipy']
)
| 486
| 33.785714
| 95
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/flux_utils.py
|
import torch
import numpy as np

from torch import nn
from torch import optim

import deblending_runjingdev.utils as utils
from deblending_runjingdev.simulated_datasets_lib import plot_one_star
from deblending_runjingdev.wake_lib import PlanarBackground


class FluxEstimator(nn.Module):
    def __init__(self, observed_image, locs, n_stars,
                 psf,
                 planar_background_params,
                 fmin=1e-3,
                 alpha=0.5,
                 pad=5,
                 init_fluxes=None):

        super(FluxEstimator, self).__init__()

        self.pad = pad
        self.fmin = fmin

        # observed image is batchsize (or 1) x n_bands x slen x slen
        assert len(observed_image.shape) == 4
        self.observed_image = observed_image

        # batchsize
        assert len(n_stars) == locs.shape[0]
        batchsize = locs.shape[0]

        # get n_bands
        assert observed_image.shape[1] == psf.shape[0]
        self.n_bands = psf.shape[0]

        self.max_stars = locs.shape[1]
        assert locs.shape[2] == 2

        # boolean for stars being on
        self.is_on_array = utils.get_is_on_from_n_stars(n_stars, self.max_stars)

        # set star basis
        self.slen = observed_image.shape[-1]
        self.psf = psf
        self.star_basis = \
            plot_one_star(self.slen, locs.view(-1, 2), self.psf,
                          cached_grid=None).view(batchsize,
                                                 self.max_stars,
                                                 self.n_bands,
                                                 self.slen, self.slen) * \
            self.is_on_array[:, :, None, None, None]

        # get background
        assert planar_background_params.shape[0] == self.n_bands
        self.init_background_params = planar_background_params
        self.planar_background = PlanarBackground(image_slen=self.slen,
                            init_background_params=self.init_background_params)
        self.background = self.planar_background.forward().detach()

        if init_fluxes is None:
            self._init_fluxes(locs)
        else:
            self.init_fluxes = init_fluxes

        self.init_param = torch.log(self.init_fluxes.clamp(min=self.fmin + 1) - self.fmin)
        self.param = nn.Parameter(self.init_param.clone())

        self.alpha = alpha

        # TODO: pass these as an argument
        self.color_mean = 0.3
        self.color_var = 0.15**2

        self.init_loss = self.get_loss()

    def _init_fluxes(self, locs):
        batchsize = locs.shape[0]

        locs_indx = torch.round(locs * (self.slen - 1)).type(torch.long).clamp(
                                    max=self.slen - 2, min=2)

        sky_subtr_image = self.observed_image - self.background
        self.init_fluxes = torch.zeros(batchsize, self.max_stars, self.n_bands)

        for i in range(locs.shape[0]):
            if self.observed_image.shape[0] == 1:
                obs_indx = 0
            else:
                obs_indx = i

            # # take the min over a box of the location
            # init_fluxes_i = torch.zeros(9, self.max_stars, self.n_bands)
            # n = 0
            # for j in [-1, 0, 1]:
            #     for k in [-1, 0, 1]:
            #         init_fluxes_i[n] = sky_subtr_image[obs_indx, :,
            #                                 locs_indx[i, :, 0] + j,
            #                                 locs_indx[i, :, 1] + k].transpose(0, 1)
            #         n += 1
            #
            # self.init_fluxes[i] = init_fluxes_i.mean(0)

            self.init_fluxes[i] = \
                sky_subtr_image[obs_indx, :,
                                locs_indx[i, :, 0], locs_indx[i, :, 1]].transpose(0, 1)

        self.init_fluxes = self.init_fluxes / self.psf.view(self.n_bands, -1).max(1)[0][None, None, :]

    def forward(self, train_background=True):
        background = self.planar_background.forward()
        if not train_background:
            background = background.detach()

        fluxes = torch.exp(self.param[:, :, :, None, None]) + self.fmin
        recon_mean = (fluxes * self.star_basis).sum(1) + background

        return recon_mean.clamp(min=1e-6)

    def get_loss(self, train_background=True):
        # log likelihood terms
        recon_mean = self.forward(train_background)
        error = 0.5 * ((self.observed_image - recon_mean)**2 / recon_mean) + 0.5 * torch.log(recon_mean)
        assert (~torch.isnan(error)).all()
        neg_loglik = error[:, :, self.pad:(self.slen - self.pad), self.pad:(self.slen - self.pad)].sum()

        # prior terms
        log_flux = self.param + np.log(self.fmin)
        flux_prior = - (self.alpha + 1) * (log_flux[:, :, 0] * self.is_on_array).sum()
        if self.n_bands > 1:
            colors = 2.5 * (log_flux[:, :, 1:] - log_flux[:, :, 0:1]) / np.log(10.)
            color_prior = - 0.5 * (colors - self.color_mean)**2 / self.color_var
            flux_prior += (color_prior * self.is_on_array.unsqueeze(-1)).sum()
        assert ~torch.isnan(flux_prior)

        loss = neg_loglik - flux_prior

        return loss

    def optimize(self,
                 train_background=True,
                 max_outer_iter=10,
                 max_inner_iter=20,
                 tol=1e-3,
                 print_every=False):

        optimizer = optim.LBFGS(self.parameters(),
                                max_iter=max_inner_iter,
                                line_search_fn='strong_wolfe')

        def closure():
            optimizer.zero_grad()
            loss = self.get_loss(train_background)
            loss.backward()
            return loss

        old_loss = 1e16
        for i in range(max_outer_iter):
            loss = optimizer.step(closure)

            if print_every:
                print(loss)

            diff = (loss - old_loss).abs()
            if diff < (tol * self.init_loss):
                break

            old_loss = loss

    def return_fluxes(self):
        return torch.exp(self.param.data) + self.fmin
| 6,185
| 34.348571
| 104
|
py
|
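FluxEstimator above keeps every flux at least `fmin` by optimizing an unconstrained log-parameter; a minimal numeric check of the round trip used in `__init__` and `return_fluxes` (the sample values are arbitrary):

import torch

fmin = 1e-3
init_fluxes = torch.tensor([5.0, 0.0, 2.0])                  # raw guesses; one entry is zero
param = torch.log(init_fluxes.clamp(min=fmin + 1) - fmin)    # unconstrained parameter, as in __init__
fluxes = torch.exp(param) + fmin                             # inverse map, as in return_fluxes
print(fluxes)    # tensor([5.0000, 1.0010, 2.0000]); the clamped entry lands at fmin + 1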
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/image_utils.py
|
import torch
from torch import nn

import deblending_runjingdev.utils as utils
from deblending_runjingdev.which_device import device


# This function copied from
# https://gist.github.com/dem123456789/23f18fd78ac8da9615c347905e64fc78
def _extract_patches_2d(img, patch_shape, step=[1.0, 1.0], batch_first=False):
    patch_H, patch_W = patch_shape[0], patch_shape[1]

    if img.size(2) < patch_H:
        num_padded_H_Top = (patch_H - img.size(2)) // 2
        num_padded_H_Bottom = patch_H - img.size(2) - num_padded_H_Top
        padding_H = nn.ConstantPad2d((0, 0, num_padded_H_Top, num_padded_H_Bottom), 0)
        img = padding_H(img)

    if img.size(3) < patch_W:
        num_padded_W_Left = (patch_W - img.size(3)) // 2
        num_padded_W_Right = patch_W - img.size(3) - num_padded_W_Left
        padding_W = nn.ConstantPad2d((num_padded_W_Left, num_padded_W_Right, 0, 0), 0)
        img = padding_W(img)

    step_int = [0, 0]
    step_int[0] = int(patch_H * step[0]) if isinstance(step[0], float) else step[0]
    step_int[1] = int(patch_W * step[1]) if isinstance(step[1], float) else step[1]

    patches_fold_H = img.unfold(2, patch_H, step_int[0])
    if (img.size(2) - patch_H) % step_int[0] != 0:
        patches_fold_H = torch.cat((patches_fold_H,
            img[:, :, -patch_H:, :].permute(0, 1, 3, 2).unsqueeze(2)), dim=2)

    patches_fold_HW = patches_fold_H.unfold(3, patch_W, step_int[1])
    if (img.size(3) - patch_W) % step_int[1] != 0:
        patches_fold_HW = torch.cat((patches_fold_HW,
            patches_fold_H[:, :, :, -patch_W:, :].permute(0, 1, 2, 4, 3).unsqueeze(3)), dim=3)

    patches = patches_fold_HW.permute(2, 3, 0, 1, 4, 5)
    patches = patches.reshape(-1, img.size(0), img.size(1), patch_H, patch_W)
    if batch_first:
        patches = patches.permute(1, 0, 2, 3, 4)

    return patches


def tile_images(images, subimage_slen, step):
    # images should be batchsize x n_bands x slen x slen;
    # breaks up a large image into smaller tiles
    # of size subimage_slen x subimage_slen.
    # The output tensor is (batchsize * tiles per image) x n_bands x subimage_slen x subimage_slen,
    # where tiles per image is ((slen - subimage_slen) / step + 1)**2.
    # NOTE: input and output are torch tensors, not numpy arrays
    # (need the unfold command from torch)

    assert len(images.shape) == 4

    image_xlen = images.shape[2]
    image_ylen = images.shape[3]

    # my tile coords doesn't work otherwise ...
    assert (image_xlen - subimage_slen) % step == 0
    assert (image_ylen - subimage_slen) % step == 0

    n_bands = images.shape[1]
    for b in range(n_bands):
        image_tiles_b = _extract_patches_2d(images[:, b:(b + 1), :, :],
                                            patch_shape=[subimage_slen, subimage_slen],
                                            step=[step, step],
                                            batch_first=True).reshape(-1, 1, subimage_slen, subimage_slen)
        if b == 0:
            image_tiles = image_tiles_b
        else:
            image_tiles = torch.cat((image_tiles, image_tiles_b), dim=1)

    return image_tiles


def get_tile_coords(image_xlen, image_ylen, subimage_slen, step):
    # this function is used in conjunction with tile_images above;
    # it records the (x0, x1) indices each image tile comes from

    nx_tiles = ((image_xlen - subimage_slen) // step) + 1
    ny_tiles = ((image_ylen - subimage_slen) // step) + 1
    n_tiles = nx_tiles * ny_tiles

    return_coords = lambda i: [(i // ny_tiles) * step,
                               (i % ny_tiles) * step]

    tile_coords = torch.LongTensor([return_coords(i)
                                    for i in range(n_tiles)]).to(device)

    return tile_coords


def get_params_in_tiles(tile_coords, locs, fluxes, slen, subimage_slen,
                        edge_padding=0):
    # locs are the coordinates in the full image, in coordinates between 0-1
    assert torch.all(locs <= 1.)
    assert torch.all(locs >= 0.)

    n_tiles = tile_coords.shape[0]  # number of tiles in a full image
    fullimage_batchsize = locs.shape[0]  # number of full images

    subimage_batchsize = n_tiles * fullimage_batchsize  # total number of tiles

    max_stars = locs.shape[1]

    tile_coords = tile_coords.unsqueeze(0).unsqueeze(2).float()
    locs = locs * (slen - 1)
    which_locs_array = (locs.unsqueeze(1) > tile_coords + edge_padding - 0.5) & \
                       (locs.unsqueeze(1) < tile_coords - 0.5 + subimage_slen - edge_padding) & \
                       (locs.unsqueeze(1) != 0)
    which_locs_array = (which_locs_array[:, :, :, 0] * which_locs_array[:, :, :, 1]).float()

    tile_locs = \
        (which_locs_array.unsqueeze(3) * locs.unsqueeze(1) -
         (tile_coords + edge_padding - 0.5)).view(subimage_batchsize, max_stars, 2) / \
        (subimage_slen - 2 * edge_padding)
    tile_locs = torch.relu(tile_locs)  # by subtracting off, some are negative now; just set these to 0

    if fluxes is not None:
        assert fullimage_batchsize == fluxes.shape[0]
        assert max_stars == fluxes.shape[1]
        n_bands = fluxes.shape[2]
        tile_fluxes = \
            (which_locs_array.unsqueeze(3) * fluxes.unsqueeze(1)).view(subimage_batchsize, max_stars, n_bands)
    else:
        tile_fluxes = torch.zeros(tile_locs.shape[0], tile_locs.shape[1], 1)
        n_bands = 1

    # sort locs so all the zeros are at the end
    is_on_array = which_locs_array.view(subimage_batchsize, max_stars).type(torch.bool).to(device)
    n_stars_per_tile = is_on_array.float().sum(dim=1).type(torch.LongTensor).to(device)

    is_on_array_sorted = utils.get_is_on_from_n_stars(n_stars_per_tile, n_stars_per_tile.max())

    indx = is_on_array_sorted.clone()
    indx[indx == 1] = torch.nonzero(is_on_array, as_tuple=False)[:, 1]

    tile_fluxes = torch.gather(tile_fluxes, dim=1, index=indx.unsqueeze(2).repeat(1, 1, n_bands)) * \
                  is_on_array_sorted.float().unsqueeze(2)
    tile_locs = torch.gather(tile_locs, dim=1, index=indx.unsqueeze(2).repeat(1, 1, 2)) * \
                is_on_array_sorted.float().unsqueeze(2)

    tile_is_on_array = is_on_array_sorted

    return tile_locs, tile_fluxes, n_stars_per_tile, tile_is_on_array


def get_full_params_from_tile_params(tile_locs, tile_fluxes,
                                     tile_coords,
                                     full_slen,
                                     stamp_slen,
                                     edge_padding,
                                     # TODO: default is to assume the full image is square;
                                     # make this a systematic change.
                                     full_slen2=None):
    # off stars should have tile_locs == 0 and tile_fluxes == 0

    assert (tile_fluxes.shape[0] % tile_coords.shape[0]) == 0
    batchsize = int(tile_fluxes.shape[0] / tile_coords.shape[0])

    assert (tile_fluxes.shape[0] % batchsize) == 0
    n_stars_in_batch = int(tile_fluxes.shape[0] * tile_fluxes.shape[1] / batchsize)

    n_bands = tile_fluxes.shape[2]
    fluxes = tile_fluxes.view(batchsize, n_stars_in_batch, n_bands)

    scale = (stamp_slen - 2 * edge_padding)
    bias = tile_coords.repeat(batchsize, 1).unsqueeze(1).float() + edge_padding - 0.5
    if full_slen2 is None:
        locs = (tile_locs * scale + bias) / (full_slen - 1)
    else:
        locs = (tile_locs * scale + bias) / torch.Tensor([[[full_slen - 1, full_slen2 - 1]]]).to(device)

    locs = locs.view(batchsize, n_stars_in_batch, 2)

    tile_is_on_bool = (fluxes > 0).any(2).float()  # if flux in any band is nonzero
    n_stars = torch.sum(tile_is_on_bool > 0, dim=1)

    # puts all the on stars in front
    is_on_array_full = utils.get_is_on_from_n_stars(n_stars, n_stars.max())
    indx = is_on_array_full.clone()
    indx[indx == 1] = torch.nonzero(tile_is_on_bool)[:, 1]

    fluxes = torch.gather(fluxes, dim=1, index=indx.unsqueeze(2).repeat(1, 1, n_bands)) * \
             is_on_array_full.float().unsqueeze(2)
    locs = torch.gather(locs, dim=1, index=indx.unsqueeze(2).repeat(1, 1, 2)) * \
           is_on_array_full.float().unsqueeze(2)

    return locs, fluxes, n_stars


def trim_images(images, edge_padding):
    slen = images.shape[-1] - edge_padding
    return images[:, :, edge_padding:slen, edge_padding:slen]
| 8,377
| 42.409326
| 126
|
py
|
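A shape check for the tiling helpers above; with slen=10, subimage_slen=4, step=2 the divisibility asserts in tile_images pass and the tile count matches get_tile_coords (toy tensors, CPU device assumed):

import torch

images = torch.rand(3, 2, 10, 10)          # batchsize=3, n_bands=2, slen=10
tiles = tile_images(images, subimage_slen=4, step=2)
coords = get_tile_coords(10, 10, 4, 2)     # nx_tiles = ny_tiles = (10 - 4) // 2 + 1 = 4
print(tiles.shape)                         # torch.Size([48, 2, 4, 4]), i.e. 3 * 16 tiles
print(coords.shape)                        # torch.Size([16, 2])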
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/daophot_catalog_lib.py
|
import torch
import numpy as np

from deblending_runjingdev.which_device import device
from deblending_runjingdev.sdss_dataset_lib import convert_mag_to_nmgy
from deblending_runjingdev.image_statistics_lib import get_locs_error


def load_daophot_results(data_file,
                         nelec_per_nmgy,
                         wcs,
                         slen=100,
                         x0=630,
                         x1=310):

    daophot_file = np.loadtxt(data_file)

    # load desired quantities
    daophot_ra = daophot_file[:, 4]
    daophot_decl = daophot_file[:, 5]
    daophot_mags = daophot_file[:, 22]

    # get pixel coordinates
    pix_coords = wcs.wcs_world2pix(daophot_ra, daophot_decl, 0, ra_dec_order=True)

    # get locations inside our square
    which_locs = (pix_coords[1] > x0) & (pix_coords[1] < (x0 + slen - 1)) & \
                 (pix_coords[0] > x1) & (pix_coords[0] < (x1 + slen - 1))

    # scale between zero and one
    daophot_locs0 = (pix_coords[1][which_locs] - x0) / (slen - 1)
    daophot_locs1 = (pix_coords[0][which_locs] - x1) / (slen - 1)
    daophot_locs = torch.Tensor(np.array([daophot_locs0, daophot_locs1]).transpose()).to(device)

    # get fluxes
    daophot_fluxes = convert_mag_to_nmgy(daophot_mags[which_locs]) * nelec_per_nmgy
    daophot_fluxes = torch.Tensor(daophot_fluxes).unsqueeze(1).to(device)

    return daophot_locs, daophot_fluxes


def align_daophot_locs(daophot_locs, daophot_fluxes, hubble_locs, hubble_fluxes,
                       slen=100,
                       align_on_logflux=4.5):

    # take only bright stars
    log10_fluxes = torch.log10(daophot_fluxes).squeeze()
    log10_hubble_fluxes = torch.log10(hubble_fluxes).squeeze()

    which_est_brightest = torch.nonzero(log10_fluxes > align_on_logflux).squeeze()
    which_hubble_brightest = torch.nonzero(log10_hubble_fluxes > align_on_logflux).squeeze()

    _daophot_locs = daophot_locs[which_est_brightest]
    _hubble_locs = hubble_locs[which_hubble_brightest]

    # match daophot locations to hubble locations
    perm = get_locs_error(_daophot_locs, _hubble_locs).argmin(0)

    # get error and estimate bias
    locs_err = (_daophot_locs - _hubble_locs[perm]) * (slen - 1)
    bias_x1 = locs_err[:, 1].median() / (slen - 1)
    bias_x0 = locs_err[:, 0].median() / (slen - 1)

    # shift by bias
    daophot_locs[:, 0] -= bias_x0
    daophot_locs[:, 1] -= bias_x1

    # after shifting, some locs fall below 0 or above 1; filter those out
    which_filter = (daophot_locs[:, 0] > 0) & (daophot_locs[:, 0] < 1) & \
                   (daophot_locs[:, 1] > 0) & (daophot_locs[:, 1] < 1)

    daophot_locs = daophot_locs[which_filter]
    daophot_fluxes = daophot_fluxes[which_filter]

    return daophot_locs, daophot_fluxes
| 2,859
| 38.178082
| 96
|
py
|
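The windowing in load_daophot_results keeps detections inside an slen x slen box anchored at (x0, x1) and rescales them to [0, 1]; a numpy-only sketch of that selection, with fake pixel coordinates standing in for the WCS output:

import numpy as np

slen, x0, x1 = 100, 630, 310
pix_row = np.array([650., 500., 700.])     # fake axis-0 pixel coordinates
pix_col = np.array([320., 340., 415.])     # fake axis-1 pixel coordinates

keep = (pix_row > x0) & (pix_row < x0 + slen - 1) & \
       (pix_col > x1) & (pix_col < x1 + slen - 1)
locs0 = (pix_row[keep] - x0) / (slen - 1)  # scaled to [0, 1]
locs1 = (pix_col[keep] - x1) / (slen - 1)
print(np.stack([locs0, locs1], axis=1))    # [[0.202 0.101]]; the other two fall outside the box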
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/utils.py
|
import torch
import numpy as np

from torch.distributions import normal, categorical

from deblending_runjingdev.which_device import device


# Functions to work with n_stars
def get_is_on_from_n_stars(n_stars, max_stars):
    assert len(n_stars.shape) == 1

    batchsize = len(n_stars)
    is_on_array = torch.zeros((batchsize, max_stars),
                              dtype=torch.long, device=device)
    for i in range(max_stars):
        is_on_array[:, i] = (n_stars > i)

    return is_on_array


def get_is_on_from_n_stars_2d(n_stars, max_stars):
    # n_stars is n_samples x batchsize
    assert not torch.any(torch.isnan(n_stars))
    assert torch.all(n_stars >= 0)
    assert torch.all(n_stars <= max_stars)

    n_samples = n_stars.shape[0]
    batchsize = n_stars.shape[1]

    is_on_array = torch.zeros((n_samples, batchsize, max_stars),
                              dtype=torch.long, device=device)
    for i in range(max_stars):
        is_on_array[:, :, i] = (n_stars > i)

    return is_on_array


def get_one_hot_encoding_from_int(z, n_classes):
    z = z.long()

    assert len(torch.unique(z)) <= n_classes

    z_one_hot = torch.zeros(len(z), n_classes, device=device)
    z_one_hot.scatter_(1, z.view(-1, 1), 1)
    z_one_hot = z_one_hot.view(len(z), n_classes)

    return z_one_hot


# sampling functions
def sample_class_weights(class_weights, n_samples=1):
    """
    Draw a sample from a Categorical variable with
    probabilities class_weights.
    """
    assert not torch.any(torch.isnan(class_weights))
    cat_rv = categorical.Categorical(probs=class_weights)
    return cat_rv.sample((n_samples, )).detach().squeeze()


def sample_normal(mean, logvar):
    return mean + torch.exp(0.5 * logvar) * torch.randn(mean.shape, device=device)


# log probabilities
def _logit(x, tol=1e-8):
    return torch.log(x + tol) - torch.log(1 - x + tol)


def eval_normal_logprob(x, mu, log_var):
    return - 0.5 * log_var - 0.5 * (x - mu)**2 / (torch.exp(log_var)) - 0.5 * np.log(2 * np.pi)


def eval_logitnormal_logprob(x, mu, log_var):
    logit_x = _logit(x)
    return eval_normal_logprob(logit_x, mu, log_var)


def eval_lognormal_logprob(x, mu, log_var, tol=1e-8):
    log_x = torch.log(x + tol)
    return eval_normal_logprob(log_x, mu, log_var)
| 2,374
| 29.448718
| 95
|
py
|
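A small demo of the on/off mask helper above (run with device = CPU; row i carries n_stars[i] leading ones):

import torch

n_stars = torch.tensor([0, 2, 3])
print(get_is_on_from_n_stars(n_stars, max_stars=3))
# tensor([[0, 0, 0],
#         [1, 1, 0],
#         [1, 1, 1]])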
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/starnet_lib.py
|
import torch
import torch.nn as nn

import numpy as np

import deblending_runjingdev.image_utils as image_utils
import deblending_runjingdev.utils as utils
from deblending_runjingdev.which_device import device

from itertools import product

from torch.distributions import poisson


class Flatten(nn.Module):
    def forward(self, tensor):
        return tensor.view(tensor.size(0), -1)


class Normalize2d(nn.Module):
    def forward(self, tensor):
        assert len(tensor.shape) == 4
        mean = tensor.view(tensor.shape[0], tensor.shape[1], -1).mean(2, keepdim=True).unsqueeze(-1)
        var = tensor.view(tensor.shape[0], tensor.shape[1], -1).var(2, keepdim=True).unsqueeze(-1)

        return (tensor - mean) / torch.sqrt(var + 1e-5)


class StarEncoder(nn.Module):
    def __init__(self, slen, ptile_slen, step, edge_padding,
                 n_bands, max_detections,
                 n_source_params=None,
                 momentum=0.5,
                 track_running_stats=True,
                 constrain_logflux_mean=False,
                 fmin=0.0):

        super(StarEncoder, self).__init__()

        # image parameters
        self.slen = slen  # dimension of full image: we assume it's square for now
        self.ptile_slen = ptile_slen  # dimension of the individual image padded tiles
        self.step = step  # number of pixels to shift every subimage
        self.n_bands = n_bands  # number of bands
        self.fmin = fmin
        self.constrain_logflux_mean = constrain_logflux_mean

        self.edge_padding = edge_padding

        self.tile_coords = image_utils.get_tile_coords(self.slen, self.slen,
                                                       self.ptile_slen, self.step)
        self.n_tiles = self.tile_coords.shape[0]

        # max number of detections
        self.max_detections = max_detections

        # convolutional NN parameters
        enc_conv_c = 20
        enc_kern = 3
        enc_hidden = 256

        # convolutional NN
        self.enc_conv = nn.Sequential(
            nn.Conv2d(self.n_bands, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.ReLU(),

            nn.Conv2d(enc_conv_c, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.BatchNorm2d(enc_conv_c, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),

            nn.Conv2d(enc_conv_c, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.ReLU(),

            nn.Conv2d(enc_conv_c, enc_conv_c, enc_kern,
                      stride=1, padding=1),
            nn.BatchNorm2d(enc_conv_c, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),

            Flatten()
        )

        # output dimension of convolutions
        conv_out_dim = \
            self.enc_conv(torch.zeros(1, n_bands, ptile_slen, ptile_slen)).size(1)

        # fully connected layers
        self.enc_fc = nn.Sequential(
            nn.Linear(conv_out_dim, enc_hidden),
            nn.BatchNorm1d(enc_hidden, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),

            nn.Linear(enc_hidden, enc_hidden),
            nn.BatchNorm1d(enc_hidden, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),

            nn.Linear(enc_hidden, enc_hidden),
            nn.BatchNorm1d(enc_hidden, momentum=momentum, track_running_stats=track_running_stats),
            nn.ReLU(),
        )

        if n_source_params is None:
            self.n_source_params = self.n_bands
            # we will take exp for fluxes
            self.constrain_source_params = True
        else:
            self.n_source_params = n_source_params
            # these can be anywhere in the reals
            self.constrain_source_params = False

        self.n_params_per_star = (4 + 2 * self.n_source_params)

        self.dim_out_all = \
            int(0.5 * self.max_detections * (self.max_detections + 1) * self.n_params_per_star +
                1 + self.max_detections)
        self._get_hidden_indices()

        self.enc_final = nn.Linear(enc_hidden, self.dim_out_all)
        self.log_softmax = nn.LogSoftmax(dim=1)

    ############################
    # The layers of our neural network
    ############################
    def _forward_to_pooled_hidden(self, image):
        # forward to the layer that is shared by all n_stars
        log_img = torch.log(image - image.min() + 1.)
        h = self.enc_conv(log_img)

        return self.enc_fc(h)

    def get_var_params_all(self, image_ptiles):
        # concatenate all output parameters for all possible n_stars
        h = self._forward_to_pooled_hidden(image_ptiles)
        return self.enc_final(h)

    ######################
    # Forward modules
    ######################
    def forward(self, image_ptiles, n_stars=None):
        # pass through neural network
        h = self.get_var_params_all(image_ptiles)

        # get probability of n_stars
        log_probs_n = self.get_logprob_n_from_var_params(h)

        if n_stars is None:
            n_stars = torch.argmax(log_probs_n, dim=1)

        # extract parameters
        loc_mean, loc_logvar, \
            log_flux_mean, log_flux_logvar = \
                self.get_var_params_for_n_stars(h, n_stars)

        return loc_mean, loc_logvar, \
            log_flux_mean, log_flux_logvar, log_probs_n

    def get_logprob_n_from_var_params(self, h):
        free_probs = h[:, self.prob_indx]
        return self.log_softmax(free_probs)

    def get_var_params_for_n_stars(self, h, n_stars):
        if len(n_stars.shape) == 1:
            n_stars = n_stars.unsqueeze(0)
            squeeze_output = True
        else:
            squeeze_output = False

        # this class takes in an array of n_stars, n_samples x batchsize
        assert h.shape[1] == self.dim_out_all
        assert h.shape[0] == n_stars.shape[1]

        n_samples = n_stars.shape[0]
        batchsize = h.size(0)

        _h = torch.cat((h, torch.zeros(batchsize, 1, device=device)), dim=1)

        loc_logit_mean = torch.gather(_h, 1, self.locs_mean_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))
        loc_logvar = torch.gather(_h, 1, self.locs_var_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))

        log_flux_mean = torch.gather(_h, 1, self.fluxes_mean_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))
        log_flux_logvar = torch.gather(_h, 1, self.fluxes_var_indx_mat[n_stars.transpose(0, 1)].reshape(batchsize, -1))

        # reshape
        loc_logit_mean = loc_logit_mean.reshape(batchsize, n_samples, self.max_detections, 2).transpose(0, 1)
        loc_logvar = loc_logvar.reshape(batchsize, n_samples, self.max_detections, 2).transpose(0, 1)
        log_flux_mean = log_flux_mean.reshape(batchsize, n_samples, self.max_detections, self.n_source_params).transpose(0, 1)
        log_flux_logvar = log_flux_logvar.reshape(batchsize, n_samples, self.max_detections, self.n_source_params).transpose(0, 1)

        loc_mean = torch.sigmoid(loc_logit_mean) * (loc_logit_mean != 0).float()

        if self.constrain_logflux_mean:
            log_flux_mean = log_flux_mean ** 2

        if squeeze_output:
            return loc_mean.squeeze(0), loc_logvar.squeeze(0), \
                log_flux_mean.squeeze(0), log_flux_logvar.squeeze(0)
        else:
            return loc_mean, loc_logvar, \
                log_flux_mean, log_flux_logvar

    def _get_hidden_indices(self):
        self.locs_mean_indx_mat = \
            torch.full((self.max_detections + 1, 2 * self.max_detections),
                       self.dim_out_all, device=device, dtype=torch.long)
        self.locs_var_indx_mat = \
            torch.full((self.max_detections + 1, 2 * self.max_detections),
                       self.dim_out_all, device=device, dtype=torch.long)

        self.fluxes_mean_indx_mat = \
            torch.full((self.max_detections + 1, self.n_source_params * self.max_detections),
                       self.dim_out_all, device=device, dtype=torch.long)
        self.fluxes_var_indx_mat = \
            torch.full((self.max_detections + 1, self.n_source_params * self.max_detections),
                       self.dim_out_all, device=device, dtype=torch.long)

        self.prob_indx = torch.zeros(self.max_detections + 1, device=device).long()

        for n_detections in range(1, self.max_detections + 1):
            indx0 = int(0.5 * n_detections * (n_detections - 1) * self.n_params_per_star) + \
                    (n_detections - 1) + 1

            indx1 = (2 * n_detections) + indx0
            indx2 = (2 * n_detections) * 2 + indx0

            # indices for locations
            self.locs_mean_indx_mat[n_detections, 0:(2 * n_detections)] = torch.arange(indx0, indx1)
            self.locs_var_indx_mat[n_detections, 0:(2 * n_detections)] = torch.arange(indx1, indx2)

            indx3 = indx2 + (n_detections * self.n_source_params)
            indx4 = indx3 + (n_detections * self.n_source_params)

            # indices for fluxes
            self.fluxes_mean_indx_mat[n_detections, 0:(n_detections * self.n_source_params)] = torch.arange(indx2, indx3)
            self.fluxes_var_indx_mat[n_detections, 0:(n_detections * self.n_source_params)] = torch.arange(indx3, indx4)

            self.prob_indx[n_detections] = indx4

    ######################
    # Modules for tiling images and parameters
    ######################
    def get_image_ptiles(self, images, locs=None, fluxes=None,
                         clip_max_stars=False):
        assert len(images.shape) == 4  # should be batchsize x n_bands x slen x slen
        assert images.shape[1] == self.n_bands

        slen = images.shape[-1]

        if not (images.shape[-1] == self.slen):
            # get the coordinates
            tile_coords = image_utils.get_tile_coords(slen, slen,
                                                      self.ptile_slen,
                                                      self.step)
        else:
            # else, use the cached coordinates
            tile_coords = self.tile_coords

        batchsize = images.shape[0]

        image_ptiles = \
            image_utils.tile_images(images,
                                    self.ptile_slen,
                                    self.step)

        if (locs is not None) and (fluxes is not None):
            assert fluxes.shape[2] == self.n_source_params

            # get parameters in tiles as well
            tile_locs, tile_fluxes, tile_n_stars, tile_is_on_array = \
                image_utils.get_params_in_tiles(tile_coords,
                                                locs,
                                                fluxes,
                                                slen,
                                                self.ptile_slen,
                                                self.edge_padding)

            # if (self.weights is None) or (images.shape[0] != self.batchsize):
            #     self.weights = get_weights(n_stars.clamp(max = self.max_detections))

            if tile_locs.shape[1] < self.max_detections:
                n_pad = self.max_detections - tile_locs.shape[1]

                pad_zeros = torch.zeros(tile_locs.shape[0], n_pad, tile_locs.shape[-1], device=device)
                tile_locs = torch.cat((tile_locs, pad_zeros), dim=1)

                pad_zeros2 = torch.zeros(tile_fluxes.shape[0], n_pad, tile_fluxes.shape[-1], device=device)
                tile_fluxes = torch.cat((tile_fluxes, pad_zeros2), dim=1)

                pad_zeros3 = torch.zeros((tile_fluxes.shape[0], n_pad), dtype=torch.long, device=device)
                tile_is_on_array = torch.cat((tile_is_on_array, pad_zeros3), dim=1)

            if clip_max_stars:
                tile_n_stars = tile_n_stars.clamp(max=self.max_detections)
                tile_locs = tile_locs[:, 0:self.max_detections, :]
                tile_fluxes = tile_fluxes[:, 0:self.max_detections, :]
                tile_is_on_array = tile_is_on_array[:, 0:self.max_detections]
        else:
            tile_locs = None
            tile_fluxes = None
            tile_n_stars = None
            tile_is_on_array = None

        return image_ptiles, tile_locs, tile_fluxes, \
            tile_n_stars, tile_is_on_array

    ######################
    # Modules to sample our variational distribution and get parameters on the full image
    ######################
    def _get_full_params_from_sampled_params(self, tile_locs_sampled,
                                             tile_fluxes_sampled,
                                             slen):

        n_samples = tile_locs_sampled.shape[0]
        n_image_ptiles = tile_locs_sampled.shape[1]

        assert self.n_source_params == tile_fluxes_sampled.shape[-1]

        if not (slen == self.slen):
            tile_coords = image_utils.get_tile_coords(slen, slen,
                                                      self.ptile_slen,
                                                      self.step)
        else:
            tile_coords = self.tile_coords

        assert (n_image_ptiles % tile_coords.shape[0]) == 0

        locs, fluxes, n_stars = \
            image_utils.get_full_params_from_tile_params(
                tile_locs_sampled.reshape(n_samples * n_image_ptiles, -1, 2),
                tile_fluxes_sampled.reshape(n_samples * n_image_ptiles, -1, self.n_source_params),
                tile_coords,
                slen,
                self.ptile_slen,
                self.edge_padding)

        return locs, fluxes, n_stars

    def sample_star_encoder(self, image,
                            n_samples=1,
                            return_map_n_stars=False,
                            return_map_star_params=False,
                            tile_n_stars=None,
                            return_log_q=False,
                            training=False,
                            enumerate_all_n_stars=False):

        # our sampling only works for one image at a time at the moment ...
        assert image.shape[0] == 1

        slen = image.shape[-1]

        # the image ptiles
        image_ptiles = self.get_image_ptiles(image,
                                             locs=None, fluxes=None)[0]

        # pass through NN
        h = self.get_var_params_all(image_ptiles)

        # get log probs for number of stars
        log_probs_nstar_tile = self.get_logprob_n_from_var_params(h)

        if not training:
            h = h.detach()
            log_probs_nstar_tile = log_probs_nstar_tile.detach()

        # sample number of stars
        if tile_n_stars is None:
            if return_map_n_stars:
                tile_n_stars_sampled = \
                    torch.argmax(log_probs_nstar_tile.detach(), dim=1).repeat(n_samples).view(n_samples, -1)
            elif enumerate_all_n_stars:
                all_combs = product(range(0, self.max_detections + 1),
                                    repeat=image_ptiles.shape[0])
                l = np.array([comb for comb in all_combs])
                tile_n_stars_sampled = torch.Tensor(l).type(torch.LongTensor).to(device)

                # repeat if necessary
                _n_samples = int(np.ceil(n_samples / tile_n_stars_sampled.shape[0]))
                tile_n_stars_sampled = tile_n_stars_sampled.repeat(_n_samples, 1)
                n_samples = tile_n_stars_sampled.shape[0]
            else:
                tile_n_stars_sampled = \
                    utils.sample_class_weights(torch.exp(log_probs_nstar_tile.detach()), n_samples).view(n_samples, -1)
        else:
            tile_n_stars_sampled = tile_n_stars.repeat(n_samples).view(n_samples, -1)

        # print(tile_n_stars_sampled)
        tile_n_stars_sampled = tile_n_stars_sampled.detach()
        is_on_array = utils.get_is_on_from_n_stars_2d(tile_n_stars_sampled,
                                                      self.max_detections)

        # get variational parameters: these are on image ptiles
        loc_mean, loc_logvar, \
            log_flux_mean, log_flux_logvar = \
                self.get_var_params_for_n_stars(h, tile_n_stars_sampled)

        if return_map_star_params:
            loc_sd = torch.zeros(loc_logvar.shape, device=device)
            log_flux_sd = torch.zeros(log_flux_logvar.shape, device=device)
        else:
            loc_sd = torch.exp(0.5 * loc_logvar)
            log_flux_sd = torch.exp(0.5 * log_flux_logvar)  # .clamp(max = 0.5)

        # sample locations
        _locs_randn = torch.randn(loc_mean.shape, device=device)
        tile_locs_sampled = (loc_mean + _locs_randn * loc_sd) * \
                            is_on_array.unsqueeze(3).float()
        tile_locs_sampled = tile_locs_sampled.clamp(min=0., max=1.)

        # sample fluxes
        _fluxes_randn = torch.randn(log_flux_mean.shape, device=device)
        tile_log_flux_sampled = log_flux_mean + _fluxes_randn * log_flux_sd
        tile_log_flux_sampled = tile_log_flux_sampled.clamp(max=np.log(1e12))

        if self.constrain_source_params:
            tile_fluxes_sampled = \
                (torch.exp(tile_log_flux_sampled) + self.fmin) * is_on_array.unsqueeze(3).float()
        else:
            tile_fluxes_sampled = \
                tile_log_flux_sampled * is_on_array.unsqueeze(3).float()

        # get parameters on full image
        locs, fluxes, n_stars = \
            self._get_full_params_from_sampled_params(tile_locs_sampled,
                                                      tile_fluxes_sampled,
                                                      slen)

        if return_log_q:
            log_q_locs = (utils.eval_normal_logprob(tile_locs_sampled, loc_mean,
                                                    loc_logvar) *
                          is_on_array.float().unsqueeze(3)).reshape(n_samples, -1).sum(1)
            log_q_fluxes = (utils.eval_normal_logprob(tile_log_flux_sampled, log_flux_mean,
                                                      log_flux_logvar) *
                            is_on_array.float().unsqueeze(3)).reshape(n_samples, -1).sum(1)
            log_q_n_stars = torch.gather(log_probs_nstar_tile, 1,
                                         tile_n_stars_sampled.transpose(0, 1)).transpose(0, 1).sum(1)
        else:
            log_q_locs = None
            log_q_fluxes = None
            log_q_n_stars = None

        return locs, fluxes, n_stars, \
            log_q_locs, log_q_fluxes, log_q_n_stars
| 18,839
| 40.045752
| 130
|
py
|
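The size of enc_final above follows a triangular layout: parameter blocks for n = 1..max_detections detections are stacked back to back, each block holding n * (4 + 2 * n_source_params) values, plus max_detections + 1 free logits for the n-star distribution. A quick check of the dim_out_all formula:

max_detections, n_source_params = 2, 1
n_params_per_star = 4 + 2 * n_source_params     # 2D loc mean + logvar, flux mean + logvar
dim_out_all = int(0.5 * max_detections * (max_detections + 1) * n_params_per_star
                  + 1 + max_detections)
print(dim_out_all)    # 0.5 * 2 * 3 * 6 + 3 = 21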
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/elbo_lib.py
|
import torch
import numpy as np

import time

from torch import nn

import deblending_runjingdev.starnet_lib as starnet_lib
from deblending_runjingdev.which_device import device


def get_neg_elbo(simulator, full_image, locs, fluxes, n_stars,
                 log_q_locs, log_q_fluxes, log_q_n_stars,
                 mean_stars,
                 pad=0,
                 clamp=None,
                 uniform_nstars=False):

    # get reconstruction
    recon = \
        simulator.draw_image_from_params(locs, fluxes, n_stars, add_noise=False)

    # option to mask outliers
    if clamp is not None:
        mask_bool = (((full_image - recon) / recon).abs() < clamp).detach().float()
    else:
        mask_bool = 1

    # get log likelihood
    loglik = (- 0.5 * (full_image - recon)**2 / recon - 0.5 * torch.log(recon)) * mask_bool
    padm = full_image.shape[-1] - pad
    loglik = loglik[:, :, pad:padm, pad:padm]
    loglik = loglik.sum(-1).sum(-1).sum(-1)

    # get entropy terms
    entropy = - log_q_locs - log_q_fluxes - log_q_n_stars

    # TODO: need to pass in prior parameters
    alpha = 0.5
    if uniform_nstars:
        log_prior_nstars = 0.0
    else:
        log_prior_nstars = n_stars * np.log(mean_stars) - torch.lgamma(n_stars.float() + 1)

    is_on_fluxes = (fluxes[:, :, 0] > 0.).detach().float()
    log_prior_fluxes = (- (alpha + 1) * torch.log(fluxes[:, :, 0] + 1e-16) *
                        is_on_fluxes).sum(-1)

    if fluxes.shape[-1] > 1:
        # TODO assumes two bands
        color = -2.5 * (torch.log10(fluxes[:, :, 0] + 1e-16) -
                        torch.log10(fluxes[:, :, 1] + 1e-16)) * is_on_fluxes
        log_prior_color = (- 0.5 * color**2).sum(-1)
    else:
        log_prior_color = 0.0

    log_prior = log_prior_nstars + log_prior_fluxes + log_prior_color

    return -(loglik + entropy + log_prior), -loglik, -log_prior, recon


def eval_star_encoder_on_elbo(full_image, star_encoder, simulator,
                              n_samples,
                              mean_stars,
                              return_map=False,
                              training=True,
                              clamp=None,
                              pad=0):
    # sample
    locs_sampled, fluxes_sampled, n_stars_sampled, \
        log_q_locs, log_q_fluxes, log_q_n_stars = \
            star_encoder.sample_star_encoder(full_image,
                                             n_samples=n_samples,
                                             training=training,
                                             return_map_n_stars=return_map,
                                             return_map_star_params=return_map,
                                             return_log_q=True)

    # get elbo
    neg_elbo, neg_loglik, neg_logprior, recon = \
        get_neg_elbo(simulator, full_image,
                     locs_sampled, fluxes_sampled, n_stars_sampled.detach(),
                     log_q_locs, log_q_fluxes, log_q_n_stars, mean_stars,
                     clamp=clamp,
                     pad=pad)

    return neg_elbo, neg_loglik, recon, log_q_n_stars


def save_elbo_results(full_image, star_encoder, simulator, mean_stars,
                      n_samples=100, pad=0):

    neg_elbo, neg_loglik, _, _ = \
        eval_star_encoder_on_elbo(full_image, star_encoder,
                                  simulator,
                                  n_samples=n_samples,
                                  mean_stars=mean_stars,
                                  training=False,
                                  pad=pad)

    map_neg_elbo, map_neg_loglik, _, _ = \
        eval_star_encoder_on_elbo(full_image, star_encoder,
                                  simulator,
                                  n_samples=1,
                                  mean_stars=mean_stars,
                                  return_map=True,
                                  training=False,
                                  pad=pad)

    print('neg elbo: {:.3e}; neg log-likelihood: {:.3e}'.format(neg_elbo.mean(), neg_loglik.mean()))
    print('neg elbo (map): {:.3e}; neg log-likelihood (map): {:.3e}'.format(map_neg_elbo.mean(),
                                                                            map_neg_loglik.mean()))

    return np.array([neg_elbo.detach().mean().cpu().numpy(),
                     neg_loglik.detach().mean().cpu().numpy(),
                     map_neg_elbo.detach().mean().cpu().numpy(),
                     map_neg_loglik.detach().mean().cpu().numpy(),
                     time.time()])


def get_pseudo_loss(full_image, star_encoder, simulator, mean_stars, n_samples,
                    pad=0):

    # get elbo
    neg_elbo, loglik, _, log_q_n_stars = \
        eval_star_encoder_on_elbo(full_image, star_encoder, simulator,
                                  n_samples,
                                  mean_stars,
                                  training=True,
                                  pad=pad)

    # get control variate
    cv, loglik, _, _ = \
        eval_star_encoder_on_elbo(full_image, star_encoder, simulator,
                                  n_samples,
                                  mean_stars,
                                  training=False,
                                  pad=pad)

    # get pseudo-loss
    ps_loss = ((neg_elbo.detach() - cv.detach()) * log_q_n_stars +
               neg_elbo).mean()

    return ps_loss


def get_pseudo_loss_all_sum(full_image, star_encoder, simulator,
                            mean_stars, n_samples,
                            clamp=None,
                            pad=0):

    locs_sampled, fluxes_sampled, n_stars_sampled, \
        log_q_locs, log_q_fluxes, log_q_n_stars = \
            star_encoder.sample_star_encoder(full_image,
                                             return_map_n_stars=False,
                                             return_map_star_params=False,
                                             n_samples=n_samples,
                                             return_log_q=True,
                                             training=True,
                                             enumerate_all_n_stars=True)

    # get elbo
    neg_elbo, neg_loglik, neg_logprior, recon = \
        get_neg_elbo(simulator, full_image,
                     locs_sampled, fluxes_sampled, n_stars_sampled.detach(),
                     log_q_locs, log_q_fluxes, log_q_n_stars, mean_stars,
                     clamp=clamp,
                     pad=pad)

    return (neg_elbo * log_q_n_stars.exp()).sum() / n_samples


def loss_on_true_nstars(full_image, star_encoder, simulator,
                        mean_stars, n_samples,
                        true_locs, true_fluxes,
                        clamp=None,
                        pad=0):

    image_ptiles, tile_locs, tile_fluxes, \
        tile_n_stars, tile_is_on_array = \
            star_encoder.get_image_ptiles(full_image,
                                          true_locs.unsqueeze(0),
                                          true_fluxes.unsqueeze(0))

    locs_sampled, fluxes_sampled, n_stars_sampled, \
        log_q_locs, log_q_fluxes, log_q_n_stars = \
            star_encoder.sample_star_encoder(full_image,
                                             return_map_star_params=False,
                                             n_samples=n_samples,
                                             return_log_q=True,
                                             training=True,
                                             tile_n_stars=tile_n_stars)

    # get elbo
    neg_elbo, neg_loglik, neg_logprior, recon = \
        get_neg_elbo(simulator, full_image,
                     locs_sampled, fluxes_sampled, n_stars_sampled.detach(),
                     log_q_locs, log_q_fluxes, log_q_n_stars, mean_stars,
                     clamp=clamp,
                     pad=pad)

    return neg_elbo.mean()
| 8,194
| 40.180905
| 107
|
py
|
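get_pseudo_loss above is a score-function (REINFORCE) surrogate: the term (neg_elbo.detach() - cv.detach()) * log_q_n_stars carries the gradient through the discrete n-star choice, with cv acting as a control variate. A standalone toy illustration of that surrogate on a categorical variable; the toy losses and baseline are assumptions, not the repo's model:

import torch

logits = torch.zeros(3, requires_grad=True)
log_q = torch.log_softmax(logits, dim=0)
loss_per_cat = torch.tensor([3.0, 1.0, 2.0])     # pretend per-category negative ELBO
baseline = loss_per_cat.mean()                   # control variate, like cv above

k = torch.distributions.Categorical(logits=logits).sample()
surrogate = (loss_per_cat[k] - baseline).detach() * log_q[k]
surrogate.backward()
print(logits.grad)    # unbiased single-sample estimate of d E[loss] / d logits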
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/wake_lib.py
|
import torch
import torch.nn as nn
from torch import optim

import numpy as np

from deblending_runjingdev.simulated_datasets_lib import _get_mgrid, plot_multiple_stars
from deblending_runjingdev.psf_transform_lib import PowerLawPSF
import deblending_runjingdev.utils

from deblending_runjingdev.which_device import device

import time


def _sample_image(observed_image, sample_every=10):
    batchsize = observed_image.shape[0]
    n_bands = observed_image.shape[1]
    slen = observed_image.shape[-1]

    samples = torch.zeros(n_bands,
                          int(np.floor(slen / sample_every)),
                          int(np.floor(slen / sample_every)))

    for i in range(samples.shape[1]):
        for j in range(samples.shape[2]):
            x0 = i * sample_every
            x1 = j * sample_every
            samples[:, i, j] = \
                observed_image[:, :,
                    x0:(x0 + sample_every), x1:(x1 + sample_every)].reshape(
                        batchsize, n_bands, -1).min(2)[0].mean(0)

    return samples


def _fit_plane_to_background(background):
    assert len(background.shape) == 3
    n_bands = background.shape[0]
    slen = background.shape[-1]

    planar_params = np.zeros((n_bands, 3))
    for i in range(n_bands):
        y = background[i].flatten().detach().cpu().numpy()
        grid = _get_mgrid(slen).detach().cpu().numpy()

        x = np.ones((slen**2, 3))
        x[:, 1:] = np.array([grid[:, :, 0].flatten(), grid[:, :, 1].flatten()]).transpose()

        xtx = np.einsum('ki, kj -> ij', x, x)
        xty = np.einsum('ki, k -> i', x, y)

        planar_params[i, :] = np.linalg.solve(xtx, xty)

    return planar_params


class PlanarBackground(nn.Module):
    def __init__(self, init_background_params,
                 image_slen=101):

        super(PlanarBackground, self).__init__()

        assert len(init_background_params.shape) == 2
        self.n_bands = init_background_params.shape[0]

        self.init_background_params = init_background_params.clone()

        self.image_slen = image_slen

        # get grid
        _mgrid = _get_mgrid(image_slen).to(device)
        self.mgrid = torch.stack([_mgrid for i in range(self.n_bands)], dim=0)

        # initial weights
        self.params = nn.Parameter(init_background_params.clone())

    def forward(self):
        return self.params[:, 0][:, None, None] + \
            self.params[:, 1][:, None, None] * self.mgrid[:, :, :, 0] + \
            self.params[:, 2][:, None, None] * self.mgrid[:, :, :, 1]


class ModelParams(nn.Module):
    def __init__(self, observed_image,
                 init_psf_params,
                 init_background_params,
                 pad=5):

        super(ModelParams, self).__init__()

        self.pad = pad

        # observed image is batchsize (or 1) x n_bands x slen x slen
        assert len(observed_image.shape) == 4
        self.observed_image = observed_image
        self.slen = observed_image.shape[-1]

        # get n_bands
        assert observed_image.shape[1] == init_psf_params.shape[0]
        self.n_bands = init_psf_params.shape[0]

        # get psf
        self.init_psf_params = init_psf_params
        # if image slen is even, add one. psf dimension must be odd
        psf_slen = self.slen + ((self.slen % 2) == 0) * 1
        self.power_law_psf = PowerLawPSF(self.init_psf_params,
                                         image_slen=psf_slen)
        self.init_psf = self.power_law_psf.forward().detach().clone()
        self.psf = self.power_law_psf.forward()

        # set up initial background parameters
        if init_background_params is None:
            self._get_init_background()
        else:
            assert init_background_params.shape[0] == self.n_bands
            self.init_background_params = init_background_params
            self.planar_background = PlanarBackground(image_slen=self.slen,
                                init_background_params=self.init_background_params)

        self.init_background = self.planar_background.forward().detach()

        self.cached_grid = _get_mgrid(observed_image.shape[-1]).to(device)

    def _plot_stars(self, locs, fluxes, n_stars, psf):
        self.stars = plot_multiple_stars(self.slen, locs, n_stars,
                                         fluxes, psf, self.cached_grid)

    def _get_init_background(self, sample_every=25):
        sampled_background = _sample_image(self.observed_image, sample_every)
        self.init_background_params = torch.Tensor(_fit_plane_to_background(sampled_background)).to(device)
        self.planar_background = PlanarBackground(image_slen=self.slen,
                            init_background_params=self.init_background_params)

    def get_background(self):
        return self.planar_background.forward().unsqueeze(0)

    def get_psf(self):
        return self.power_law_psf.forward()

    def get_loss(self, use_cached_stars=False,
                 locs=None, fluxes=None, n_stars=None):

        background = self.get_background()

        if not use_cached_stars:
            assert locs is not None
            assert fluxes is not None
            assert n_stars is not None
            psf = self.get_psf()
            self._plot_stars(locs, fluxes, n_stars, psf)
        else:
            assert hasattr(self, 'stars')
            self.stars = self.stars.detach()

        recon_mean = (self.stars + background).clamp(min=1e-6)

        error = 0.5 * ((self.observed_image - recon_mean)**2 / recon_mean) + 0.5 * torch.log(recon_mean)

        loss = error[:, :, self.pad:(self.slen - self.pad),
                     self.pad:(self.slen - self.pad)].reshape(error.shape[0], -1).sum(1)

        return recon_mean, loss


def get_wake_loss(image, star_encoder, model_params, n_samples, run_map=False):
    locs_sampled, fluxes_sampled, n_stars_sampled = \
        star_encoder.sample_star_encoder(image,
                                         return_map_n_stars=run_map,
                                         return_map_star_params=run_map,
                                         n_samples=n_samples)[0:3]

    loss = model_params.get_loss(locs=locs_sampled.detach(),
                                 fluxes=fluxes_sampled.detach(),
                                 n_stars=n_stars_sampled.detach())[1].mean()

    return loss


def run_wake(image, star_encoder, init_psf_params,
             init_background_params,
             n_samples,
             out_filename,
             n_epochs=100,
             lr=1e-3,
             print_every=20,
             run_map=False):

    model_params = ModelParams(image,
                               init_psf_params,
                               init_background_params)

    avg_loss = 0.0
    counter = 0
    t0 = time.time()
    test_losses = []

    optimizer = optim.Adam([{'params': model_params.power_law_psf.parameters(),
                             'lr': lr},
                            {'params': model_params.planar_background.parameters(),
                             'lr': lr}])
    # optimizer = optim.LBFGS(model_params.parameters(),
    #                         line_search_fn = 'strong_wolfe')

    if run_map:
        n_samples = 1

    for epoch in range(1, n_epochs + 1):
        def closure():
            optimizer.zero_grad()
            loss = get_wake_loss(image, star_encoder, model_params,
                                 n_samples, run_map)
            loss.backward()
            return loss

        optimizer.step(closure)

        # avg_loss += loss.detach()
        # counter += 1

        if ((epoch % print_every) == 0) or (epoch == n_epochs):
            eval_loss = get_wake_loss(image, star_encoder, model_params,
                                      n_samples=1, run_map=True).detach()
            elapsed = time.time() - t0
            print('[{}] loss: {:0.4f} \t[{:.1f} seconds]'.format(
                epoch, eval_loss, elapsed))

            test_losses.append(eval_loss)
            np.savetxt(out_filename + '-wake_losses', test_losses)

            # reset
            avg_loss = 0.0
            counter = 0
            t0 = time.time()

    np.save(out_filename + '-powerlaw_psf_params',
            list(model_params.power_law_psf.parameters())[0].data.cpu().numpy())

    np.save(out_filename + '-planarback_params',
            list(model_params.planar_background.parameters())[0].data.cpu().numpy())

    map_loss = get_wake_loss(image, star_encoder, model_params,
                             n_samples=1, run_map=True)

    return model_params, map_loss
# class FluxParams(nn.Module):
#     def __init__(self, init_fluxes, fmin):
#         super(FluxParams, self).__init__()
#
#         self.fmin = fmin
#         self.init_flux_params = self._free_flux_params(init_fluxes)
#         self.flux_params = nn.Parameter(self.init_flux_params.clone())
#
#     def _free_flux_params(self, fluxes):
#         return torch.log(fluxes.clamp(min = self.fmin + 1) - self.fmin)
#
#     def get_fluxes(self):
#         return torch.exp(self.flux_params) + self.fmin
#
#
# class EstimateModelParams(nn.Module):
#     def __init__(self, observed_image, locs, n_stars,
#                  init_psf_params,
#                  init_background_params,
#                  init_fluxes = None,
#                  fmin = 1e-3,
#                  alpha = 0.5,
#                  pad = 5):
#
#         super(EstimateModelParams, self).__init__()
#
#         self.pad = pad
#         self.alpha = alpha
#         self.fmin = fmin
#         self.locs = locs
#         self.n_stars = n_stars
#
#         # observed image is batchsize (or 1) x n_bands x slen x slen
#         assert len(observed_image.shape) == 4
#
#         self.observed_image = observed_image
#         self.slen = observed_image.shape[-1]
#
#         # batchsize
#         assert len(n_stars) == locs.shape[0]
#         self.batchsize = locs.shape[0]
#
#         # get n_bands
#         assert observed_image.shape[1] == init_psf_params.shape[0]
#         self.n_bands = init_psf_params.shape[0]
#
#         # get psf
#         self.init_psf_params = init_psf_params
#         self.power_law_psf = PowerLawPSF(self.init_psf_params,
#                                          image_slen = self.slen)
#         self.init_psf = self.power_law_psf.forward().detach()
#
#         self.max_stars = locs.shape[1]
#         assert locs.shape[2] == 2
#
#         # boolean for stars being on
#         self.is_on_array = utils.get_is_on_from_n_stars(n_stars, self.max_stars)
#
#         # set up initial background parameters
#         if init_background_params is None:
#             self._get_init_background()
#         else:
#             assert init_background_params.shape[0] == self.n_bands
#             self.init_background_params = init_background_params
#
#         self.planar_background = PlanarBackground(image_slen=self.slen,
#                             init_background_params=self.init_background_params)
#
#         self.init_background = self.planar_background.forward().detach()
#
#         # initial flux parameters
#         if init_fluxes is None:
#             self._get_init_fluxes()
#         else:
#             self.init_fluxes = init_fluxes
#
#         self.flux_params_class = FluxParams(self.init_fluxes, self.fmin)
#
#         # TODO: pass these as an argument
#         self.color_mean = 0.3
#         self.color_var = 0.15**2
#
#         self.cached_grid = _get_mgrid(observed_image.shape[-1]).to(device)
#         self._set_star_basis(self.init_psf)
#
#     def _set_star_basis(self, psf):
#         self.star_basis = \
#             plot_one_star(self.slen, self.locs.view(-1, 2), psf,
#                           cached_grid = self.cached_grid).view(self.batchsize,
#                                                                self.max_stars,
#                                                                self.n_bands,
#                                                                self.slen, self.slen) * \
#             self.is_on_array[:, :, None, None, None]
#
#     def _get_init_background(self, sample_every = 25):
#         sampled_background = _sample_image(self.observed_image, sample_every)
#         self.init_background_params = torch.Tensor(_fit_plane_to_background(sampled_background)).to(device)
#
#     def _get_init_fluxes(self):
#         locs_indx = torch.round(self.locs * (self.slen - 1)).type(torch.long).clamp(
#                                     max = self.slen - 2, min = 2)
#
#         sky_subtr_image = self.observed_image - self.init_background
#         self.init_fluxes = torch.zeros(self.batchsize, self.max_stars, self.n_bands).to(device)
#
#         for i in range(self.locs.shape[0]):
#             if self.observed_image.shape[0] == 1:
#                 obs_indx = 0
#             else:
#                 obs_indx = i
#
#             # # take the min over a box of the location
#             # init_fluxes_i = torch.zeros(9, self.max_stars, self.n_bands)
#             # n = 0
#             # for j in [-1, 0, 1]:
#             #     for k in [-1, 0, 1]:
#             #         init_fluxes_i[n] = sky_subtr_image[obs_indx, :,
#             #                                 locs_indx[i, :, 0] + j,
#             #                                 locs_indx[i, :, 1] + k].transpose(0, 1)
#             #         n += 1
#             #
#             # self.init_fluxes[i] = init_fluxes_i.mean(0)
#
#             self.init_fluxes[i] = \
#                 sky_subtr_image[obs_indx, :,
#                                 locs_indx[i, :, 0], locs_indx[i, :, 1]].transpose(0, 1)
#
#         self.init_fluxes = self.init_fluxes / self.init_psf.view(self.n_bands, -1).max(1)[0][None, None, :]
#
#     def get_fluxes(self):
#         return self.flux_params_class.get_fluxes()
#
#     def get_background(self):
#         return self.planar_background.forward().unsqueeze(0)
#
#     def get_psf(self):
#         return self.power_law_psf.forward()
#
#     def get_loss(self, use_cached_star_basis = False):
#         background = self.get_background()
#         fluxes = self.get_fluxes()
#
#         if not use_cached_star_basis:
#             psf = self.get_psf()
#             self._set_star_basis(psf)
#         else:
#             self.star_basis = self.star_basis.detach()
#
#         recon_mean = (fluxes[:, :, :, None, None] * self.star_basis).sum(1) + \
#             background
#         recon_mean = recon_mean.clamp(min = 1)
#
#         error = 0.5 * ((self.observed_image - recon_mean)**2 / recon_mean) + 0.5 * torch.log(recon_mean)
#
#         neg_loglik = error[:, :, self.pad:(self.slen - self.pad), self.pad:(self.slen - self.pad)].sum()
#         assert ~torch.isnan(neg_loglik)
#
#         # prior terms
#         log_flux = torch.log(fluxes)
#         flux_prior = - (self.alpha + 1) * (log_flux[:, :, 0] * self.is_on_array).sum()
#         if self.n_bands > 1:
#             colors = 2.5 * (log_flux[:, :, 1:] - log_flux[:, :, 0:1]) / np.log(10.)
#             color_prior = - 0.5 * (colors - self.color_mean)**2 / self.color_var
#             flux_prior += (color_prior * self.is_on_array.unsqueeze(-1)).sum()
#         assert ~torch.isnan(flux_prior)
#
#         loss = neg_loglik - flux_prior
#
#         return recon_mean, loss
#
#     def _run_optimizer(self, optimizer, tol,
#                        use_cached_star_basis = False,
#                        max_iter = 20, print_every = False):
#
#         def closure():
#             optimizer.zero_grad()
#             loss = self.get_loss(use_cached_star_basis)[1]
#             loss.backward()
#             return loss
#
#         init_loss = optimizer.step(closure)
#         old_loss = init_loss.clone()
#
#         for i in range(1, max_iter):
#             loss = optimizer.step(closure)
#
#             if print_every:
#                 print(loss)
#
#             if (old_loss - loss) < (init_loss * tol):
#                 break
#
#             old_loss = loss
#
#         if max_iter > 1:
#             if i == (max_iter - 1):
#                 print('warning: max iterations reached')
#
#     def optimize_fluxes_background(self, max_iter = 10):
#         optimizer1 = optim.LBFGS(list(self.flux_params_class.parameters()) +
#                                  list(self.planar_background.parameters()),
#                                  max_iter = 10,
#                                  line_search_fn = 'strong_wolfe')
#
#         self._run_optimizer(optimizer1,
#                             tol = 1e-3,
#                             max_iter = max_iter,
#                             use_cached_star_basis = True)
#
#     def run_coordinate_ascent(self, tol = 1e-3,
#                               max_inner_iter = 10,
#                               max_outer_iter = 20):
#
#         old_loss = 1e16
#         init_loss = self.get_loss(use_cached_star_basis = True)[1].detach()
#
#         for i in range(max_outer_iter):
#             print('\noptimizing fluxes + background. ')
#             optimizer1 = optim.LBFGS(list(self.flux_params_class.parameters()) +
#                                      list(self.planar_background.parameters()),
#                                      max_iter = max_inner_iter,
#                                      line_search_fn = 'strong_wolfe')
#
#             self._run_optimizer(optimizer1, tol = 1e-3, max_iter = 1,
#                                 use_cached_star_basis = True)
#
#             print('loss: ', self.get_loss(use_cached_star_basis = True)[1].detach())
#
#             print('\noptimizing psf. ')
#             psf_optimizer = optim.LBFGS(list(self.power_law_psf.parameters()),
#                                         max_iter = max_inner_iter,
#                                         line_search_fn = 'strong_wolfe')
#
#             self._run_optimizer(psf_optimizer, tol = 1e-3, max_iter = 1,
#                                 use_cached_star_basis = False)
#
#             loss = self.get_loss(use_cached_star_basis = False)[1].detach()
#             print('loss: ', loss)
#
#             if (old_loss - loss) < (tol * init_loss):
#                 break
#
#             old_loss = loss
#
#         if max_outer_iter > 1:
#             if i == (max_outer_iter - 1):
#                 print('warning: max iterations reached')
| 18,610
| 36.147705
| 109
|
py
|
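_fit_plane_to_background above solves the per-band plane fit via the normal equations (x^T x) beta = x^T y; a numpy-only sketch on a synthetic plane with known coefficients, where a [-1, 1] meshgrid stands in for _get_mgrid (which is not in this dump):

import numpy as np

slen = 11
g = np.linspace(-1, 1, slen)
gx, gy = np.meshgrid(g, g, indexing='ij')
y = (5.0 + 2.0 * gx - 3.0 * gy).flatten()    # plane with known coefficients

x = np.ones((slen**2, 3))
x[:, 1] = gx.flatten()
x[:, 2] = gy.flatten()
xtx = np.einsum('ki, kj -> ij', x, x)
xty = np.einsum('ki, k -> i', x, y)
print(np.linalg.solve(xtx, xty))             # ~[ 5.  2. -3.]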
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/plotting_utils.py
|
import matplotlib.pyplot as plt
import torch
import numpy as np
import deblending_runjingdev.image_utils as image_utils
from deblending_runjingdev.which_device import device
def plot_image(fig, image,
true_locs = None, estimated_locs = None,
vmin = None, vmax = None,
add_colorbar = False,
global_fig = None,
diverging_cmap = False,
color = 'r', marker = 'x', alpha = 1):
# locations are coordinates in the image, on scale from 0 to 1
image = image.cpu()
slen = image.shape[-1]
if diverging_cmap:
if vmax is None:
vmax = image.abs().max()
im = fig.matshow(image, vmin = -vmax, vmax = vmax,
cmap=plt.get_cmap('bwr'))
else:
im = fig.matshow(image, vmin = vmin, vmax = vmax,
cmap=plt.cm.gray)
if not(true_locs is None):
true_locs = true_locs.cpu()
assert len(true_locs.shape) == 2
assert true_locs.shape[1] == 2
fig.scatter(x = true_locs[:, 1] * (slen - 1),
y = true_locs[:, 0] * (slen - 1),
color = 'b')
if not(estimated_locs is None):
estimated_locs = estimated_locs.cpu()
assert len(estimated_locs.shape) == 2
assert estimated_locs.shape[1] == 2
fig.scatter(x = estimated_locs[:, 1] * (slen - 1),
y = estimated_locs[:, 0] * (slen - 1),
color = color, marker = marker, alpha = alpha)
if add_colorbar:
assert global_fig is not None
global_fig.colorbar(im, ax = fig)
def plot_categorical_probs(log_prob_vec, fig):
n_cat = len(log_prob_vec)
points = [(i, torch.exp(log_prob_vec[i])) for i in range(n_cat)]
for pt in points:
# plot (x,y) pairs.
# vertical line: 2 x,y pairs: (a,0) and (a,b)
fig.plot([pt[0],pt[0]], [0,pt[1]], color = 'blue')
fig.plot(np.arange(n_cat),
torch.exp(log_prob_vec).detach().numpy(),
'o', markersize = 5, color = 'blue')
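# Sketch of intended use (hypothetical inputs): given log probabilities over
# the number of sources in a tile, e.g. log_probs of shape (max_detections + 1,),
#
#   fig, ax = plt.subplots()
#   plot_categorical_probs(log_probs, ax)
#
# draws a stem plot of the categorical distribution. Note that the `fig`
# argument is actually a matplotlib Axes.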
def plot_subimage(fig, full_image, full_est_locs, full_true_locs,
x0, x1, patch_slen,
vmin = None, vmax = None,
add_colorbar = False,
global_fig = None,
diverging_cmap = False,
color = 'r', marker = 'x', alpha = 1):
assert len(full_image.shape) == 2
# full_est_locs and full_true_locs are locations in the coordinates of the
# full image, in pixel units, scaled between 0 and 1
# trim image to subimage
image_patch = full_image[x0:(x0 + patch_slen), x1:(x1 + patch_slen)]
# get locations in the subimage
if full_est_locs is not None:
assert torch.all(full_est_locs <= 1)
assert torch.all(full_est_locs >= 0)
_full_est_locs = full_est_locs * (full_image.shape[-1] - 1)
which_est_locs = (_full_est_locs[:, 0] > x0) & \
(_full_est_locs[:, 0] < (x0 + patch_slen - 1)) & \
(_full_est_locs[:, 1] > x1) & \
(_full_est_locs[:, 1] < (x1 + patch_slen - 1))
shift = torch.Tensor([[x0, x1]]).to(device)
est_locs = (_full_est_locs[which_est_locs, :] - shift) / (patch_slen - 1)
else:
est_locs = None
which_est_locs = None
if full_true_locs is not None:
assert torch.all(full_true_locs <= 1)
assert torch.all(full_true_locs >= 0)
_full_true_locs = full_true_locs * (full_image.shape[-1] - 1)
which_true_locs = (_full_true_locs[:, 0] > x0) & \
(_full_true_locs[:, 0] < (x0 + patch_slen - 1)) & \
(_full_true_locs[:, 1] > x1) & \
(_full_true_locs[:, 1] < (x1 + patch_slen - 1))
shift = torch.Tensor([[x0, x1]]).to(device)
true_locs = (_full_true_locs[which_true_locs, :] - shift) / (patch_slen - 1)
else:
true_locs = None
which_true_locs = None
plot_image(fig, image_patch,
true_locs = true_locs,
estimated_locs = est_locs,
vmin = vmin, vmax = vmax,
add_colorbar = add_colorbar,
global_fig = global_fig,
diverging_cmap = diverging_cmap,
color = color, marker = marker, alpha = alpha)
return which_true_locs, which_est_locs
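# Sketch of intended use (hypothetical tensors): show a 10 x 10 patch of a
# full image with true (blue) and estimated (red) locations overlaid:
#
#   fig, ax = plt.subplots()
#   plot_subimage(ax, full_image, full_est_locs, full_true_locs,
#                 x0 = 20, x1 = 30, patch_slen = 10,
#                 add_colorbar = True, global_fig = fig)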
| 4,500
| 35.593496
| 84
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/sdss_dataset_lib.py
|
import pathlib
import os
import pickle
import numpy as np
from scipy.interpolate import RegularGridInterpolator
import scipy.stats as stats
import torch
from torch.utils.data import Dataset
from astropy.io import fits
from astropy.wcs import WCS
import matplotlib.pyplot as plt
from deblending_runjingdev.simulated_datasets_lib import _trim_psf
from deblending_runjingdev.flux_utils import FluxEstimator
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
from deblending_runjingdev.wake_lib import _sample_image, _fit_plane_to_background
from deblending_runjingdev.which_device import device
def _get_mgrid2(slen0, slen1):
offset0 = (slen0 - 1) / 2
offset1 = (slen1 - 1) / 2
x, y = np.mgrid[-offset0:(offset0 + 1), -offset1:(offset1 + 1)]
# return torch.Tensor(np.dstack((x, y))) / offset
return torch.Tensor(np.dstack((y, x))) / torch.Tensor([[[offset1, offset0]]])
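# e.g. _get_mgrid2(3, 5) returns a 3 x 5 x 2 grid of (x, y) coordinate pairs,
# each coordinate scaled to [-1, 1] -- the normalized convention expected by
# torch.nn.functional.grid_sample with align_corners = True (the first
# channel indexes the width axis).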
class SloanDigitalSkySurvey(Dataset):
# this is adapted from
# https://github.com/jeff-regier/celeste_net/blob/935fbaa96d8da01dd7931600dee059bf6dd11292/datasets.py#L10
# to run on a specified run, camcol, field, and band
# returns one 1 x 1489 x 2048 image
def __init__(self, sdssdir = '../sdss_stage_dir/',
run = 3900, camcol = 6, field = 269, bands = [2]):
super(SloanDigitalSkySurvey, self).__init__()
self.sdss_path = pathlib.Path(sdssdir)
self.rcfgs = []
self.bands = bands
# meta data for the run + camcol
pf_file = "photoField-{:06d}-{:d}.fits".format(run, camcol)
camcol_path = self.sdss_path.joinpath(str(run), str(camcol))
pf_path = camcol_path.joinpath(pf_file)
pf_fits = fits.getdata(pf_path)
fieldnums = pf_fits["FIELD"]
fieldgains = pf_fits["GAIN"]
# get desired field
for i in range(len(fieldnums)):
_field = fieldnums[i]
gain = fieldgains[i]
if _field == field:
self.rcfgs.append((run, camcol, field, gain))
self.items = [None] * len(self.rcfgs)
def __len__(self):
return len(self.rcfgs)
def __getitem__(self, idx):
if not self.items[idx]:
self.items[idx] = self.get_from_disk(idx)
return self.items[idx]
def get_from_disk(self, idx):
run, camcol, field, gain = self.rcfgs[idx]
camcol_dir = self.sdss_path.joinpath(str(run), str(camcol))
field_dir = camcol_dir.joinpath(str(field))
image_list = []
background_list = []
nelec_per_nmgy_list = []
calibration_list = []
gain_list = []
cache_path = field_dir.joinpath("cache.pkl")
# if cache_path.exists():
# print('loading cached sdss image from ', cache_path)
# return pickle.load(cache_path.open("rb"))
for b, bl in enumerate("ugriz"):
if not(b in self.bands):
continue
frame_name = "frame-{}-{:06d}-{:d}-{:04d}.fits".format(bl, run, camcol, field)
frame_path = str(field_dir.joinpath(frame_name))
print("loading sdss image from", frame_path)
frame = fits.open(frame_path)
calibration = frame[1].data
nelec_per_nmgy = gain[b] / calibration
(sky_small,) = frame[2].data["ALLSKY"]
(sky_x,) = frame[2].data["XINTERP"]
(sky_y,) = frame[2].data["YINTERP"]
small_rows = np.mgrid[0:sky_small.shape[0]]
small_cols = np.mgrid[0:sky_small.shape[1]]
sky_interp = RegularGridInterpolator((small_rows, small_cols), sky_small, method="nearest")
sky_y = sky_y.clip(0, sky_small.shape[0] - 1)
sky_x = sky_x.clip(0, sky_small.shape[1] - 1)
large_points = np.stack(np.meshgrid(sky_y, sky_x)).transpose()
large_sky = sky_interp(large_points)
large_sky_nelec = large_sky * gain[b]
pixels_ss_nmgy = frame[0].data
pixels_ss_nelec = pixels_ss_nmgy * nelec_per_nmgy
pixels_nelec = pixels_ss_nelec + large_sky_nelec
image_list.append(pixels_nelec)
background_list.append(large_sky_nelec)
gain_list.append(gain[b])
nelec_per_nmgy_list.append(nelec_per_nmgy)
calibration_list.append(calibration)
frame.close()
ret = {'image': np.stack(image_list),
'background': np.stack(background_list),
'nelec_per_nmgy': np.stack(nelec_per_nmgy_list),
'gain': np.stack(gain_list),
'calibration': np.stack(calibration_list)}
pickle.dump(ret, field_dir.joinpath("cache.pkl").open("wb+"))
return ret
def convert_mag_to_nmgy(mag):
return 10**((22.5 - mag) / 2.5)
def convert_nmgy_to_mag(nmgy):
return 22.5 - 2.5 * torch.log10(nmgy)
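# Round-trip check of the conversions: 22.5 mag corresponds to 1 nmgy by
# construction, and the two functions are inverses:
#
#   convert_mag_to_nmgy(22.5)                  # -> 1.0
#   convert_nmgy_to_mag(torch.tensor([1.0]))   # -> tensor([22.5000])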
def load_m2_data(sdss_dir = '../../sdss_stage_dir/',
hubble_dir = '../hubble_data/',
slen = 100,
x0 = 630,
x1 = 310,
f_min = 1000.):
# returns the SDSS image of M2 in the r and i bands
# along with the corresponding Hubble catalog
#####################
# Load SDSS data
#####################
run = 2583
camcol = 2
field = 136
sdss_data = SloanDigitalSkySurvey(sdss_dir,
run = run,
camcol = camcol,
field = field,
# returns the r and i band
bands = [2, 3])
# the full SDSS image, ~1500 x 2000 pixels
sdss_image_full = torch.Tensor(sdss_data[0]['image'])
sdss_background_full = torch.Tensor(sdss_data[0]['background'])
#####################
# load hubble catalog
#####################
hubble_cat_file = hubble_dir + \
'hlsp_acsggct_hst_acs-wfc_ngc7089_r.rdviq.cal.adj.zpt'
print('loading hubble data from ', hubble_cat_file)
    HTcat = np.loadtxt(hubble_cat_file, skiprows=1)
# hubble magnitude
hubble_rmag_full = HTcat[:,9]
# right ascension and declination
hubble_ra_full = HTcat[:,21]
hubble_dc_full = HTcat[:,22]
# convert hubble r.a and declination to pixel coordinates
# (0, 0) is top left of sdss_image_full
frame_name = "frame-{}-{:06d}-{:d}-{:04d}.fits".format('r', run, camcol, field)
field_dir = pathlib.Path(sdss_dir).joinpath(str(run), str(camcol), str(field))
frame_path = str(field_dir.joinpath(frame_name))
print('getting sdss coordinates from: ', frame_path)
hdulist = fits.open(str(frame_path))
wcs = WCS(hdulist['primary'].header)
# NOTE: pix_coordinates are (column x row), i.e. pix_coord[0] corresponds to a column
pix_coordinates = \
wcs.wcs_world2pix(hubble_ra_full, hubble_dc_full, 0, ra_dec_order = True)
hubble_locs_full_x0 = pix_coordinates[1] # the row of pixel
hubble_locs_full_x1 = pix_coordinates[0] # the column of pixel
# convert hubble magnitude to n_electron count
# only take r band
nelec_per_nmgy_full = sdss_data[0]['nelec_per_nmgy'][0].squeeze()
which_cols = np.floor(hubble_locs_full_x1 / len(nelec_per_nmgy_full)).astype(int)
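    # note: floor(x1 / len(nelec_per_nmgy_full)) is 0 for every star that
    # falls inside the frame, so the column-0 calibration is effectively
    # applied to all stars here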
hubble_nmgy = convert_mag_to_nmgy(hubble_rmag_full)
hubble_r_fluxes_full = hubble_nmgy * nelec_per_nmgy_full[which_cols]
#####################
# using hubble ground truth locations,
# align i-band with r-band
#####################
frame_name_i = "frame-{}-{:06d}-{:d}-{:04d}.fits".format('i',
run, camcol, field)
frame_path_i = str(field_dir.joinpath(frame_name_i))
print('\n aligning images. \n Getting sdss coordinates from: ', frame_path_i)
hdu = fits.open(str(frame_path_i))
wcs_other = WCS(hdu['primary'].header)
# get pixel coords
pix_coordinates_other = wcs_other.wcs_world2pix(hubble_ra_full,
hubble_dc_full, 0,
ra_dec_order = True)
# estimate the amount to shift
shift_x0 = np.median(hubble_locs_full_x0 - pix_coordinates_other[1]) / (sdss_image_full.shape[-2] - 1)
shift_x1 = np.median(hubble_locs_full_x1 - pix_coordinates_other[0]) / (sdss_image_full.shape[-1] - 1)
shift = torch.Tensor([[[[shift_x1, shift_x0 ]]]]) * 2
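    # grid_sample with align_corners = True uses coordinates in [-1, 1], where
    # one pixel step equals 2 / (slen - 1) -- hence the factor of 2 above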
# align image
grid = _get_mgrid2(sdss_image_full.shape[-2],
sdss_image_full.shape[-1]).unsqueeze(0) - shift
sdss_image_full[1] = \
torch.nn.functional.grid_sample(sdss_image_full[1].unsqueeze(0).unsqueeze(0),
grid, align_corners=True).squeeze()
##################
# Filter to desired subimage
##################
print('\n returning image at x0 = {}, x1 = {}'.format(x0, x1))
which_locs = (hubble_locs_full_x0 > x0) & (hubble_locs_full_x0 < (x0 + slen - 1)) & \
(hubble_locs_full_x1 > x1) & (hubble_locs_full_x1 < (x1 + slen - 1))
# just a subset
sdss_image = sdss_image_full[:, x0:(x0 + slen), x1:(x1 + slen)].to(device)
sdss_background = sdss_background_full[:, x0:(x0 + slen), x1:(x1 + slen)].to(device)
locs = np.array([hubble_locs_full_x0[which_locs] - x0,
hubble_locs_full_x1[which_locs] - x1]).transpose()
hubble_r_fluxes = torch.Tensor(hubble_r_fluxes_full[which_locs])
hubble_locs = torch.Tensor(locs) / (slen - 1)
hubble_fluxes = torch.stack([hubble_r_fluxes,
hubble_r_fluxes]).transpose(0, 1)
# filter by bright stars only
which_bright = hubble_fluxes[:, 0] > f_min
hubble_locs = hubble_locs[which_bright].to(device)
hubble_fluxes = hubble_fluxes[which_bright].to(device)
return sdss_image, sdss_background, \
hubble_locs, hubble_fluxes, \
sdss_data, wcs
| 10,043
| 37.482759
| 110
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/psf_transform_lib.py
|
import torch
import torch.nn as nn
from torch.nn.functional import unfold, softmax, pad
from astropy.io import fits
import deblending_runjingdev.image_utils as image_utils
from deblending_runjingdev.utils import eval_normal_logprob
from deblending_runjingdev.simulated_datasets_lib import _get_mgrid, plot_multiple_stars
from deblending_runjingdev.which_device import device
#######################
# Convolutional PSF transform
########################
class PsfLocalTransform(nn.Module):
def __init__(self, psf,
image_slen = 101,
kernel_size = 3,
init_bias = 5):
super(PsfLocalTransform, self).__init__()
assert len(psf.shape) == 3
self.n_bands = psf.shape[0]
assert psf.shape[1] == psf.shape[2]
self.psf_slen = psf.shape[-1]
# only implemented for this case atm
assert image_slen > psf.shape[1]
assert (image_slen % 2) == 1
assert (psf.shape[1] % 2) == 1
self.image_slen = image_slen
self.kernel_size = kernel_size
self.psf = psf.unsqueeze(0)
self.tile_psf()
# for renormalizing the PSF
self.normalization = psf.view(self.n_bands, -1).sum(1)
        # initialization
init_weight = torch.zeros(self.psf_slen ** 2, self.n_bands,\
kernel_size ** 2)
init_weight[:, :, 4] = init_bias
self.weight = nn.Parameter(init_weight)
def tile_psf(self):
psf_unfolded = unfold(self.psf,
kernel_size = self.kernel_size,
padding = (self.kernel_size - 1) // 2).squeeze(0).transpose(0, 1)
self.psf_tiled = psf_unfolded.view(psf_unfolded.shape[0], self.n_bands,
self.kernel_size**2)
def apply_weights(self, w):
tile_psf_transformed = torch.sum(w * self.psf_tiled, dim = 2).transpose(0, 1)
return tile_psf_transformed.view(self.n_bands, self.psf_slen,
self.psf_slen)
def forward(self):
weights_constrained = torch.nn.functional.softmax(self.weight, dim = 2)
psf_transformed = self.apply_weights(weights_constrained)
# TODO: this is experimental
# which_center = (self.psf.squeeze(0) > 1e-2).float()
# psf_transformed = psf_transformed * which_center + \
# (1 - which_center) * self.psf.squeeze(0)
# pad psf for full image
l_pad = (self.image_slen - self.psf_slen) // 2
psf_image = pad(psf_transformed, (l_pad, ) * 4)
psf_image_normalization = psf_image.view(self.n_bands, -1).sum(1)
return psf_image * (self.normalization / psf_image_normalization).unsqueeze(-1).unsqueeze(-1)
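# In effect, PsfLocalTransform learns, per PSF pixel and band, a convex
# combination (softmax weights) of that pixel's neighborhood; the
# initialization puts nearly all weight on the center entry (index 4, the
# center of the default 3 x 3 kernel), i.e. close to the identity map, and
# forward() rescales so each band keeps its original total flux.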
########################
# function for Power law PSF
########################
def get_psf_params(psfield_fit_file, bands):
psfield = fits.open(psfield_fit_file)
psf_params = torch.zeros(len(bands), 6)
for i in range(len(bands)):
band = bands[i]
sigma1 = psfield[6].data["psf_sigma1"][0][band] ** 2
sigma2 = psfield[6].data["psf_sigma2"][0][band] ** 2
sigmap = psfield[6].data["psf_sigmap"][0][band] ** 2
beta = psfield[6].data["psf_beta"][0][band]
b = psfield[6].data["psf_b"][0][band]
p0 = psfield[6].data["psf_p0"][0][band]
# I think these parameters are constrained to be positive
# take log; we will take exp later
psf_params[i] = torch.log(torch.Tensor([sigma1, sigma2, sigmap,
beta, b, p0]))
return psf_params
def psf_fun(r, sigma1, sigma2, sigmap, beta, b, p0):
term1 = torch.exp(-r**2 / (2 * sigma1))
term2 = b * torch.exp(-r**2 / (2 * sigma2))
term3 = p0 * (1 + r**2 / (beta * sigmap))**(-beta / 2)
return (term1 + term2 + term3) / (1 + b + p0)
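# psf_fun implements the SDSS psField composite profile (an inference from
# the parameter names): two Gaussian cores plus a power-law wing,
#
#   psf(r) = [ exp(-r^2 / (2 sigma1)) + b exp(-r^2 / (2 sigma2))
#              + p0 (1 + r^2 / (beta sigmap))^(-beta / 2) ] / (1 + b + p0)
#
# so psf(0) = 1; note that sigma1, sigma2 and sigmap enter as variances
# (they are squared in get_psf_params above).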
def get_psf(slen, psf_params, cached_radii_grid = None):
assert (slen % 2) == 1
if cached_radii_grid is None:
        grid = _get_mgrid(slen) * (slen - 1) / 2
radii_grid = (grid**2).sum(2).sqrt()
else:
radii_grid = cached_radii_grid
_psf_params = torch.exp(psf_params)
return psf_fun(radii_grid, _psf_params[0], _psf_params[1], _psf_params[2],
_psf_params[3], _psf_params[4], _psf_params[5])
class PowerLawPSF(nn.Module):
def __init__(self, init_psf_params,
psf_slen = 25,
image_slen = 101):
super(PowerLawPSF, self).__init__()
assert len(init_psf_params.shape) == 2
assert image_slen % 2 == 1, 'image_slen must be odd'
self.n_bands = init_psf_params.shape[0]
self.init_psf_params = init_psf_params.clone()
self.psf_slen = psf_slen
self.image_slen = image_slen
grid = _get_mgrid(self.psf_slen) * (self.psf_slen - 1) / 2
self.cached_radii_grid = (grid**2).sum(2).sqrt().to(device)
# initial weights
self.params = nn.Parameter(init_psf_params.clone())
# get normalization_constant
self.normalization_constant = torch.zeros(self.n_bands)
for i in range(self.n_bands):
self.normalization_constant[i] = \
1 / get_psf(self.psf_slen,
self.init_psf_params[i],
self.cached_radii_grid).sum()
# initial psf
self.init_psf = self.get_psf()
        # TODO: I believe this init_psf_sum is vacuous (should just be one)
self.init_psf_sum = self.init_psf.sum(-1).sum(-1).detach()
def get_psf(self):
# TODO make the psf function vectorized ...
for i in range(self.n_bands):
_psf = get_psf(self.psf_slen, self.params[i], self.cached_radii_grid) * \
self.normalization_constant[i]
if i == 0:
psf = _psf.unsqueeze(0)
else:
psf = torch.cat((psf, _psf.unsqueeze(0)))
assert (psf >= 0).all()
return psf
def forward(self):
psf = self.get_psf()
psf = psf * (self.init_psf_sum / psf.sum(-1).sum(-1)).unsqueeze(-1).unsqueeze(-1)
l_pad = (self.image_slen - self.psf_slen) // 2
return pad(psf, (l_pad, ) * 4)
| 6,346
| 32.582011
| 101
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/simulated_datasets_lib.py
|
import numpy as np
import scipy.stats as stats
import torch
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn.functional as F
import deblending_runjingdev.utils as utils
from deblending_runjingdev.which_device import device
def _trim_psf(psf, slen):
# crop the psf to length slen x slen
# centered at the middle
assert len(psf.shape) == 3
n_bands = psf.shape[0]
# dimension of the psf should be odd
psf_slen = psf.shape[2]
assert psf.shape[1] == psf_slen
assert (psf_slen % 2) == 1
assert (slen % 2) == 1
psf_center = (psf_slen - 1) / 2
assert psf_slen >= slen
r = np.floor(slen / 2)
l_indx = int(psf_center - r)
u_indx = int(psf_center + r + 1)
return psf[:, l_indx:u_indx, l_indx:u_indx]
def _expand_psf(psf, slen):
# pad the psf with zeros so that it is size slen
# first dimension of psf is number of bands
assert len(psf.shape) == 3
n_bands = psf.shape[0]
psf_slen = psf.shape[2]
assert psf.shape[1] == psf_slen
# dimension of psf should be odd
assert (psf_slen % 2) == 1
# sim for slen
assert (slen % 2) == 1
assert psf_slen <= slen
psf_expanded = torch.zeros((n_bands, slen, slen))
offset = int((slen - psf_slen) / 2)
psf_expanded[:, offset:(offset+psf_slen), offset:(offset+psf_slen)] = psf
return psf_expanded
def _get_mgrid(slen):
offset = (slen - 1) / 2
x, y = np.mgrid[-offset:(offset + 1), -offset:(offset + 1)]
# return torch.Tensor(np.dstack((x, y))) / offset
return (torch.Tensor(np.dstack((y, x))) / offset).to(device)
def plot_one_star(slen, locs, psf, cached_grid = None):
# locs is batchsize x 2: takes values between 0 and 1
# psf is a slen x slen tensor
# assert torch.all(locs <= 1)
# assert torch.all(locs >= 0)
# slen = psf.shape[-1]
# assert slen == psf.shape[-2]
assert len(psf.shape) == 3
n_bands = psf.shape[0]
batchsize = locs.shape[0]
assert locs.shape[1] == 2
if cached_grid is None:
grid = _get_mgrid(slen)
else:
assert cached_grid.shape[0] == slen
assert cached_grid.shape[1] == slen
grid = cached_grid
# scale locs so they take values between -1 and 1 for grid sample
locs = (locs - 0.5) * 2
locs = locs.index_select(1, torch.tensor([1, 0], device=device))
grid_loc = grid.view(1, slen, slen, 2) - locs.view(batchsize, 1, 1, 2)
star = F.grid_sample(psf.expand(batchsize, n_bands, -1, -1), grid_loc, align_corners = True)
    # note: grid_sample's bilinear interpolation approximately preserves the
    # PSF's unit sum, so no explicit renormalization is applied here
return star
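# Rendering note: plot_one_star shifts the PSF to each (sub-pixel) location
# by resampling it at grid - loc; plot_multiple_stars below sums these
# renderings weighted by flux to produce the mean image of a star field.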
def plot_multiple_stars(slen, locs, n_stars, fluxes, psf, cached_grid = None):
    # locs is batchsize x max_stars x 2 (x_loc, y_loc)
    # fluxes is batchsize x max_stars x n_bands
    # n_stars has length batchsize
    # psf is a n_bands x slen x slen tensor
n_bands = psf.shape[0]
batchsize = locs.shape[0]
max_stars = locs.shape[1]
assert locs.shape[2] == 2
assert fluxes.shape[0] == locs.shape[0]
assert fluxes.shape[1] == locs.shape[1]
assert fluxes.shape[2] == n_bands
assert len(n_stars) == batchsize
assert len(n_stars.shape) == 1
assert max(n_stars) <= locs.shape[1]
if cached_grid is None:
grid = _get_mgrid(slen)
else:
assert cached_grid.shape[0] == slen
assert cached_grid.shape[1] == slen
grid = cached_grid
stars = 0.
for n in range(max(n_stars)):
is_on_n = (n < n_stars).float()
locs_n = locs[:, n, :] * is_on_n.unsqueeze(1)
fluxes_n = fluxes[:, n, :]
one_star = plot_one_star(slen, locs_n, psf, cached_grid = grid)
stars += one_star * (is_on_n.unsqueeze(1) * fluxes_n).view(batchsize, n_bands, 1, 1)
return stars
def _draw_pareto(f_min, alpha, shape):
uniform_samples = torch.rand(shape, device = device)
return f_min / (1 - uniform_samples)**(1 / alpha)
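# _draw_pareto uses inverse-CDF sampling: if U ~ Uniform(0, 1), then
# X = f_min / (1 - U)^(1 / alpha) satisfies P(X > x) = (f_min / x)^alpha,
# i.e. X is Pareto with scale f_min and shape alpha.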
def _draw_pareto_maxed(f_min, f_max, alpha, shape):
# draw pareto conditioned on being less than f_max
pareto_samples = _draw_pareto(f_min, alpha, shape)
while torch.any(pareto_samples > f_max):
indx = pareto_samples > f_max
pareto_samples[indx] = \
_draw_pareto(f_min, alpha, torch.sum(indx))
return pareto_samples
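# The while-loop above is rejection sampling: draws exceeding f_max are
# redrawn, giving the Pareto law truncated to [f_min, f_max]. E.g., with
# illustrative values:
#
#   fluxes = _draw_pareto_maxed(1e3, 1e6, alpha = 0.5, shape = (100,))
#   assert (fluxes >= 1e3).all() and (fluxes <= 1e6).all()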
class StarSimulator:
def __init__(self, psf, slen, background, transpose_psf):
assert len(psf.shape) == 3
assert len(background.shape) == 3
assert background.shape[0] == psf.shape[0]
assert background.shape[1] == slen
assert background.shape[2] == slen
self.background = background
self.n_bands = psf.shape[0]
self.psf_og = psf
# side length of the image
self.slen = slen
# get psf shape to match image shape
# if slen is even, we still make psf dimension odd.
# otherwise, the psf won't have a peak in the center pixel.
_slen = slen + ((slen % 2) == 0) * 1
if (slen >= self.psf_og.shape[-1]):
self.psf = _expand_psf(self.psf_og, _slen).to(device)
else:
self.psf = _trim_psf(self.psf_og, _slen).to(device)
if transpose_psf:
self.psf = self.psf.transpose(1, 2)
self.cached_grid = _get_mgrid(slen)
def draw_image_from_params(self, locs, fluxes, n_stars,
add_noise = True):
images_mean = \
plot_multiple_stars(self.slen, locs, n_stars, fluxes,
self.psf, self.cached_grid) + \
self.background[None, :, :, :]
# add noise
if add_noise:
if torch.any(images_mean <= 0):
print('warning: image mean less than 0')
images_mean = images_mean.clamp(min = 1.0)
images = torch.sqrt(images_mean) * torch.randn(images_mean.shape, device = device) + \
images_mean
else:
images = images_mean
return images
class StarsDataset(Dataset):
def __init__(self, psf, n_images,
slen,
max_stars,
mean_stars,
min_stars,
f_min,
f_max,
background,
alpha,
draw_poisson = True,
transpose_psf = False,
add_noise = True):
self.slen = slen
self.n_bands = psf.shape[0]
self.simulator = StarSimulator(psf, slen, background, transpose_psf)
self.background = background[None, :, :, :]
# image parameters
self.max_stars = max_stars
self.mean_stars = mean_stars
self.min_stars = min_stars
self.add_noise = add_noise
self.draw_poisson = draw_poisson
# prior parameters
self.f_min = f_min
self.f_max = f_max
self.alpha = alpha
# dataset parameters
self.n_images = n_images
# set data
self.set_params_and_images()
def __len__(self):
return self.n_images
def __getitem__(self, idx):
return {'image': self.images[idx],
'background': self.background[0],
'locs': self.locs[idx],
'fluxes': self.fluxes[idx],
'n_stars': self.n_stars[idx]}
def draw_batch_parameters(self, batchsize, return_images = True):
if self.draw_poisson:
# draw number of stars
p = torch.full((1,), self.mean_stars, device=device, dtype = torch.float)
m = torch.distributions.Poisson(p)
n_stars = m.sample((batchsize, ))
n_stars = n_stars.clamp(max = self.max_stars,
min = self.min_stars).long().squeeze(-1)
else:
            # drawing a fixed (non-Poisson) number of stars is not implemented
            raise NotImplementedError('only draw_poisson = True is supported')
is_on_array = utils.get_is_on_from_n_stars(n_stars, self.max_stars)
# draw locations
locs = torch.rand((batchsize, self.max_stars, 2), device = device) * \
is_on_array.unsqueeze(2).float()
# draw fluxes
base_fluxes = _draw_pareto_maxed(self.f_min, self.f_max, alpha = self.alpha,
shape = (batchsize, self.max_stars))
if self.n_bands > 1:
# TODO: we may need to change the color priors
colors = torch.randn(batchsize, self.max_stars, self.n_bands - 1,
device = device) * 1.0
_fluxes = 10**( colors / 2.5) * base_fluxes.unsqueeze(2)
fluxes = torch.cat((base_fluxes.unsqueeze(2), _fluxes), dim = 2) * \
is_on_array.unsqueeze(2).float()
else:
fluxes = (base_fluxes * is_on_array.float()).unsqueeze(2)
if return_images:
images = self.simulator.draw_image_from_params(locs, fluxes, n_stars,
add_noise = self.add_noise)
return locs, fluxes, n_stars, images
else:
return locs, fluxes, n_stars
def set_params_and_images(self):
self.locs, self.fluxes, self.n_stars, self.images = \
self.draw_batch_parameters(self.n_images, return_images = True)
def load_dataset_from_params(psf, data_params,
n_images,
background,
draw_poisson = True,
transpose_psf = False,
add_noise = True):
# data parameters
slen = data_params['slen']
f_min = data_params['f_min']
f_max = data_params['f_max']
alpha = data_params['alpha']
max_stars = data_params['max_stars']
mean_stars = data_params['mean_stars']
min_stars = data_params['min_stars']
# draw data
return StarsDataset(psf,
n_images,
slen = slen,
f_min=f_min,
f_max=f_max,
max_stars = max_stars,
mean_stars = mean_stars,
min_stars = min_stars,
alpha = alpha,
background = background,
draw_poisson = draw_poisson,
transpose_psf = transpose_psf,
add_noise = add_noise)
| 10,604
| 30.751497
| 98
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/image_statistics_lib.py
|
import torch
import numpy as np
from deblending_runjingdev.sdss_dataset_lib import convert_nmgy_to_mag
from deblending_runjingdev.which_device import device
def filter_params(locs, fluxes, slen, pad = 5):
assert len(locs.shape) == 2
if fluxes is not None:
assert len(fluxes.shape) == 1
assert len(fluxes) == len(locs)
_locs = locs * (slen - 1)
which_params = (_locs[:, 0] > pad) & (_locs[:, 0] < (slen - pad)) & \
(_locs[:, 1] > pad) & (_locs[:, 1] < (slen - pad))
if fluxes is not None:
return locs[which_params], fluxes[which_params]
else:
return locs[which_params], None
def get_locs_error(locs, true_locs):
# get matrix of Linf error in locations
# truth x estimated
return torch.abs(locs.unsqueeze(0) - true_locs.unsqueeze(1)).max(2)[0]
def get_fluxes_error(fluxes, true_fluxes):
# get matrix of l1 error in log flux
# truth x estimated
return torch.abs(torch.log10(fluxes).unsqueeze(0) - \
torch.log10(true_fluxes).unsqueeze(1))
def get_mag_error(mags, true_mags):
# get matrix of l1 error in magnitude
# truth x estimated
return torch.abs(mags.unsqueeze(0) - \
true_mags.unsqueeze(1))
def get_summary_stats(est_locs, true_locs, slen, est_fluxes, true_fluxes,
nelec_per_nmgy,
pad = 5, slack = 0.5):
# remove border
est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)
if (est_fluxes is None) or (true_fluxes is None):
mag_error = 0.
else:
# convert to magnitude
est_mags = convert_nmgy_to_mag(est_fluxes / nelec_per_nmgy)
true_mags = convert_nmgy_to_mag(true_fluxes / nelec_per_nmgy)
mag_error = get_mag_error(est_mags, true_mags)
locs_error = get_locs_error(est_locs * (slen - 1), true_locs * (slen - 1))
tpr_bool = torch.any((locs_error < slack) * (mag_error < slack), dim = 1).float()
ppv_bool = torch.any((locs_error < slack) * (mag_error < slack), dim = 0).float()
return tpr_bool.mean(), ppv_bool.mean(), tpr_bool, ppv_bool
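# Reading the outputs: tpr_bool[i] == 1 means true star i is matched by some
# estimate within `slack` pixels (L-infinity) and `slack` magnitudes;
# ppv_bool[j] == 1 means estimate j matches some true star. Their means are
# the true-positive rate and positive predictive value, respectively.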
def get_tpr_vec(est_locs, true_locs, slen, est_fluxes, true_fluxes,
nelec_per_nmgy,
pad = 5, mag_vec = None):
est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)
# convert to magnitude
true_mags = convert_nmgy_to_mag(true_fluxes / nelec_per_nmgy)
if mag_vec is None:
percentiles = np.linspace(0, 1, 11) * 100
mag_vec = np.percentile(true_mags.cpu(), percentiles)
mag_vec = torch.Tensor(mag_vec).to(device)
tpr_vec = np.zeros(len(mag_vec) - 1)
counts_vec = np.zeros(len(mag_vec) - 1)
for i in range(len(mag_vec) - 1):
which_true = (true_mags > mag_vec[i]) & (true_mags < mag_vec[i + 1])
counts_vec[i] = torch.sum(which_true)
tpr_vec[i] = \
get_summary_stats(est_locs, true_locs[which_true], slen,
est_fluxes, true_fluxes[which_true],
nelec_per_nmgy, pad = pad)[0]
return tpr_vec, mag_vec, counts_vec
def get_ppv_vec(est_locs, true_locs, slen, est_fluxes, true_fluxes,
nelec_per_nmgy,
pad = 5, mag_vec = None):
est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)
est_mags = convert_nmgy_to_mag(est_fluxes / nelec_per_nmgy)
if mag_vec is None:
percentiles = np.linspace(0, 1, 11) * 100
mag_vec = np.percentile(est_mags.cpu(), percentiles)
mag_vec = torch.Tensor(mag_vec).to(device)
ppv_vec = np.zeros(len(mag_vec) - 1)
counts_vec = np.zeros(len(mag_vec) - 1)
for i in range(len(mag_vec) - 1):
which_est = (est_mags > mag_vec[i]) & (est_mags < mag_vec[i + 1])
counts_vec[i] = torch.sum(which_est)
if torch.sum(which_est) == 0:
continue
ppv_vec[i] = \
get_summary_stats(est_locs[which_est], true_locs, slen,
est_fluxes[which_est], true_fluxes,
nelec_per_nmgy, pad = pad)[1]
return ppv_vec, mag_vec, counts_vec
def get_l1_error(est_locs, true_locs, slen, est_fluxes, true_fluxes, pad = 5):
est_locs, est_fluxes = filter_params(est_locs, est_fluxes, slen, pad)
true_locs, true_fluxes = filter_params(true_locs, true_fluxes, slen, pad)
fluxes_error = get_fluxes_error(est_fluxes, true_fluxes)
locs_error = get_locs_error(est_locs * (slen - 1), true_locs * (slen - 1))
ppv_bool = torch.any((locs_error < 0.5) * (fluxes_error < 0.5), dim = 0).float()
locs_matched_error = locs_error[:, ppv_bool == 1]
fluxes_matched_error = fluxes_error[:, ppv_bool == 1]
    seq_tensor = torch.arange(fluxes_matched_error.shape[1], dtype = torch.long)
locs_error, which_match = locs_matched_error.min(0)
return locs_error, fluxes_matched_error[which_match, seq_tensor]
| 5,243
| 35.416667
| 97
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/sleep_lib.py
|
import torch
import numpy as np
import math
import time
from torch.distributions import normal
from torch.nn import CrossEntropyLoss
import deblending_runjingdev.utils as utils
import deblending_runjingdev.elbo_lib as elbo_lib
from deblending_runjingdev.which_device import device
from itertools import permutations
def isnan(x):
return x != x
#############################
# functions to get loss for training the counter
############################
def get_categorical_loss(log_probs, one_hot_encoding):
assert torch.all(log_probs <= 0)
assert log_probs.shape[0] == one_hot_encoding.shape[0]
assert log_probs.shape[1] == one_hot_encoding.shape[1]
return torch.sum(
-log_probs * one_hot_encoding, dim = 1)
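# i.e. for each image this returns the negative log probability assigned to
# the true star count, -log q(n_true).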
def _permute_losses_mat(losses_mat, perm):
batchsize = losses_mat.shape[0]
max_stars = losses_mat.shape[1]
assert perm.shape[0] == batchsize
assert perm.shape[1] == max_stars
return torch.gather(losses_mat, 2, perm.unsqueeze(2)).squeeze()
def get_locs_logprob_all_combs(true_locs, loc_mean, loc_log_var):
batchsize = true_locs.shape[0]
# get losses for locations
_loc_mean = loc_mean.view(batchsize, 1, loc_mean.shape[1], 2)
_loc_log_var = loc_log_var.view(batchsize, 1, loc_mean.shape[1], 2)
_true_locs = true_locs.view(batchsize, true_locs.shape[1], 1, 2)
# this is to return a large error if star is off
_true_locs = _true_locs + (_true_locs == 0).float() * 1e16
# this is batchsize x (max_stars x max_detections)
# the log prob for each observed location x mean
locs_log_probs_all = utils.eval_normal_logprob(_true_locs,
_loc_mean, _loc_log_var).sum(dim = 3)
return locs_log_probs_all
def get_fluxes_logprob_all_combs(true_fluxes, log_flux_mean, log_flux_log_var):
batchsize = true_fluxes.shape[0]
n_bands = true_fluxes.shape[2]
_log_flux_mean = log_flux_mean.view(batchsize, 1, log_flux_mean.shape[1], n_bands)
_log_flux_log_var = log_flux_log_var.view(batchsize, 1, log_flux_mean.shape[1], n_bands)
_true_fluxes = true_fluxes.view(batchsize, true_fluxes.shape[1], 1, n_bands)
# this is batchsize x (max_stars x max_detections)
# the log prob for each observed location x mean
flux_log_probs_all = utils.eval_lognormal_logprob(_true_fluxes,
_log_flux_mean, _log_flux_log_var).sum(dim = 3)
return flux_log_probs_all
def _get_log_probs_all_perms(locs_log_probs_all, flux_log_probs_all, is_on_array):
max_detections = flux_log_probs_all.shape[-1]
batchsize = flux_log_probs_all.shape[0]
locs_loss_all_perm = torch.zeros(batchsize,
math.factorial(max_detections),
device = device)
fluxes_loss_all_perm = torch.zeros(batchsize,
math.factorial(max_detections),
device = device)
i = 0
for perm in permutations(range(max_detections)):
locs_loss_all_perm[:, i] = \
(locs_log_probs_all[:, perm, :].diagonal(dim1 = 1, dim2 = 2) * \
is_on_array).sum(1)
fluxes_loss_all_perm[:, i] = \
(flux_log_probs_all[:, perm].diagonal(dim1 = 1, dim2 = 2) * \
is_on_array).sum(1)
i += 1
return locs_loss_all_perm, fluxes_loss_all_perm
def get_min_perm_loss(locs_log_probs_all, flux_log_probs_all, is_on_array):
locs_log_probs_all_perm, fluxes_log_probs_all_perm = \
_get_log_probs_all_perms(locs_log_probs_all, flux_log_probs_all, is_on_array)
locs_loss, indx = torch.min(-locs_log_probs_all_perm, dim = 1)
fluxes_loss = -torch.gather(fluxes_log_probs_all_perm, 1, indx.unsqueeze(1)).squeeze()
return locs_loss, fluxes_loss, indx
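# Worked example (max_detections = 2): the permutations are (0, 1) and (1, 0);
# for each image we keep whichever ordering of the predicted stars maximizes
# the location log-likelihood against the true stars, then score the fluxes
# under that same ordering -- a brute-force fix for label switching that is
# cheap only because max_detections is small (cost grows as max_detections!).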
def get_params_loss(loc_mean, loc_log_var, \
log_flux_mean, log_flux_log_var, log_probs,
true_locs, true_fluxes, true_is_on_array):
max_detections = log_flux_mean.shape[1]
# this is batchsize x (max_stars x max_detections)
# the log prob for each observed location x mean
locs_log_probs_all = \
get_locs_logprob_all_combs(true_locs,
loc_mean,
loc_log_var)
flux_log_probs_all = \
get_fluxes_logprob_all_combs(true_fluxes, \
log_flux_mean, log_flux_log_var)
locs_loss, fluxes_loss, perm_indx = \
get_min_perm_loss(locs_log_probs_all, flux_log_probs_all, true_is_on_array)
true_n_stars = true_is_on_array.sum(1)
cross_entropy = CrossEntropyLoss(reduction="none").requires_grad_(False)
counter_loss = cross_entropy(log_probs, true_n_stars.long())
loss_vec = (locs_loss * (locs_loss.detach() < 1e6).float() + fluxes_loss + counter_loss)
loss = loss_vec.mean()
return loss, counter_loss, locs_loss, fluxes_loss, perm_indx
def get_inv_kl_loss(star_encoder,
images,
true_locs,
true_fluxes, use_l2_loss = False):
# extract image ptiles
image_ptiles, true_tile_locs, true_tile_fluxes, \
true_tile_n_stars, true_tile_is_on_array = \
star_encoder.get_image_ptiles(images, true_locs, true_fluxes,
clip_max_stars = True)
# get variational parameters on each tile
loc_mean, loc_log_var, \
log_flux_mean, log_flux_log_var, log_probs = \
star_encoder(image_ptiles, true_tile_n_stars)
if use_l2_loss:
loc_log_var = torch.zeros((loc_log_var.shape), device = device)
log_flux_log_var = torch.zeros((log_flux_log_var.shape), device = device)
loss, counter_loss, locs_loss, fluxes_loss, perm_indx = \
get_params_loss(loc_mean, loc_log_var, \
log_flux_mean, log_flux_log_var, log_probs, \
true_tile_locs, true_tile_fluxes,
true_tile_is_on_array.float())
return loss, counter_loss, locs_loss, fluxes_loss, perm_indx, log_probs
def eval_sleep(star_encoder, train_loader,
optimizer = None, train = False):
avg_loss = 0.0
avg_counter_loss = 0.0
avg_locs_loss = 0.0
avg_fluxes_loss = 0.0
for _, data in enumerate(train_loader):
true_fluxes = data['fluxes']
true_locs = data['locs']
images = data['image']
if train:
star_encoder.train()
if optimizer is not None:
optimizer.zero_grad()
else:
star_encoder.eval()
# evaluate log q
loss, counter_loss, locs_loss, fluxes_loss = \
get_inv_kl_loss(star_encoder, images,
true_locs, true_fluxes)[0:4]
if train:
if optimizer is not None:
loss.backward()
optimizer.step()
avg_loss += loss.item() * images.shape[0] / len(train_loader.dataset)
avg_counter_loss += counter_loss.sum().item() / (len(train_loader.dataset) * star_encoder.n_tiles)
avg_fluxes_loss += fluxes_loss.sum().item() / (len(train_loader.dataset) * star_encoder.n_tiles)
avg_locs_loss += locs_loss.sum().item() / (len(train_loader.dataset) * star_encoder.n_tiles)
return avg_loss, avg_counter_loss, avg_locs_loss, avg_fluxes_loss
def run_sleep(star_encoder, loader, optimizer, n_epochs,
out_filename, print_every = 10,
full_image = None, mean_stars = None):
test_losses = np.zeros((4, n_epochs))
# save ELBO as well
if full_image is not None:
star_encoder.eval();
elbo_results_vec = elbo_lib.save_elbo_results(full_image, star_encoder,
loader.dataset.simulator, mean_stars = mean_stars)
for epoch in range(n_epochs):
t0 = time.time()
# draw fresh data
loader.dataset.set_params_and_images()
avg_loss, counter_loss, locs_loss, fluxes_loss = \
eval_sleep(star_encoder, loader, optimizer, train = True)
elapsed = time.time() - t0
print('[{}] loss: {:0.4f}; counter loss: {:0.4f}; locs loss: {:0.4f}; fluxes loss: {:0.4f} \t[{:.1f} seconds]'.format(\
epoch, avg_loss, counter_loss, locs_loss, fluxes_loss, elapsed))
test_losses[:, epoch] = np.array([avg_loss, counter_loss, locs_loss, fluxes_loss])
np.savetxt(out_filename + '-test_losses', test_losses)
if ((epoch % print_every) == 0) or (epoch == (n_epochs-1)):
loader.dataset.set_params_and_images()
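            # extra pass in train mode with no optimizer step -- presumably to
            # refresh batch-norm running statistics before evaluation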
foo = eval_sleep(star_encoder, loader, train = True)[0];
star_encoder.eval();
loader.dataset.set_params_and_images()
test_loss, test_counter_loss, test_locs_loss, test_fluxes_loss = \
eval_sleep(star_encoder, loader, train = False)
print('**** test loss: {:.3f}; counter loss: {:.3f}; locs loss: {:.3f}; fluxes loss: {:.3f} ****'.format(\
test_loss, test_counter_loss, test_locs_loss, test_fluxes_loss))
# save ELBO as well
        if (full_image is not None) and (epoch > 0):
elbo_results = elbo_lib.save_elbo_results(full_image, star_encoder, loader.dataset.simulator,
mean_stars = mean_stars, pad = star_encoder.edge_padding)
elbo_results_vec = np.vstack((elbo_results_vec, elbo_results))
np.savetxt(out_filename + '-elbo_results', elbo_results_vec)
print("writing the encoder parameters to " + out_filename)
torch.save(star_encoder.state_dict(), out_filename)
| 9,800
| 37.136187
| 127
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/__init__.py
| 0
| 0
| 0
|
py
|
|
DeblendingStarfields
|
DeblendingStarfields-master/deblending_runjingdev/which_device.py
|
import torch
device = torch.device("cuda:6" if torch.cuda.is_available() else "cpu")
# device = 'cpu'
| 102
| 24.75
| 71
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_sparse_field/sparse_field_lib.py
|
import numpy as np
import torch
import fitsio
from astropy.io import fits
from astropy.wcs import WCS
import deblending_runjingdev.sdss_dataset_lib as sdss_dataset_lib
from deblending_runjingdev.sdss_dataset_lib import _get_mgrid2
def load_data(catalog_file = '../coadd_field_catalog_runjing_liu.fit',
sdss_dir = '../sdss_stage_dir/',
run = 94, camcol = 1, field = 12, bands = [2, 3],
align_bands = True):
n_bands = len(bands)
band_letters = ['ugriz'[bands[i]] for i in range(n_bands)]
##################
# load sdss data
##################
sdss_data = sdss_dataset_lib.SloanDigitalSkySurvey(sdssdir = sdss_dir,
run = run, camcol = camcol,
field = field, bands = bands)
image = torch.Tensor(sdss_data[0]['image'])
slen0 = image.shape[-2]
slen1 = image.shape[-1]
##################
# load coordinate files
##################
frame_names = ["frame-{}-{:06d}-{:d}-{:04d}.fits".format(band_letters[i],
run, camcol, field) for i in range(n_bands)]
wcs_list = []
for i in range(n_bands):
hdulist = fits.open(sdss_dir + str(run) + '/' + str(camcol) + '/' + str(field) + \
'/' + frame_names[i])
wcs_list += [WCS(hdulist['primary'].header)]
min_coords = wcs_list[0].wcs_pix2world(np.array([[0, 0]]), 0)
max_coords = wcs_list[0].wcs_pix2world(np.array([[slen1, slen0]]), 0)
##################
# load catalog
##################
fits_file = fitsio.FITS(catalog_file)[1]
true_ra = fits_file['ra'][:]
true_decl = fits_file['dec'][:]
# make sure our catalog covers the whole image
assert true_ra.min() < min_coords[0, 0]
assert true_ra.max() > max_coords[0, 0]
assert true_decl.min() < min_coords[0, 1]
assert true_decl.max() > max_coords[0, 1]
##################
# align image
##################
if align_bands:
pix_coords_list = [wcs_list[i].wcs_world2pix(true_ra, true_decl, 0, \
ra_dec_order = True) \
for i in range(n_bands)]
for i in range(1, n_bands):
shift_x0 = np.median(pix_coords_list[0][1] - pix_coords_list[i][1])
shift_x1 = np.median(pix_coords_list[0][0] - pix_coords_list[i][0])
grid = _get_mgrid2(slen0, slen1).unsqueeze(0) - \
torch.Tensor([[[[shift_x1 / (slen1 - 1),
shift_x0 / (slen0 - 1)]]]]) * 2
image_i = image[i].unsqueeze(0).unsqueeze(0)
band_aligned = torch.nn.functional.grid_sample(image_i, grid,
mode = 'nearest', align_corners=True).squeeze()
image[i] = band_aligned
return image, fits_file, wcs_list, sdss_data
| 2,936
| 33.151163
| 90
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_sparse_field/train_sleep-sparse_field.py
|
import numpy as np
import torch
import torch.optim as optim
from deblending_runjingdev import simulated_datasets_lib
from deblending_runjingdev import starnet_lib
from deblending_runjingdev import sleep_lib
from deblending_runjingdev import psf_transform_lib
from deblending_runjingdev import wake_lib
import json
import time
from deblending_runjingdev.which_device import device
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
np.random.seed(65765)
_ = torch.manual_seed(3453453)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
data_params = json.load(fp)
data_params['mean_stars'] = 50
data_params['slen'] = 500
print(data_params)
###############
# load psf
###############
bands = [2]
psfield_file = '../sdss_stage_dir/94/1/12/psField-000094-1-0012.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# sky intensity: for the r band
###############
init_background_params = torch.zeros(len(bands), 3).to(device)
init_background_params[0, 0] = 862.
planar_background = wake_lib.PlanarBackground(image_slen = data_params['slen'],
init_background_params = init_background_params.to(device))
background = planar_background.forward().detach()
###############
# draw data
###############
print('generating data: ')
n_images = 200
t0 = time.time()
star_dataset = \
simulated_datasets_lib.load_dataset_from_params(psf_og,
data_params,
background = background,
n_images = n_images,
transpose_psf = False,
add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
batchsize = 1
loader = torch.utils.data.DataLoader(
dataset=star_dataset,
batch_size=batchsize,
shuffle=True)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
ptile_slen = 50,
step = 50,
edge_padding = 0,
n_bands = psf_og.shape[0],
max_detections = 3,
track_running_stats = False)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-5
optimizer = optim.Adam([
{'params': star_encoder.parameters(),
'lr': learning_rate}],
weight_decay = weight_decay)
###############
# Train!
###############
n_epochs = 141
print_every = 10
print('training')
out_filename = './starnet_sparsefield'
sleep_lib.run_sleep(star_encoder,
loader,
optimizer,
n_epochs,
out_filename = out_filename,
print_every = print_every)
# star_dataset2 = \
# simulated_datasets_lib.load_dataset_from_params(psf_og,
# data_params,
# background = background,
# n_images = 1,
# transpose_psf = False,
# add_noise = True)
# sim_image = star_dataset2[0]['image'].unsqueeze(0)
# true_locs = star_dataset2[0]['locs'][0:star_dataset[0]['n_stars']].unsqueeze(0)
# true_fluxes = star_dataset2[0]['fluxes'][0:star_dataset[0]['n_stars']].unsqueeze(0)
# np.savez('./fits/results_2020-05-10/starnet_ri_sparse_field',
# sim_image = sim_image.cpu().numpy(),
# true_locs = true_locs.cpu().numpy(),
# true_fluxes = true_fluxes.cpu().numpy())
# # check loss
# loss, counter_loss, locs_loss, fluxes_loss, perm_indx = \
# sleep_lib.get_inv_kl_loss(star_encoder, sim_image,
# true_locs, true_fluxes)[0:5]
# print(loss)
# print(counter_loss.mean())
# print(locs_loss.mean())
# print(fluxes_loss.mean())
| 4,469
| 29.827586
| 87
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_m2/train_wake_sleep.py
|
import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.sdss_dataset_lib as sdss_dataset_lib
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
from deblending_runjingdev.sleep_lib import run_sleep
import deblending_runjingdev.wake_lib as wake_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import time
import json
import os
from deblending_runjingdev.which_device import device
print('device: ', device)
print('torch version: ', torch.__version__)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--x0', type=int, default=630)
parser.add_argument('--x1', type=int, default=310)
parser.add_argument('--init_encoder', type=str, default='./fits/results_2020-05-15/starnet_ri')
parser.add_argument('--outfolder', type=str, default='./fits/results_2020-05-15/')
parser.add_argument('--outfilename', type=str, default='starnet_ri_wake-sleep')
parser.add_argument('--n_iter', type=int, default=2)
parser.add_argument('--prior_mu', type=int, default=1500)
parser.add_argument('--prior_alpha', type=float, default=0.5)
args = parser.parse_args()
assert os.path.isfile(args.init_encoder)
assert os.path.isdir(args.outfolder)
#######################
# set seed
########################
np.random.seed(32090275)
_ = torch.manual_seed(120457)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
#######################
# get sdss data
#######################
sdss_image = sdss_dataset_lib.load_m2_data(sdss_dir = './../sdss_stage_dir/',
hubble_dir = './hubble_data/',
x0 = args.x0,
x1 = args.x1)[0]
sdss_image = sdss_image.unsqueeze(0).to(device)
#######################
# simulated data parameters
#######################
with open('../model_params/default_star_parameters.json', 'r') as fp:
data_params = json.load(fp)
data_params['alpha'] = args.prior_alpha
data_params['mean_stars'] = args.prior_mu
print(data_params)
###############
# load model parameters
###############
#### the psf
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = [2, 3]).to(device)
model_params = wake_lib.ModelParams(sdss_image,
init_psf_params = init_psf_params,
init_background_params = None)
psf_og = model_params.get_psf().detach()
background_og = model_params.get_background().detach().squeeze(0)
###############
# draw data
###############
print('generating data: ')
n_images = 200
t0 = time.time()
star_dataset = \
simulated_datasets_lib.load_dataset_from_params(psf_og,
data_params,
n_images = n_images,
background = background_og,
transpose_psf = False,
add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
batchsize = 20
loader = torch.utils.data.DataLoader(
dataset=star_dataset,
batch_size=batchsize,
shuffle=True)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
ptile_slen = 8,
step = 2,
edge_padding = 3,
n_bands = 2,
max_detections = 2)
init_encoder = args.init_encoder
star_encoder.load_state_dict(torch.load(init_encoder,
map_location=lambda storage, loc: storage))
star_encoder.to(device)
star_encoder.eval();
####################
# optimizer
#####################
encoder_lr = 1e-5
sleep_optimizer = optim.Adam([
{'params': star_encoder.parameters(),
'lr': encoder_lr}],
weight_decay = 1e-5)
# initial loss:
sleep_loss, sleep_counter_loss, sleep_locs_loss, sleep_fluxes_loss = \
sleep_lib.eval_sleep(star_encoder, loader, train = False)
print('**** INIT SLEEP LOSS: {:.3f}; counter loss: {:.3f}; locs loss: {:.3f}; fluxes loss: {:.3f} ****'.format(\
sleep_loss, sleep_counter_loss, sleep_locs_loss, sleep_fluxes_loss))
wake_loss = wake_lib.get_wake_loss(sdss_image, star_encoder, model_params,
n_samples = 1, run_map = True).detach()
print('**** INIT WAKE LOSS: {:.3f}'.format(wake_loss))
# file header to save results
outfolder = args.outfolder # './fits/results_2020-03-04/'
outfile_base = outfolder + args.outfilename
print(outfile_base)
############################
# Run wake-sleep
############################
t0 = time.time()
n_iter = args.n_iter
map_losses = torch.zeros(n_iter)
for iteration in range(0, n_iter):
#######################
# wake phase training
#######################
print('RUNNING WAKE PHASE. ITER = ' + str(iteration))
if iteration == 0:
powerlaw_psf_params = init_psf_params
planar_background_params = None
encoder_file = init_encoder
else:
powerlaw_psf_params = \
torch.Tensor(np.load(outfile_base + '-iter' + str(iteration -1) + \
'-powerlaw_psf_params.npy')).to(device)
planar_background_params = \
torch.Tensor(np.load(outfile_base + '-iter' + str(iteration -1) + \
'-planarback_params.npy')).to(device)
encoder_file = outfile_base + '-encoder-iter' + str(iteration)
print('loading encoder from: ', encoder_file)
star_encoder.load_state_dict(torch.load(encoder_file,
map_location=lambda storage, loc: storage))
star_encoder.to(device);
star_encoder.eval();
model_params, map_losses[iteration] = \
wake_lib.run_wake(sdss_image, star_encoder, powerlaw_psf_params,
planar_background_params,
n_samples = 25,
out_filename = outfile_base + '-iter' + str(iteration),
lr = 1e-3,
n_epochs = 100,
run_map = False,
print_every = 10)
print(list(model_params.planar_background.parameters())[0])
print(list(model_params.power_law_psf.parameters())[0])
print(map_losses[iteration])
np.save(outfolder + 'map_losses', map_losses.cpu().detach())
########################
# sleep phase training
########################
print('RUNNING SLEEP PHASE. ITER = ' + str(iteration + 1))
# update psf and background
loader.dataset.simulator.psf = model_params.get_psf().detach()
loader.dataset.simulator.background = model_params.get_background().squeeze(0).detach()
run_sleep(star_encoder,
loader,
sleep_optimizer,
n_epochs = 11,
out_filename = outfile_base + '-encoder-iter' + str(iteration + 1))
print('DONE. Elapsed: {}secs'.format(time.time() - t0))
| 7,431
| 33.567442
| 112
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_m2/train_sleep.py
|
import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.sdss_dataset_lib as sdss_dataset_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import deblending_runjingdev.wake_lib as wake_lib
import json
import time
from deblending_runjingdev.which_device import device
print('device: ', device)
print('torch version: ', torch.__version__)
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--outfolder', type=str, default='./fits/results_2020-05-15/')
parser.add_argument('--outfilename', type=str, default='starnet_ri')
parser.add_argument('--prior_mu', type=int, default=1500)
parser.add_argument('--prior_alpha', type=float, default=0.5)
args = parser.parse_args()
import os
assert os.path.isdir(args.outfolder)
###############
# set seed
###############
np.random.seed(65765)
_ = torch.manual_seed(3453453)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
data_params = json.load(fp)
data_params['mean_stars'] = args.prior_mu
data_params['alpha'] = args.prior_alpha
print(data_params)
###############
# load psf and background
###############
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = [2, 3])
# init_psf_params = torch.Tensor(np.load('./data/fitted_powerlaw_psf_params.npy'))
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
# load background
sdss_background = \
sdss_dataset_lib.load_m2_data(sdss_dir = './../sdss_stage_dir/',
hubble_dir = './hubble_data/')[1]
sdss_background = sdss_background.to(device)
###############
# draw data
###############
print('generating data: ')
n_images = 200
t0 = time.time()
star_dataset = \
simulated_datasets_lib.load_dataset_from_params(psf_og,
data_params,
background = sdss_background,
n_images = n_images,
transpose_psf = False,
add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
batchsize = 20
loader = torch.utils.data.DataLoader(
dataset=star_dataset,
batch_size=batchsize,
shuffle=True)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
ptile_slen = 8,
step = 2,
edge_padding = 3,
n_bands = psf_og.shape[0],
max_detections = 2)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-5
optimizer = optim.Adam([
{'params': star_encoder.parameters(),
'lr': learning_rate}],
weight_decay = weight_decay)
###############
# Train!
###############
n_epochs = 201
print_every = 5
print('training')
t0 = time.time()
out_filename = args.outfolder + args.outfilename # './fits/results_2020-05-15/starnet_ri'
sleep_lib.run_sleep(star_encoder, loader, optimizer, n_epochs,
out_filename = out_filename,
print_every = print_every,
full_image = None)
print('DONE. Elapsed: {}secs'.format(time.time() - t0))
| 3,978
| 28.474074
| 89
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_deblending/train_encoder.py
|
import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import json
import time
from deblending_runjingdev.which_device import device
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
np.random.seed(5751)
_ = torch.manual_seed(1151)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
data_params = json.load(fp)
data_params['min_stars'] = 1
# mean set so that P(n_stars <= 1) \approx 0.5
data_params['mean_stars'] = 1.65
data_params['max_stars'] = 2
data_params['slen'] = 7
data_params['f_max'] = 10000
print(data_params)
###############
# load psf
###############
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf = power_law_psf.forward().detach()
###############
# set background
###############
background = torch.zeros(len(bands), data_params['slen'], data_params['slen']).to(device)
background[0] = 686.
background[1] = 1123.
###############
# draw data
###############
print('generating data: ')
n_images = 60000
t0 = time.time()
star_dataset = \
simulated_datasets_lib.load_dataset_from_params(psf,
data_params,
background = background,
n_images = n_images,
transpose_psf = False,
add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get data loader
batchsize = 2000
loader = torch.utils.data.DataLoader(
dataset=star_dataset,
batch_size=batchsize,
shuffle=True)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
ptile_slen = data_params['slen'],
step = data_params['slen'],
edge_padding = 0,
n_bands = len(bands),
max_detections = 2)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-3
optimizer = optim.Adam([
{'params': star_encoder.parameters(),
'lr': learning_rate}],
weight_decay = weight_decay)
###############
# Train!
###############
n_epochs = 30
print_every = 10
print('training')
out_filename = './starnet'
sleep_lib.run_sleep(star_encoder,
loader,
optimizer,
n_epochs,
out_filename = out_filename,
print_every = print_every)
| 3,330
| 25.862903
| 89
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_elbo_vs_sleep/train_elbo.py
|
import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.elbo_lib as elbo_lib
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import time
from deblending_runjingdev.which_device import device
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type = int, default = 0)
parser.add_argument("--test_image",
type = str,
default = 'small')
parser.add_argument("--grad_estimator",
type = str,
default = 'reinforce')
args = parser.parse_args()
print(args.seed)
np.random.seed(8910 + args.seed * 17)
_ = torch.manual_seed(8910 + args.seed * 13)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# load psf
###############
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# Get image
###############
if args.test_image == 'small':
# test image file
test_image_file = './test_image_20x20.npz'
# parameters for encoder
ptile_slen = 10
step = 10
edge_padding = 0
# prior parameters
mean_stars = 4
elif args.test_image == 'large':
# test image file
test_image_file = './test_image_100x100.npz'
# parameters for encoder
ptile_slen = 20
step = 10
edge_padding = 5
# prior parameters
mean_stars = 50
else:
print('Specify whether to use the large (100 x 100) test image',
'or the small (20 x 20) test image')
raise NotImplementedError()
full_image_np = np.load(test_image_file)['image']
full_image = torch.Tensor(full_image_np).unsqueeze(0).to(device)
slen = full_image.shape[-1]
fmin = 1000.
###############
# background
###############
background = torch.zeros(len(bands), slen, slen).to(device)
background[0] = 686.
background[1] = 1123.
###############
# Get simulator
###############
simulator = simulated_datasets_lib.StarSimulator(psf_og,
slen,
background,
transpose_psf = False)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = slen,
ptile_slen = ptile_slen,
step = step,
edge_padding = edge_padding,
n_bands = psf_og.shape[0],
max_detections = 2,
fmin = fmin,
constrain_logflux_mean = True,
track_running_stats = False)
star_encoder.eval()
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-3
optimizer = optim.Adam([
{'params': star_encoder.parameters(),
'lr': learning_rate}],
weight_decay = weight_decay)
###############
# Train!
###############
out_filename = './fits/starnet_elbo_' + args.grad_estimator + '-restart' + str(args.seed)
if args.test_image == 'small':
n_epochs = 2500
print_every = 100
n_samples = 2000
out_filename = out_filename + '_20x20'
else:
    # Training on the large (100 x 100) test image is not implemented in this script.
    # out_filename = out_filename + '_100x100'
    raise NotImplementedError()
print('training')
elbo_results_vec = elbo_lib.save_elbo_results(full_image, star_encoder,
simulator, mean_stars,
n_samples = n_samples,
pad = star_encoder.edge_padding)
t0 = time.time()
for epoch in range(1, n_epochs + 1):
optimizer.zero_grad()
    # construct the surrogate ("pseudo") loss whose gradient is the chosen ELBO gradient estimator
if args.grad_estimator == 'reinforce':
ps_loss = elbo_lib.get_pseudo_loss(full_image, star_encoder,
simulator, mean_stars,
n_samples = n_samples,
pad = star_encoder.edge_padding)
elif args.grad_estimator == 'reparam':
ps_loss = elbo_lib.get_pseudo_loss_all_sum(full_image, star_encoder,
simulator, mean_stars, n_samples)
else:
print(args.grad_estimator, 'not implemented. Specify either reinforce or reparam')
raise NotImplementedError()
ps_loss.backward()
optimizer.step()
if ((epoch % print_every) == 0) or (epoch == n_epochs):
print('epoch = {}; elapsed = {:.1f}sec'.format(epoch, time.time() - t0))
elbo_results = elbo_lib.save_elbo_results(full_image, star_encoder,
simulator, mean_stars,
n_samples = n_samples,
pad = star_encoder.edge_padding)
elbo_results_vec = np.vstack((elbo_results_vec, elbo_results))
np.savetxt(out_filename + '-elbo_results', elbo_results_vec)
print("writing the encoder parameters to " + out_filename)
torch.save(star_encoder.state_dict(), out_filename)
t0 = time.time()
| 5,869
| 30.55914
| 90
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_elbo_vs_sleep/train_elbo-Copy1.py
|
import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.elbo_lib as elbo_lib
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import time
from deblending_runjingdev.which_device import device
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type = int, default = 0)
args = parser.parse_args()
print(args.seed)
np.random.seed(575 + args.seed * 17)
_ = torch.manual_seed(1512 + args.seed * 13)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
###############
# load psf
###############
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = bands)
# init_psf_params = torch.Tensor(np.load('./data/fitted_powerlaw_psf_params.npy'))
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# Get image
###############
test_image_file = './test_image_20x20.npz'
full_image_np = np.load(test_image_file)['image']
full_image = torch.Tensor(full_image_np).unsqueeze(0).to(device)
slen = full_image.shape[-1]
fmin = 1000.
mean_stars = 4
# load true locations and fluxes
true_locs = torch.Tensor(np.load(test_image_file)['locs']).to(device)
true_fluxes = torch.Tensor(np.load(test_image_file)['fluxes']).to(device)
###############
# background
###############
background = torch.zeros(len(bands), slen, slen).to(device)
background[0] = 686.
background[1] = 1123.
###############
# Get simulator
###############
simulator = simulated_datasets_lib.StarSimulator(psf_og,
slen,
background,
transpose_psf = False)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = slen,
ptile_slen = 10,
step = 10,
edge_padding = 0,
n_bands = psf_og.shape[0],
max_detections = 2,
fmin = fmin,
constrain_logflux_mean = True,
track_running_stats = False)
# star_encoder = elbo_lib.MFVBEncoder(slen = slen,
# patch_slen = 10,
# step = 10,
# edge_padding = 0,
# n_bands = psf_og.shape[0],
# max_detections = 2,
# fmin = 1000.)
#
# star_encoder.load_state_dict(torch.load('./fits/results_2020-04-29/starnet_klpq',
# map_location=lambda storage, loc: storage))
star_encoder.eval()
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-3
optimizer = optim.Adam([
{'params': star_encoder.parameters(),
'lr': learning_rate}],
weight_decay = weight_decay)
###############
# Train!
###############
out_filename = './foo' # './fits/results_2020-05-06/starnet_encoder_allsum-restart' + str(args.seed)
n_epochs = 2500
print_every = 100
n_samples = 2000
print('training')
elbo_results_vec = elbo_lib.save_elbo_results(full_image, star_encoder,
simulator, mean_stars, n_samples)
t0 = time.time()
for epoch in range(1, n_epochs + 1):
optimizer.zero_grad()
# get pseudo loss
# ps_loss = elbo_lib.get_pseudo_loss(full_image, star_encoder,
# simulator,mean_stars, n_samples)
ps_loss = elbo_lib.get_pseudo_loss_all_sum(full_image, star_encoder,
simulator, mean_stars, n_samples)
# ps_loss = elbo_lib.loss_on_true_nstars(full_image, star_encoder, simulator,
# mean_stars, n_samples,
# true_locs, true_fluxes)
ps_loss.backward()
optimizer.step()
if ((epoch % print_every) == 0) or (epoch == n_epochs):
print('epoch = {}; elapsed = {:.1f}sec'.format(epoch, time.time() - t0))
elbo_results = elbo_lib.save_elbo_results(full_image, star_encoder,
simulator, mean_stars, n_samples)
elbo_results_vec = np.vstack((elbo_results_vec, elbo_results))
np.savetxt(out_filename + '-elbo_results', elbo_results_vec)
print("writing the encoder parameters to " + out_filename)
torch.save(star_encoder.state_dict(), out_filename)
# torch.save(star_encoder.params, out_filename)
t0 = time.time()
| 5,279
| 33.509804
| 100
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_elbo_vs_sleep/simulate_test_images.py
|
import numpy as np
import torch
import json
import matplotlib.pyplot as plt
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
from deblending_runjingdev.which_device import device
np.random.seed(65765)
_ = torch.manual_seed(3453453)
# get the SDSS point spread function
bands = [2, 3]
psfield_file = './../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach().to(device)
##############################
# Simulate the 20 x 20 image
##############################
slen = 20
# get background
background = torch.zeros(len(bands), slen, slen)
background[0] = 686.
background[1] = 1123.
background = background.to(device)
# the simulator
simulator = simulated_datasets_lib.StarSimulator(psf_og, slen, background, transpose_psf = False)
# set locations (pixel coordinates, normalized by slen) and fluxes
true_locs = torch.Tensor([[2, 3],
[5.5, 7.5],
[12.5, 6.5],
[8.5, 14.5]]).unsqueeze(0) / slen
true_locs = true_locs.to(device)
true_fluxes = torch.zeros(true_locs.shape[0], true_locs.shape[1], len(bands),
device = device) + 4000.
# simulate image
full_image = simulator.draw_image_from_params(locs = true_locs,
fluxes = true_fluxes,
n_stars= torch.Tensor([4]).to(device).long(),
add_noise = True)
# save
fname = './test_image_20x20.npz'
print('saving 20 x 20 test image into: ', fname)
np.savez(fname,
image = full_image.cpu().squeeze(0),
locs = true_locs.cpu().squeeze(0),
fluxes = true_fluxes.cpu().squeeze(0))
##############################
# Simulate 100 x 100 image
##############################
np.random.seed(652)
_ = torch.manual_seed(3143)
# data parameters
with open('./../model_params/default_star_parameters.json', 'r') as fp:
data_params = json.load(fp)
data_params['min_stars'] = 50
data_params['max_stars'] = 50
data_params['mean_stars'] = 50
data_params['slen'] = 110
# background
background = torch.zeros(len(bands), data_params['slen'], data_params['slen'])
background[0] = 686.
background[1] = 1123.
background = background.to(device)
# simulate image
n_images = 1
star_dataset = \
simulated_datasets_lib.load_dataset_from_params(psf_og,
data_params,
background = background,
n_images = n_images,
transpose_psf = False,
add_noise = True)
fname = './test_image_100x100.npz'
print('saving 100 x 100 test image into: ', fname)
full_image = star_dataset[0]['image'].unsqueeze(0)
true_locs = star_dataset[0]['locs']
true_fluxes = star_dataset[0]['fluxes']
np.savez(fname,
image = full_image.cpu().squeeze(0),
locs = true_locs.cpu().squeeze(0),
fluxes = true_fluxes.cpu().squeeze(0))
| 3,256
| 29.439252
| 97
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/experiments_elbo_vs_sleep/train_sleep.py
|
import numpy as np
import torch
import torch.optim as optim
import deblending_runjingdev.simulated_datasets_lib as simulated_datasets_lib
import deblending_runjingdev.starnet_lib as starnet_lib
import deblending_runjingdev.sleep_lib as sleep_lib
import deblending_runjingdev.psf_transform_lib as psf_transform_lib
import deblending_runjingdev.elbo_lib as elbo_lib
import json
import time
from deblending_runjingdev.which_device import device
print('device: ', device)
print('torch version: ', torch.__version__)
###############
# set seed
###############
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type = int, default = 0)
parser.add_argument("--test_image",
type = str,
default = 'small')
args = parser.parse_args()
print(args.seed)
np.random.seed(5751 + args.seed * 17)
_ = torch.manual_seed(11512 + args.seed * 13)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
####################
# Get test image
#####################
if args.test_image == 'small':
# test image file
test_image_file = './test_image_20x20.npz'
# parameters for encoder
ptile_slen = 10
step = 10
edge_padding = 0
# prior parameters
mean_stars = 4
max_stars = 6
elif args.test_image == 'large':
# test image file
test_image_file = './test_image_100x100.npz'
# parameters for encoder
ptile_slen = 20
step = 10
edge_padding = 5
# prior parameters
mean_stars = 50
max_stars = 100
else:
    print('Specify whether to use the large (100 x 100) test image',
          'or the small (20 x 20) test image')
raise NotImplementedError()
full_image_np = np.load(test_image_file)['image']
full_image = torch.Tensor(full_image_np).unsqueeze(0).to(device)
###############
# data parameters
###############
with open('../model_params/default_star_parameters.json', 'r') as fp:
data_params = json.load(fp)
data_params['mean_stars'] = mean_stars
data_params['min_stars'] = 0
data_params['max_stars'] = max_stars
data_params['slen'] = full_image.shape[-1]
print(data_params)
###############
# load psf
###############
bands = [2, 3]
psfield_file = '../sdss_stage_dir/2583/2/136/psField-002583-2-0136.fit'
init_psf_params = psf_transform_lib.get_psf_params(
psfield_file,
bands = bands)
power_law_psf = psf_transform_lib.PowerLawPSF(init_psf_params.to(device))
psf_og = power_law_psf.forward().detach()
###############
# set background
###############
background = torch.zeros(len(bands), data_params['slen'], data_params['slen']).to(device)
background[0] = 686.
background[1] = 1123.
###############
# draw data
###############
print('generating data: ')
n_images = 20000
t0 = time.time()
star_dataset = \
simulated_datasets_lib.load_dataset_from_params(psf_og,
data_params,
background = background,
n_images = n_images,
transpose_psf = False,
add_noise = True)
print('data generation time: {:.3f}secs'.format(time.time() - t0))
# get loader
batchsize = 64
loader = torch.utils.data.DataLoader(
dataset=star_dataset,
batch_size=batchsize,
shuffle=True)
###############
# define VAE
###############
star_encoder = starnet_lib.StarEncoder(slen = data_params['slen'],
ptile_slen = ptile_slen,
step = step,
edge_padding = edge_padding,
n_bands = psf_og.shape[0],
max_detections = 2)
star_encoder.to(device)
###############
# define optimizer
###############
learning_rate = 1e-3
weight_decay = 1e-5
optimizer = optim.Adam([
{'params': star_encoder.parameters(),
'lr': learning_rate}],
weight_decay = weight_decay)
###############
# Train!
###############
out_filename = './fits/starnet_klpq-restart' + str(args.seed)
if args.test_image == 'small':
n_epochs = 31
print_every = 1
out_filename = out_filename + '_20x20'
else:
n_epochs = 500
print_every = 10
out_filename = out_filename + '_100x100'
print('training')
sleep_lib.run_sleep(star_encoder, loader, optimizer, n_epochs,
out_filename = out_filename,
print_every = print_every,
full_image = full_image,
mean_stars = data_params['mean_stars'])
| 4,752
| 26.316092
| 89
|
py
|
DeblendingStarfields
|
DeblendingStarfields-master/blip_wrapper/performance_eval.py
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
mag_vec = np.concatenate((np.array([15]),
np.arange(19, 22.2, 0.4),
np.array([100])))
MAG_CONST = 2.5
def convert_nmgy_to_mag(nmgy):
return 22.5 - MAG_CONST * np.log10(nmgy)
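# Illustrative check of the conversion above: convert_nmgy_to_mag(1.0) == 22.5,
# and every factor of 100 in flux corresponds to 5 magnitudes, e.g.
# convert_nmgy_to_mag(100.0) == 17.5.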
def filter_params(locs, fluxes, slen, pad = 5):
"""
Remove sources too close to the border of the image.
"""
assert len(locs.shape) == 2
if fluxes is not None:
assert len(fluxes.shape) == 1
assert len(fluxes) == len(locs)
_locs = locs * (slen - 1)
which_params = (_locs[:, 0] >= pad) & (_locs[:, 0] <= (slen - pad)) & \
(_locs[:, 1] >= pad) & (_locs[:, 1] <= (slen - pad))
if fluxes is not None:
return locs[which_params], fluxes[which_params], which_params
else:
return locs[which_params], None, which_params
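# Illustrative example for filter_params: with slen = 21 and pad = 5, a source
# at normalized location (0.5, 0.5) (pixel (10, 10)) is kept, while one at
# (0.0, 0.0) (pixel (0, 0)) is dropped as too close to the border.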
def get_locs_error(locs, true_locs):
"""
Parameters
----------
locs : (n, 2) shaped array of estimated locations
true_locs : (m, 2) shaped array of true locations
Returns
-------
    (m, n)-shaped array of Euclidean distances between
    each estimated location and the true location.
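    Examples
    --------
    Illustrative 3-4-5 right-triangle check of the formula below:
    >>> get_locs_error(np.array([[0., 0.]]), np.array([[3., 4.]]))
    array([[5.]])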
"""
return np.sqrt(np.power(
locs.reshape(1, -1, 2) - true_locs.reshape(-1, 1, 2), 2
).sum(axis=2))
def get_mag_error(mags, true_mags):
"""
Parameters
----------
mags : (n,) shaped array of estimated magnitudes
    true_mags : (m,) shaped array of true magnitudes
Returns
-------
    (m, n)-shaped array of absolute differences between each
    estimated magnitude and each true magnitude.
"""
return np.abs(
mags.reshape(1, -1) - \
true_mags.reshape(-1, 1)
)
def get_summary_stats(
est_locs,
true_locs,
loc_errors,
slen,
nsignal_ci=None,
nelec_per_nmgy=None,
est_fluxes=None,
true_fluxes=None,
flux_errors=None,
pad = 5,
slack = 0.5
):
"""
Parameters
----------
loc_errors : np.ndarray
(n,)-length array of radii of bounding boxes for locations
flux_errors : np.ndarray
(n,)-length array of radii of bounding interval (in log space)
for fluxes.
slen : int
number of pixels in the (square) image
nsignal_ci : list of sets
        List of sets specifying the confidence intervals for the number
of sources.
pad : int
number of pixels of border to exclude
slack : float
expected amount of distance (in pixels) between
true and estimated sources due to differences
in imaging.
"""
# remove border
est_locs, est_fluxes, which_params = filter_params(
est_locs,
est_fluxes,
slen,
pad
)
loc_errors = loc_errors[which_params]
if flux_errors is not None:
flux_errors = flux_errors[which_params]
true_locs, true_fluxes, which_true_locs = filter_params(
true_locs,
true_fluxes,
slen,
pad
)
if (est_fluxes is None) or (true_fluxes is None):
mag_error = 0.
else:
# convert to magnitude
est_mags = convert_nmgy_to_mag(est_fluxes / nelec_per_nmgy)
true_mags = convert_nmgy_to_mag(true_fluxes / nelec_per_nmgy)
mag_error = get_mag_error(est_mags, true_mags)
# location errors
locs_error = get_locs_error(est_locs * (slen - 1), true_locs * (slen - 1))
locs_error_tol = slack + (slen - 1) * loc_errors.reshape(1, -1)
locs_flags = (locs_error < locs_error_tol)
# mag errors
if flux_errors is not None:
mags_error_tol = slack + MAG_CONST * flux_errors.reshape(1, -1)
else:
mags_error_tol = np.inf
mags_flags = (mag_error < mags_error_tol)
    # boolean array (n_sources x n_ests): entry (x, y) indicates whether
    # true source x is matched by (contained in the region of) estimate y
disc_bool = locs_flags * mags_flags
# array : for each true source, is there a matching estimated source
tpr_bool = np.any(disc_bool, axis=1)
if nsignal_ci is None:
# array: for each estimated source, is there a matching true source
ppv_bool = np.any(disc_bool, axis=0)
else:
# same as above, but accounting for number of sources
nsignals = np.sum(disc_bool, axis=0).astype(int)
ppv_bool = np.array([
int(nsignals[i]) in nsignal_ci[i] for i in range(nsignals.shape[0])
])
return tpr_bool, ppv_bool, disc_bool, which_params, which_true_locs
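# Illustrative tolerance arithmetic for get_summary_stats: with slen = 21,
# slack = 0.5 and loc_errors = [0.05], the matching tolerance is
# 0.5 + 20 * 0.05 = 1.5 pixels; a true source within that distance of the
# estimate (and within the magnitude tolerance) counts as recovered.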
def blip_output_to_catalogue(
rej_nodes
):
# Edge case of no rejections
if len(rej_nodes) == 0:
return np.zeros((0, 2)), np.zeros((0,)), np.zeros((0,)), np.zeros((0,))
# Infer dimensionality
keys = rej_nodes[0].data.keys()
d = 1
for key in keys:
if key[0:3] == 'dim':
d = max(d, int(key.split("dim")[-1]) + 1)
# Create estimated locations and errors
locs_est = np.zeros((len(rej_nodes), d))
locs_error = np.zeros((len(rej_nodes)))
locs_peps = np.zeros((len(rej_nodes)))
weights = np.zeros((len(rej_nodes)))
for i, node in enumerate(rej_nodes):
locs_error[i] = node.data['radius']
for j in range(d):
locs_est[i, j] = node.data[f'dim{j}']
locs_peps[i] = node.pep
weights[i] = node.data['weight']
return locs_est, locs_error, locs_peps, weights
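# Illustrative example for blip_output_to_catalogue: a rejection node with
# node.data == {'radius': 0.05, 'dim0': 0.3, 'dim1': 0.7, 'weight': 1.0} and
# node.pep == 0.1 contributes the row [0.3, 0.7] to locs_est, with
# locs_error 0.05, pep 0.1, and weight 1.0.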
def disc_bool_to_ndisc(
disc_bool
):
m = disc_bool.shape[0] # number of sources
n = disc_bool.shape[1] # number of estimated sources
# List of sources/estimates we haven't used yet
undisc_sources = np.ones(m).astype(bool)
unused_ests = set(list(range(n)))
# Iteratively count false/true discoveries and remove
# elements from the source/estimator list
true_disc = 0
false_disc = 0
while len(unused_ests) > 0:
# Work with the estimated source which has the fewest
# number of true discoveries associated with it
lunused_ests = list(unused_ests)
true_disc_per_est = np.sum(
disc_bool[undisc_sources][:, lunused_ests],
axis=0
)
j = np.argmin(true_disc_per_est)
# convert to other indexing
gj = lunused_ests[j]
# Count false discoveries
if true_disc_per_est[j] == 0:
false_disc += 1
# For true discoveries, take a source out of the source set
# to avoid double-counting
else:
source = np.argmax(undisc_sources & disc_bool[:, gj])
undisc_sources[source] = 0
true_disc += 1
unused_ests -= set([gj])
return true_disc, false_disc
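# Illustrative example for disc_bool_to_ndisc: with
# disc_bool = np.array([[True, True], [False, True]]) the greedy loop first
# resolves the estimate with the fewest candidate matches (column 0), pairing
# it with source 0, then pairs column 1 with source 1, giving
# true_disc == 2 and false_disc == 0.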
def catalogue_power_fdr(
locs_true,
locs_est,
locs_error,
slen,
nsignal_ci=None,
weights=None,
slack=0.0,
return_bools=False,
**kwargs
):
tpr_bool, ppv_bool, disc_bool, which_ests, which_locs = get_summary_stats(
est_locs=locs_est,
true_locs=locs_true,
loc_errors=locs_error,
nsignal_ci=nsignal_ci,
slen=slen,
slack=slack,
**kwargs
)
# FDR
if nsignal_ci is None: # for MAP estimates without disjointness
true_disc, false_disc = disc_bool_to_ndisc(disc_bool)
else:
true_disc = np.sum(ppv_bool)
false_disc = locs_est.shape[0] - true_disc
# Naive power
power = true_disc / max(1, np.sum(which_locs))
fdr = false_disc / max(1, locs_est.shape[0])
if locs_est.shape[0] == 0:
assert fdr == 0
if weights is None:
if nsignal_ci is not None:
res_power = np.sum(ppv_bool / np.array([len(x) for x in nsignal_ci]))
else:
res_power = np.sum(ppv_bool / locs_error)
elif weights == 'const':
res_power = true_disc
else:
res_power = np.sum(ppv_bool * weights)
if not return_bools:
return power, fdr, res_power
else:
return power, fdr, res_power, ppv_bool
def plot_rejections_matplotlib(
all_ests,
all_errors,
all_peps,
locs_true,
slen,
est_names=None,
image=None
):
# Number of methods
n_methods = len(all_ests)
if est_names is None:
est_names = [f'Est {j}' for j in range(n_methods)]
scale = slen - 1
# Create overarching figure
ncols = int(np.ceil(n_methods / 2))
    # squeeze=False keeps axarr 2-D even when ncols == 1
    fig, axarr = plt.subplots(2, ncols, figsize=(10, 10), sharey=True, squeeze=False)
for i in range(n_methods):
irow = i // ncols
icol = i % ncols
axarr[irow, icol].matshow(image[0, 0], cmap=plt.cm.gray)
axarr[irow, icol].set_yticks([])
axarr[irow, icol].set_xticks([])
# Plot truth
axarr[irow, icol].scatter(
locs_true[:, 1] * scale,
locs_true[:, 0] * scale,
color='blue',
marker='o',
label='Ground truth'
)
# Plot estimator center
axarr[irow, icol].scatter(
all_ests[i][:, 1] * scale,
all_ests[i][:, 0] * scale,
color='red',
marker='x',
label='Estimated'
)
# Plot rectangles
for j in range(all_ests[i].shape[0]):
radius = all_errors[i][j]
if radius > 0:
                # patches.Circle takes the circle *center*; subtracting the
                # radius here would shift the circle by (-r, -r), a corner-style
                # (Rectangle-like) convention, so we center it on the estimate.
                xj = scale * all_ests[i][j, 1]
                yj = scale * all_ests[i][j, 0]
                circle = patches.Circle(
                    (xj, yj),
scale * radius,
edgecolor='r',
facecolor='r',
alpha=0.5
)
axarr[irow, icol].add_patch(circle)
axarr[irow, icol].text(xj, yj, np.around(all_peps[i][j], 2), color='white')
#axarr[irow, icol].text(xj, yj + 2*scale*radius, np.around(1 / radius, 4), color='white')
axarr[irow, icol].set(title=est_names[i])
plt.show()
| 8,448
| 23.923304
| 93
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/utils.py
|
"""This module provides various helper functions."""
import logging
import sys
import os
import warnings
from functools import wraps
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import numpy as np
from tqdm import tqdm
import astropy
from astropy.utils.data import download_file
from astropy.units.quantity import Quantity
import astropy.units as u
from astropy.visualization import (
PercentileInterval,
ImageNormalize,
SqrtStretch,
LinearStretch,
)
from astropy.time import Time
log = logging.getLogger(__name__)
__all__ = [
"LightkurveError",
"LightkurveWarning",
"KeplerQualityFlags",
"TessQualityFlags",
"bkjd_to_astropy_time",
"btjd_to_astropy_time",
"show_citation_instructions",
]
class QualityFlags(object):
"""Abstract class"""
STRINGS = {}
OPTIONS = {}
@classmethod
def decode(cls, quality):
"""Converts a QUALITY value into a list of human-readable strings.
This function takes the QUALITY bitstring that can be found for each
cadence in Kepler/K2/TESS' pixel and light curve files and converts into
a list of human-readable strings explaining the flags raised (if any).
Parameters
----------
quality : int
Value from the 'QUALITY' column of a Kepler/K2/TESS pixel or lightcurve file.
Returns
-------
flags : list of str
List of human-readable strings giving a short description of the
quality flags raised. Returns an empty list if no flags raised.
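        Examples
        --------
        An illustrative decoding using the Kepler flag values defined below:
        >>> KeplerQualityFlags.decode(1 + 16)
        ['Attitude tweak', 'Zero crossing']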
"""
# If passed an astropy quantity object, get the value
if isinstance(quality, Quantity):
quality = quality.value
result = []
for flag in cls.STRINGS.keys():
if quality & flag > 0:
result.append(cls.STRINGS[flag])
return result
@classmethod
def create_quality_mask(cls, quality_array, bitmask=None):
"""Returns a boolean array which flags good cadences given a bitmask.
This method is used by the readers of :class:`KeplerTargetPixelFile`
and :class:`KeplerLightCurve` to initialize their `quality_mask`
class attribute which is used to ignore bad-quality data.
Parameters
----------
quality_array : array of int
'QUALITY' column of a Kepler target pixel or lightcurve file.
bitmask : int or str
Bitmask (int) or one of 'none', 'default', 'hard', or 'hardest'.
Returns
-------
boolean_mask : array of bool
Boolean array in which `True` means the data is of good quality.
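        Examples
        --------
        An illustrative mask (bit 1, "attitude tweak", is in the default bitmask):
        >>> KeplerQualityFlags.create_quality_mask(np.array([0, 1, 0]), bitmask="default")
        array([ True, False,  True])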
"""
# Return an array filled with `True` by default (i.e. ignore nothing)
if bitmask is None:
return np.ones(len(quality_array), dtype=bool)
if isinstance(quality_array, u.Quantity):
quality_array = quality_array.value
# A few pre-defined bitmasks can be specified as strings
if isinstance(bitmask, str):
try:
bitmask = cls.OPTIONS[bitmask]
except KeyError:
valid_options = tuple(cls.OPTIONS.keys())
raise ValueError(
"quality_bitmask='{}' is not supported, "
"expected one of {}"
"".format(bitmask, valid_options)
)
# The bitmask is applied using the bitwise AND operator
quality_mask = (quality_array & bitmask) == 0
# Log the quality masking as info or warning
n_cadences = len(quality_array)
n_cadences_masked = (~quality_mask).sum()
percent_masked = 100.0 * n_cadences_masked / n_cadences
logmsg = (
"{:.0f}% ({}/{}) of the cadences will be ignored due to the "
"quality mask (quality_bitmask={})."
"".format(percent_masked, n_cadences_masked, n_cadences, bitmask)
)
if percent_masked > 20:
log.warning("Warning: " + logmsg)
else:
log.info(logmsg)
return quality_mask
class KeplerQualityFlags(QualityFlags):
"""
This class encodes the meaning of the various Kepler QUALITY bitmask flags,
as documented in the Kepler Archive Manual (Ref. [1], Table 2.3).
References
----------
.. [1] Kepler: A Search for Terrestrial Planets. Kepler Archive Manual.
http://archive.stsci.edu/kepler/manuals/archive_manual.pdf
"""
AttitudeTweak = 1
SafeMode = 2
CoarsePoint = 4
EarthPoint = 8
ZeroCrossing = 16
Desat = 32
Argabrightening = 64
ApertureCosmic = 128
ManualExclude = 256
    # Bit 2**9 = 512 is unused by Kepler
SensitivityDropout = 1024
ImpulsiveOutlier = 2048
ArgabrighteningOnCCD = 4096
CollateralCosmic = 8192
DetectorAnomaly = 16384
NoFinePoint = 32768
NoData = 65536
RollingBandInAperture = 131072
RollingBandInMask = 262144
PossibleThrusterFiring = 524288
ThrusterFiring = 1048576
#: DEFAULT bitmask identifies all cadences which are definitely useless.
DEFAULT_BITMASK = (
AttitudeTweak
| SafeMode
| CoarsePoint
| EarthPoint
| Desat
| ManualExclude
| DetectorAnomaly
| NoData
| ThrusterFiring
)
#: HARD bitmask is conservative and may identify cadences which are useful.
HARD_BITMASK = (
DEFAULT_BITMASK
| SensitivityDropout
| ApertureCosmic
| CollateralCosmic
| PossibleThrusterFiring
)
#: HARDEST bitmask identifies cadences with any flag set. Its use is not recommended.
HARDEST_BITMASK = 2096639
#: Dictionary which provides friendly names for the various bitmasks.
OPTIONS = {
"none": 0,
"default": DEFAULT_BITMASK,
"hard": HARD_BITMASK,
"hardest": HARDEST_BITMASK,
}
#: Pretty string descriptions for each flag
STRINGS = {
1: "Attitude tweak",
2: "Safe mode",
4: "Coarse point",
8: "Earth point",
16: "Zero crossing",
32: "Desaturation event",
64: "Argabrightening",
128: "Cosmic ray in optimal aperture",
256: "Manual exclude",
1024: "Sudden sensitivity dropout",
2048: "Impulsive outlier",
4096: "Argabrightening on CCD",
8192: "Cosmic ray in collateral data",
16384: "Detector anomaly",
32768: "No fine point",
65536: "No data",
131072: "Rolling band in optimal aperture",
262144: "Rolling band in full mask",
524288: "Possible thruster firing",
1048576: "Thruster firing",
}
class TessQualityFlags(QualityFlags):
"""
This class encodes the meaning of the various TESS QUALITY bitmask flags,
as documented in the TESS Data Products Description Document (Ref. [1], Table 28).
References
----------
.. [1] TESS Science Data Products Description Document (EXP-TESS-ARC-ICD-0014)
https://archive.stsci.edu/missions/tess/doc/EXP-TESS-ARC-ICD-TM-0014.pdf
"""
AttitudeTweak = 1
SafeMode = 2
CoarsePoint = 4
EarthPoint = 8
Argabrightening = 16
Desat = 32
ApertureCosmic = 64
ManualExclude = 128
Discontinuity = 256
ImpulsiveOutlier = 512
CollateralCosmic = 1024
#: The first stray light flag is set manually by MIT based on visual inspection.
Straylight = 2048
#: The second stray light flag is set automatically by Ames/SPOC based on background level thresholds.
Straylight2 = 4096
# See TESS Science Data Products Description Document
PlanetSearchExclude = 8192
BadCalibrationExclude = 16384
# Set in the sector 20 data release notes
InsufficientTargets = 32768
#: DEFAULT bitmask identifies all cadences which are definitely useless.
DEFAULT_BITMASK = (
AttitudeTweak | SafeMode | CoarsePoint | EarthPoint | Desat | ManualExclude
)
#: HARD bitmask is conservative and may identify cadences which are useful.
HARD_BITMASK = (
DEFAULT_BITMASK | ApertureCosmic | CollateralCosmic | Straylight | Straylight2
)
#: HARDEST bitmask identifies cadences with any flag set. Its use is not recommended.
HARDEST_BITMASK = 65535
#: Dictionary which provides friendly names for the various bitmasks.
OPTIONS = {
"none": 0,
"default": DEFAULT_BITMASK,
"hard": HARD_BITMASK,
"hardest": HARDEST_BITMASK,
}
#: Pretty string descriptions for each flag
STRINGS = {
1: "Attitude tweak",
2: "Safe mode",
4: "Coarse point",
8: "Earth point",
16: "Argabrightening",
32: "Desaturation event",
64: "Cosmic ray in optimal aperture",
128: "Manual exclude",
256: "Discontinuity corrected",
512: "Impulsive outlier",
1024: "Cosmic ray in collateral data",
2048: "Straylight",
4096: "Straylight2",
8192: "Planet Search Exclude",
16384: "Bad Calibration Exclude",
32768: "Insufficient Targets for Error Correction Exclude",
}
def channel_to_module_output(channel):
"""Returns a (module, output) pair given a CCD channel number.
Parameters
----------
channel : int
Channel number
Returns
-------
module, output : tuple of ints
Module and Output number
"""
if channel < 1 or channel > 88:
raise ValueError("Channel number must be in the range 1-88.")
lookup = _get_channel_lookup_array()
    # Zero out the module-number column so the search below matches channel values only
    lookup[:, 0] = 0
modout = np.where(lookup == channel)
return modout[0][0], modout[1][0]
def module_output_to_channel(module, output):
"""Returns the CCD channel number for a given module and output pair.
Parameters
----------
module : int
Module number
output : int
Output number
Returns
-------
channel : int
Channel number
"""
if module < 1 or module > 26:
raise ValueError("Module number must be in range 1-26.")
if output < 1 or output > 4:
raise ValueError("Output number must be 1, 2, 3, or 4.")
return _get_channel_lookup_array()[module, output]
def _get_channel_lookup_array():
"""Returns a lookup table which maps (module, output) onto channel."""
# In the array below, channel == array[module][output]
# Note: modules 1, 5, 21, 25 are the FGS guide star CCDs.
return np.array(
[
[0, 0, 0, 0, 0],
[1, 85, 0, 0, 0],
[2, 1, 2, 3, 4],
[3, 5, 6, 7, 8],
[4, 9, 10, 11, 12],
[5, 86, 0, 0, 0],
[6, 13, 14, 15, 16],
[7, 17, 18, 19, 20],
[8, 21, 22, 23, 24],
[9, 25, 26, 27, 28],
[10, 29, 30, 31, 32],
[11, 33, 34, 35, 36],
[12, 37, 38, 39, 40],
[13, 41, 42, 43, 44],
[14, 45, 46, 47, 48],
[15, 49, 50, 51, 52],
[16, 53, 54, 55, 56],
[17, 57, 58, 59, 60],
[18, 61, 62, 63, 64],
[19, 65, 66, 67, 68],
[20, 69, 70, 71, 72],
[21, 87, 0, 0, 0],
[22, 73, 74, 75, 76],
[23, 77, 78, 79, 80],
[24, 81, 82, 83, 84],
[25, 88, 0, 0, 0],
]
)
def running_mean(data, window_size):
"""Returns the moving average of an array `data`.
Parameters
----------
data : array of numbers
The running mean will be computed on this data.
window_size : int
Window length used to compute the running mean.
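    Examples
    --------
    A minimal illustration of the cumulative-sum trick used below:
    >>> running_mean([1, 2, 3, 4], window_size=2)
    array([1.5, 2.5, 3.5])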
"""
if window_size > len(data):
window_size = len(data)
cumsum = np.cumsum(np.insert(data, 0, 0))
return (cumsum[window_size:] - cumsum[:-window_size]) / float(window_size)
def bkjd_to_astropy_time(bkjd) -> Time:
"""Converts Kepler Barycentric Julian Day (BKJD) time values to an
`astropy.time.Time` object.
Kepler Barycentric Julian Day (BKJD) is a Julian day minus 2454833.0
(UTC=January 1, 2009 12:00:00) and corrected to the arrival times
at the barycenter of the Solar System.
BKJD is the format in which times are recorded in the Kepler data products.
The time is in the Barycentric Dynamical Time frame (TDB), which is a
time system that is not affected by leap seconds.
See Section 2.3.2 in the Kepler Archive Manual for details.
Parameters
----------
bkjd : float or array of floats
Barycentric Kepler Julian Day.
Returns
-------
time : `astropy.time.Time` object
Resulting time object.
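    Examples
    --------
    Illustrative check: BKJD 0 corresponds to JD 2454833.0 by definition.
    >>> bkjd_to_astropy_time(0).jd
    array([2454833.])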
"""
bkjd = np.atleast_1d(bkjd)
# Some data products have missing time values;
# we need to set these to zero or `Time` cannot be instantiated.
bkjd[~np.isfinite(bkjd)] = 0
return Time(bkjd, format="bkjd", scale="tdb")
def btjd_to_astropy_time(btjd) -> Time:
"""Converts TESS Barycentric Julian Day (BTJD) values to an
`astropy.time.Time` object.
TESS Barycentric Julian Day (BTJD) is a Julian day minus 2457000.0
and corrected to the arrival times at the barycenter of the Solar System.
BTJD is the format in which times are recorded in the TESS data products.
The time is in the Barycentric Dynamical Time frame (TDB), which is a
time system that is not affected by leap seconds.
Parameters
----------
btjd : float or array of floats
Barycentric TESS Julian Day
Returns
-------
time : `astropy.time.Time` object
Resulting time object.
"""
btjd = np.atleast_1d(btjd)
btjd[~np.isfinite(btjd)] = 0
return Time(btjd, format="btjd", scale="tdb")
def plot_image(
image,
ax=None,
scale="linear",
origin="lower",
xlabel="Pixel Column Number",
ylabel="Pixel Row Number",
clabel="Flux ($e^{-}s^{-1}$)",
title=None,
show_colorbar=True,
vmin=None,
vmax=None,
**kwargs
):
"""Utility function to plot a 2D image
Parameters
----------
image : 2d array
Image data.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
scale : str
Scale used to stretch the colormap.
Options: 'linear', 'sqrt', or 'log'.
origin : str
The origin of the coordinate system.
xlabel : str
Label for the x-axis.
ylabel : str
Label for the y-axis.
clabel : str
Label for the color bar.
title : str or None
Title for the plot.
show_colorbar : bool
Whether or not to show the colorbar
vmin : float
Minimum colorbar value. By default, the 2.5%-percentile is used.
vmax : float
Maximum colorbar value. By default, the 97.5%-percentile is used.
kwargs : dict
Keyword arguments to be passed to `matplotlib.pyplot.imshow`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if isinstance(image, u.Quantity):
image = image.value
if ax is None:
_, ax = plt.subplots()
if vmin is None or vmax is None:
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning) # ignore image NaN values
mask = np.nan_to_num(image) > 0
            if mask.any():
vmin_default, vmax_default = PercentileInterval(95.0).get_limits(
image[mask]
)
else:
vmin_default, vmax_default = 0, 0
if vmin is None:
vmin = vmin_default
if vmax is None:
vmax = vmax_default
norm = None
if scale is not None:
if scale == "linear":
norm = ImageNormalize(
vmin=vmin, vmax=vmax, stretch=LinearStretch(), clip=False
)
elif scale == "sqrt":
norm = ImageNormalize(
vmin=vmin, vmax=vmax, stretch=SqrtStretch(), clip=False
)
elif scale == "log":
# To use log scale we need to guarantee that vmin > 0, so that
# we avoid division by zero and/or negative values.
norm = LogNorm(vmin=max(vmin, sys.float_info.epsilon), vmax=vmax, clip=True)
else:
raise ValueError("scale {} is not available.".format(scale))
cax = ax.imshow(image, origin=origin, norm=norm, **kwargs)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
ax.set_title(title)
if show_colorbar:
cbar = plt.colorbar(cax, ax=ax, label=clabel)
cbar.ax.yaxis.set_tick_params(tick1On=False, tick2On=False)
cbar.ax.minorticks_off()
return ax
class LightkurveError(Exception):
"""Class for Lightkurve exceptions."""
pass
class LightkurveWarning(Warning):
"""Class for all Lightkurve warnings."""
pass
class LightkurveDeprecationWarning(LightkurveWarning):
"""Class for all Lightkurve deprecation warnings."""
pass
def suppress_stdout(f, *args, **kwargs):
"""A simple decorator to suppress function print outputs."""
@wraps(f)
def wrapper(*args, **kwargs):
# redirect output to `null`
with open(os.devnull, "w") as devnull:
old_out = sys.stdout
sys.stdout = devnull
try:
return f(*args, **kwargs)
# restore to default
finally:
sys.stdout = old_out
return wrapper
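# Illustrative usage of the decorator above (hypothetical function name):
#
#     @suppress_stdout
#     def chatty():
#         print("this will not reach the terminal")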
def validate_method(method, supported_methods):
"""Raises a `ValueError` if a method is not supported.
Parameters
----------
method : str
The method specified by the user.
supported_methods : list of str
The methods supported. All method names must be lowercase.
Returns
-------
method : str
Will return the method name if it is supported.
"""
method = method.lower()
if method in supported_methods:
return method
raise ValueError(
"method '{}' is not supported; "
"must be one of {}".format(method, supported_methods)
)
def centroid_quadratic(data, mask=None):
"""Computes the quadratic estimate of the centroid in a 2d-array.
This method will fit a simple 2D second-order polynomial
$P(x, y) = a + bx + cy + dx^2 + exy + fy^2$
to the 3x3 patch of pixels centered on the brightest pixel within
the image. This function approximates the core of the Point
Spread Function (PSF) using a bivariate quadratic function, and returns
the maximum (x, y) coordinate of the function using linear algebra.
For the motivation and the details around this technique, please refer
to Vakili, M., & Hogg, D. W. 2016, ArXiv, 1610.05873.
Caveat: if the brightest pixel falls on the edge of the data array, the fit
will tend to fail or be inaccurate.
Parameters
----------
data : 2D array
The 2D input array representing the pixel values of the image.
mask : array_like (bool), optional
A boolean mask, with the same shape as `data`, where a **True** value
indicates the corresponding element of data is masked.
Returns
-------
column, row : tuple
The coordinates of the centroid in column and row. If the fit failed,
then (NaN, NaN) will be returned.
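    Examples
    --------
    Illustrative sanity check: a symmetric 3x3 peak is recovered at its pixel.
    >>> col, row = centroid_quadratic(np.array([[0., 0., 0.],
    ...                                         [0., 1., 0.],
    ...                                         [0., 0., 0.]]))
    >>> float(col), float(row)
    (1.0, 1.0)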
"""
if isinstance(data, u.Quantity):
data = data.value
# Step 1: identify the patch of 3x3 pixels (z_)
# that is centered on the brightest pixel (xx, yy)
if mask is not None:
data = data * mask
arg_data_max = np.nanargmax(data)
yy, xx = np.unravel_index(arg_data_max, data.shape)
# Make sure the 3x3 patch does not leave the TPF bounds
if yy < 1:
yy = 1
if xx < 1:
xx = 1
if yy > (data.shape[0] - 2):
yy = data.shape[0] - 2
if xx > (data.shape[1] - 2):
xx = data.shape[1] - 2
z_ = data[yy - 1 : yy + 2, xx - 1 : xx + 2]
# Next, we will fit the coefficients of the bivariate quadratic with the
# help of a design matrix (A) as defined by Eqn 20 in Vakili & Hogg
# (arxiv:1610.05873). The design matrix contains a
# column of ones followed by pixel coordinates: x, y, x**2, xy, y**2.
A = np.array(
[
[1, -1, -1, 1, 1, 1],
[1, 0, -1, 0, 0, 1],
[1, 1, -1, 1, -1, 1],
[1, -1, 0, 1, 0, 0],
[1, 0, 0, 0, 0, 0],
[1, 1, 0, 1, 0, 0],
[1, -1, 1, 1, -1, 1],
[1, 0, 1, 0, 0, 1],
[1, 1, 1, 1, 1, 1],
]
)
# We also pre-compute $(A^t A)^-1 A^t$, cf. Eqn 21 in Vakili & Hogg.
At = A.transpose()
# In Python 3 this can become `Aprime = np.linalg.inv(At @ A) @ At`
Aprime = np.matmul(np.linalg.inv(np.matmul(At, A)), At)
# Step 2: fit the polynomial $P = a + bx + cy + dx^2 + exy + fy^2$
# following Equation 21 in Vakili & Hogg.
# In Python 3 this can become `Aprime @ z_.flatten()`
a, b, c, d, e, f = np.matmul(Aprime, z_.flatten())
# Step 3: analytically find the function maximum,
# following https://en.wikipedia.org/wiki/Quadratic_function
det = 4 * d * f - e ** 2
if abs(det) < 1e-6:
return np.nan, np.nan # No solution
xm = -(2 * f * b - c * e) / det
ym = -(2 * d * c - b * e) / det
return xx + xm, yy + ym
def _query_solar_system_objects(
ra, dec, times, radius=0.1, location="kepler", cache=True, show_progress=True
):
"""Returns a list of asteroids/comets given a position and time.
This function relies on The Virtual Observatory Sky Body Tracker (SkyBot)
service which can be found at http://vo.imcce.fr/webservices/skybot/
Parameters
----------
ra : float
Right Ascension in degrees.
dec : float
Declination in degrees.
times : array of float
Times in Julian Date.
radius : float
Search radius in degrees.
location : str
Spacecraft location. Options include `'kepler'` and `'tess'`.
cache : bool
Whether to cache the search result. Default is True.
show_progress : bool
Whether to display a progress bar during the download. Default is True.
Returns
-------
result : `pandas.DataFrame`
DataFrame containing the list of known solar system objects at the
requested time and location.
"""
# We import pandas locally, because it takes quite a bit of time to import,
# and it is only required for this specific feature.
import pandas as pd
if (location.lower() == "kepler") or (location.lower() == "k2"):
location = "C55"
elif location.lower() == "tess":
location = "C57"
url = "http://vo.imcce.fr/webservices/skybot/skybotconesearch_query.php?"
url += "-mime=text&"
url += "-ra={}&".format(ra)
url += "-dec={}&".format(dec)
url += "-bd={}&".format(radius)
url += "-loc={}&".format(location)
df = None
times = np.atleast_1d(times)
    # Note: `not show_progress` (rather than `~show_progress`) is required here,
    # since `~` on a Python bool is bitwise negation and is always truthy.
    for time in tqdm(times, desc="Querying for SSOs", disable=not show_progress):
url_queried = url + "EPOCH={}".format(time)
response = download_file(url_queried, cache=cache, show_progress=show_progress)
if open(response).read(10) == "# Flag: -1": # error code detected?
raise IOError(
"SkyBot Solar System query failed.\n"
"URL used:\n" + url_queried + "\n"
"Response received:\n" + open(response).read()
)
res = pd.read_csv(response, delimiter="|", skiprows=2)
if len(res) > 0:
res["epoch"] = time
res.rename(
{"# Num ": "Num", " Name ": "Name", " Class ": "Class", " Mv ": "Mv"},
inplace=True,
axis="columns",
)
res = res[["Num", "Name", "Class", "Mv", "epoch"]].reset_index(drop=True)
if df is None:
df = res
else:
df = pd.concat([df, res])
if df is not None:
        df = df.reset_index(drop=True)
return df
def show_citation_instructions():
"""Show citation instructions."""
from . import PACKAGEDIR, __citation__
# To make installing Lightkurve easier, ipython is an optional dependency,
# because we can assume it is installed when notebook-specific features are called
try:
from IPython.display import HTML
ipython_installed = True
except ModuleNotFoundError:
ipython_installed = False
if not is_notebook() or not ipython_installed:
print(__citation__)
else:
from pathlib import Path # local import to speed up `import lightkurve`
import astroquery # local import to speed up `import lightkurve`
templatefile = Path(PACKAGEDIR, "data", "show_citation_instructions.html")
template = open(templatefile, "r").read()
template = template.replace("LIGHTKURVE_CITATION", __citation__)
template = template.replace("ASTROPY_CITATION", astropy.__citation__)
template = template.replace("ASTROQUERY_CITATION", astroquery.__citation__)
return HTML(template)
def _get_notebook_environment():
"""Returns 'jupyter', 'colab', or 'terminal'.
One can detect whether or not a piece of Python is running by executing
`get_ipython().__class__`, which returns the following result:
* Jupyter notebook: `ipykernel.zmqshell.ZMQInteractiveShell`
* Google colab: `google.colab._shell.Shell`
* IPython terminal: `IPython.terminal.interactiveshell.TerminalInteractiveShell`
* Python terminal: `NameError: name 'get_ipython' is not defined`
"""
try:
ipy = str(type(get_ipython())).lower()
if "zmqshell" in ipy:
return "jupyter"
if "colab" in ipy:
return "colab"
except NameError:
pass # get_ipython() is not a builtin
return "terminal"
def is_notebook():
"""Returns `True` if we are running in a notebook."""
return _get_notebook_environment() in ["jupyter", "colab"]
| 26,156
| 31.292593
| 106
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/lightcurve.py
|
"""Defines LightCurve, KeplerLightCurve, and TessLightCurve."""
import os
import datetime
import logging
import warnings
import collections
from collections.abc import Sequence
import numpy as np
from scipy.signal import savgol_filter
from scipy.interpolate import interp1d
import matplotlib
from matplotlib import pyplot as plt
from copy import deepcopy
from astropy.table import Table, Column, MaskedColumn
from astropy.io import fits
from astropy.time import TimeBase, Time, TimeDelta
from astropy import units as u
from astropy.units import Quantity
from astropy.timeseries import TimeSeries, aggregate_downsample
from astropy.table import vstack
from astropy.stats import calculate_bin_edges
from astropy.utils.decorators import deprecated, deprecated_renamed_argument
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
from . import PACKAGEDIR, MPLSTYLE
from .utils import (
running_mean,
bkjd_to_astropy_time,
btjd_to_astropy_time,
validate_method,
_query_solar_system_objects,
)
from .utils import LightkurveWarning, LightkurveDeprecationWarning
__all__ = ["LightCurve", "KeplerLightCurve", "TessLightCurve", "FoldedLightCurve"]
log = logging.getLogger(__name__)
_HAS_VAR_BINS = 'time_bin_end' in aggregate_downsample.__kwdefaults__
def _to_unitless_day(data):
if isinstance(data, Quantity):
return data.to(u.day).value
elif not np.isscalar(data):
return np.asarray([_to_unitless_day(item) for item in data]).flatten()
else:
return data
def _is_dict_like(data1):
return hasattr(data1, "keys") and callable(getattr(data1, "keys"))
def _is_list_like(data1):
# https://stackoverflow.com/a/37842328
return isinstance(data1, Sequence) and not isinstance(data1, str)
def _is_np_structured_array(data1):
return isinstance(data1, np.ndarray) and data1.dtype.names is not None
class LightCurve(TimeSeries):
"""
Subclass of AstroPy `~astropy.table.Table` guaranteed to have *time*, *flux*, and *flux_err* columns.
Compared to the generic `~astropy.timeseries.TimeSeries` class, `LightCurve`
ensures that each object has `time`, `flux`, and `flux_err` columns.
These three columns are special for two reasons:
1. they are the key columns upon which all light curve operations operate;
2. they are always present (though they may be populated with ``NaN`` values).
`LightCurve` objects also provide user-friendly attribute access to
columns and meta data.
Parameters
----------
data : numpy ndarray, dict, list, `~astropy.table.Table`, or table-like object, optional
Data to initialize time series. This does not need to contain the times
or fluxes, which can be provided separately, but if it does contain the
times and fluxes they should be in columns called ``'time'``,
``'flux'``, and ``'flux_err'`` to be automatically recognized.
time : `~astropy.time.Time` or iterable
Time values. They can either be given directly as a
`~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class.
flux : `~astropy.units.Quantity` or iterable
Flux values for every time point.
flux_err : `~astropy.units.Quantity` or iterable
Uncertainty on each flux data point.
**kwargs : dict
Additional keyword arguments are passed to `~astropy.table.QTable`.
Attributes
----------
meta : `dict`
meta data associated with the lightcurve. The header of the underlying FITS file (if applicable)
is store in this dictionary. By convention, keys in this dictionary are usually in uppercase.
Notes
-----
*Attribute access*: You can access a column or a ``meta`` value directly as an attribute.
>>> lc.flux # shortcut for lc['flux'] # doctest: +SKIP
>>> lc.sector # shortcut for lc.meta['SECTOR'] # doctest: +SKIP
>>> lc.flux = lc.flux * 1.05 # update the values of a column. # doctest: +SKIP
In case the given name is both a column name and a key in ``meta``, the column will be returned.
Note that you *cannot* create a new column using the attribute interface. If you do so,
a new attribute is created instead, and a warning is raised.
If you do create such attributes on purpose, please note that the attributes are not carried
over when the lightcurve object is copied, or a new lightcurve object is derived
based on a copy, e.g., ``normalize()``.
Examples
--------
>>> import lightkurve as lk
>>> lc = lk.LightCurve(time=[1, 2, 3, 4], flux=[0.98, 1.02, 1.03, 0.97])
>>> lc.time
<Time object: scale='tdb' format='jd' value=[1. 2. 3. 4.]>
>>> lc.flux
<Quantity [0.98, 1.02, 1.03, 0.97]>
>>> lc.bin(time_bin_size=2, time_bin_start=0.5).flux
<Quantity [1., 1.]>
"""
# The constructor of the `TimeSeries` base class will enforce the presence
# of these columns:
_required_columns = ["time", "flux", "flux_err"]
# The following keywords were removed in Lightkurve v2.0.
# Their use will trigger a warning.
_deprecated_keywords = (
"targetid",
"label",
"time_format",
"time_scale",
"flux_unit",
)
_deprecated_column_keywords = [
"centroid_col",
"centroid_row",
"cadenceno",
"quality",
]
# If an iterable is passed for ``time``, we will initialize an AstroPy
# ``Time`` object using the following format and scale:
_default_time_format = "jd"
_default_time_scale = "tdb"
# To emulate pandas, we do not support creating new columns or meta data
# fields via attribute assignment, and raise a warning in __setattr__ when
# a new attribute is created. We need to relax this warning during the
# initial construction of the object using `_new_attributes_relax`.
_new_attributes_relax = True
# cf. issue #925
__array_priority__ = 100_000
def __init__(self, data=None, *args, time=None, flux=None, flux_err=None, **kwargs):
        # The `{has,get,set}_time_in_data()` functions below are helpers that handle
        # `data` of different types; some of them also need access to kwargs["names"].
def get_time_idx_in(names):
time_indices = np.argwhere(np.asarray(names) == "time")
if len(time_indices) > 0:
return time_indices[0][0]
else:
return None
def get_time_in_data_list():
if len(data) < 1:
return None
names = kwargs.get("names")
if names is None:
# the first item MUST be time if no names specified
if isinstance(data[0], TimeBase): # Time or TimeDelta
return data[0]
else:
return None
else:
time_idx = get_time_idx_in(names)
if time_idx is not None:
return data[time_idx]
else:
return None
def set_time_in_data_list(value):
if len(data) < 1:
raise AssertionError("data should be non-empty")
names = kwargs.get("names")
if names is None:
# the first item MUST be time if no names specified
# this is to support base Table's select columns
# in __getitem__()
# https://github.com/astropy/astropy/blob/326435449ad8d859f1abf36800c3fb88d49c27ea/astropy/table/table.py#L1888
data[0] = value
else:
time_idx = get_time_idx_in(names)
if time_idx is not None:
data[time_idx] = value
else:
raise AssertionError("data should have time column")
def get_time_in_data_np_structured_array():
            if data.dtype.names is None:  # no labeled field, i.e. not a structured array
return None
if "time" not in data.dtype.names:
return None
return data["time"]
def remove_time_from_data_np_structured_array():
if data.dtype.names is None:
raise AssertionError("data should be a numpy structured array")
if "time" not in data.dtype.names:
raise AssertionError("data should have a time field")
filtered_names = [n for n in data.dtype.names if n != "time"]
return data[filtered_names]
def has_time_in_data():
"""Check if the data has a column with the name"""
if data is None:
return False
elif _is_dict_like(data):
# data is a dict-like object with keys
return "time" in data.keys()
elif _is_list_like(data):
# case data is a list-like object (a list of columns, etc.)
return get_time_in_data_list() is not None
elif _is_np_structured_array(data):
# case numpy structured array (supported by base TimeSeries)
# https://numpy.org/doc/stable/user/basics.rec.html
return get_time_in_data_np_structured_array() is not None
else:
raise ValueError(f"Unsupported type for time in data: {type(data)}")
def get_time_in_data():
if _is_dict_like(data):
# data is a dict-like object with keys
return data["time"]
elif _is_list_like(data):
return get_time_in_data_list()
elif _is_np_structured_array(data):
return get_time_in_data_np_structured_array()
else:
                # should never reach here; it would have been caught by `has_time_in_data()`
raise AssertionError("Unsupported type for time in data")
def set_time_in_data(value):
if _is_dict_like(data):
# data is a dict-like object with keys
data["time"] = value
elif _is_list_like(data):
set_time_in_data_list(value)
elif _is_np_structured_array(data):
# astropy Time cannot be assigned to a column in np structured array
# we have special codepath handling it outside this function
raise AssertionError("Setting Time instances to np structured array is not supported")
else:
                # should never reach here; it would have been caught by `has_time_in_data()`
raise AssertionError("Unsupported type for time in data")
# Delay checking for required columns until the end
self._required_columns_relax = True
# Lightkurve v1.x supported passing time, flux, and flux_err as
# positional arguments. We support it here for backwards compatibility.
if len(args) in [1, 2]:
warnings.warn(
"passing flux as a positional argument is deprecated"
", please use ``flux=...`` instead.",
LightkurveDeprecationWarning,
)
time = data
flux = args[0]
data = None
if len(args) == 2:
flux_err = args[1]
# For backwards compatibility with Lightkurve v1.x,
# we support passing deprecated keywords via **kwargs.
deprecated_kws = {}
for kw in self._deprecated_keywords:
if kw in kwargs:
deprecated_kws[kw] = kwargs.pop(kw)
deprecated_column_kws = {}
for kw in self._deprecated_column_keywords:
if kw in kwargs:
deprecated_column_kws[kw] = kwargs.pop(kw)
# If `time` is passed as keyword argument, we populate it with integer numbers
if data is None or not has_time_in_data():
if time is None and flux is not None:
time = np.arange(len(flux))
# We are tolerant of missing time format
if time is not None and not isinstance(time, (Time, TimeDelta)):
# Lightkurve v1.x supported specifying the time_format
# as a constructor kwarg
time = Time(
time,
format=deprecated_kws.get("time_format", self._default_time_format),
scale=deprecated_kws.get("time_scale", self._default_time_scale),
)
# Also be tolerant of missing time format if time is passed via `data`
if data is not None and has_time_in_data():
if not isinstance(get_time_in_data(), (Time, TimeDelta)):
tmp_time = Time(
get_time_in_data(),
format=deprecated_kws.get("time_format", self._default_time_format),
scale=deprecated_kws.get("time_scale", self._default_time_scale),
)
if _is_np_structured_array(data):
# special case for np structured array
# one cannot set a `Time` instance to it
# so we set the time to the `time` param, and take it out of data
time = tmp_time
data = remove_time_from_data_np_structured_array()
else:
set_time_in_data(tmp_time)
# Allow overriding the required columns
self._required_columns = kwargs.pop("_required_columns", self._required_columns)
# Call the SampledTimeSeries constructor.
# Disable required columns for now; we'll check those later.
tmp = self._required_columns
self._required_columns = []
super().__init__(data=data, time=time, **kwargs)
self._required_columns = tmp
# For some operations, an empty time series needs to be created, then
# columns added one by one. We should check that when columns are added
# manually, time is added first and is of the right type.
if data is None and time is None and flux is None and flux_err is None:
self._required_columns_relax = True
return
# Load `time`, `flux`, and `flux_err` from the table as local variable names
time = self.columns["time"] # super().__init__() guarantees this is a column
if "flux" in self.colnames:
if flux is None:
flux = self.columns["flux"]
else:
raise TypeError(
f"'flux' has been given both in the `data` table and as a keyword argument"
)
if "flux_err" in self.colnames:
if flux_err is None:
flux_err = self.columns["flux_err"]
else:
raise TypeError(
f"'flux_err' has been given both in the `data` table and as a keyword argument"
)
# Ensure `flux` and `flux_err` are populated with NaNs if missing
if flux is None and time is not None:
flux = np.empty(len(time))
flux[:] = np.nan
if not isinstance(flux, Quantity):
flux = Quantity(flux, deprecated_kws.get("flux_unit"))
if flux_err is None:
flux_err = np.empty(len(flux))
flux_err[:] = np.nan
if not isinstance(flux_err, Quantity):
flux_err = Quantity(flux_err, flux.unit)
# Backwards compatibility with Lightkurve v1.x
# Ensure attributes are set if passed via deprecated kwargs
for kw in deprecated_kws:
if kw not in self.meta:
self.meta[kw.upper()] = deprecated_kws[kw]
# Ensure all required columns are in the right order
with self._delay_required_column_checks():
for idx, col in enumerate(self._required_columns):
if col in self.colnames:
self.remove_column(col)
self.add_column(locals()[col], index=idx, name=col)
# Ensure columns are set if passed via deprecated kwargs
for kw in deprecated_column_kws:
if kw not in self.meta and kw not in self.columns:
self.add_column(deprecated_column_kws[kw], name=kw)
# Ensure flux and flux_err have the same units
if self["flux"].unit != self["flux_err"].unit:
raise ValueError("flux and flux_err must have the same units")
self._new_attributes_relax = False
self._required_columns_relax = False
self._check_required_columns()
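    # The `__getattr__` and `__setattr__` overrides below transparently expose
    # data columns and meta dictionary entries (matched case-sensitively first,
    # then upper-cased) as attributes, e.g. ``lc.sap_flux`` or ``lc.mission``.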
def __getattr__(self, name, **kwargs):
"""Expose all columns and meta keywords as attributes."""
if name in self.__dict__:
return self.__dict__[name]
elif name in self.__class__.__dict__:
return self.__class__.__dict__[name].__get__(self)
elif name in self.columns:
return self[name]
elif "_meta" in self.__dict__:
if name in self.__dict__["_meta"]:
return self.__dict__["_meta"][name]
elif name.upper() in self.__dict__["_meta"]:
return self.__dict__["_meta"][name.upper()]
raise AttributeError(f"object has no attribute {name}")
def __setattr__(self, name, value, **kwargs):
"""To get copied, attributes have to be stored in the meta dictionary!"""
to_set_as_attr = False
if name in self.__dict__:
to_set_as_attr = True
elif name == "time":
self["time"] = value # astropy will convert value to Time if needed
elif ("columns" in self.__dict__) and (name in self.__dict__["columns"]):
self.replace_column(name, value)
elif "_meta" in self.__dict__:
if name in self.__dict__["_meta"]:
self.__dict__["_meta"][name] = value
elif name.upper() in self.__dict__["_meta"]:
self.__dict__["_meta"][name.upper()] = value
else:
to_set_as_attr = True
else:
to_set_as_attr = True
if to_set_as_attr:
if (
name not in self.__dict__
and not name.startswith("_")
and not self._new_attributes_relax
and name != 'meta'
):
warnings.warn(
                    (
                        "Lightkurve doesn't allow columns or meta values to be created via a new attribute name. "
                        "A new attribute is created. It will not be carried over when the object is copied."
" - see https://docs.lightkurve.org/reference/api/lightkurve.LightCurve.html"
),
UserWarning,
stacklevel=2,
)
super().__setattr__(name, value, **kwargs)
def _repr_simple_(self) -> str:
"""Returns a simple __repr__.
Used by `LightCurveCollection`.
"""
result = f"<{self.__class__.__name__}"
if "LABEL" in self.meta:
result += f" LABEL=\"{self.meta.get('LABEL')}\""
for kw in ["QUARTER", "CAMPAIGN", "SECTOR", "AUTHOR", "FLUX_ORIGIN"]:
if kw in self.meta:
result += f" {kw}={self.meta.get(kw)}"
result += ">"
return result
def _base_repr_(self, html=False, descr_vals=None, **kwargs):
"""Defines the description shown by `__repr__` and `_html_repr_`."""
if descr_vals is None:
descr_vals = [self.__class__.__name__]
if self.masked:
descr_vals.append("masked=True")
descr_vals.append("length={}".format(len(self)))
if "LABEL" in self.meta:
descr_vals.append(f"LABEL=\"{self.meta.get('LABEL')}\"")
for kw in ["QUARTER", "CAMPAIGN", "SECTOR", "AUTHOR", "FLUX_ORIGIN"]:
if kw in self.meta:
descr_vals.append(f"{kw}={self.meta.get(kw)}")
return super()._base_repr_(html=html, descr_vals=descr_vals, **kwargs)
    # Define `time`, `flux`, `flux_err` as class attributes to enable
    # IDE auto-completion of these required columns.
@property
def time(self) -> Time:
"""Time values stored as an AstroPy `~astropy.time.Time` object."""
return self["time"]
@time.setter
def time(self, time):
self["time"] = time
@property
def flux(self) -> Quantity:
"""Brightness values stored as an AstroPy `~astropy.units.Quantity` object."""
return self["flux"]
@flux.setter
def flux(self, flux):
self["flux"] = flux
@property
def flux_err(self) -> Quantity:
"""Brightness uncertainties stored as an AstroPy `~astropy.units.Quantity` object."""
return self["flux_err"]
@flux_err.setter
def flux_err(self, flux_err):
self["flux_err"] = flux_err
def select_flux(self, flux_column, flux_err_column=None):
"""Assign a different column to be the flux column.
This method returns a copy of the LightCurve in which the ``flux``
and ``flux_err`` columns have been replaced by the values contained
in a different column.
Parameters
----------
flux_column : str
Name of the column that should become the 'flux' column.
flux_err_column : str or `None`
Name of the column that should become the 'flux_err' column.
            By default, the column obtained by adding the suffix "_err" to the
            value of ``flux_column`` will be used. If such a column does not
            exist, ``flux_err`` will be populated with NaN values.
Returns
-------
lc : LightCurve
Copy of the ``LightCurve`` object with the new flux values assigned.
Examples
--------
You can use this function to change the flux data on which most Lightkurve
features operate. For example, to view a periodogram based on the "sap_flux"
column in a TESS light curve, use::
>>> lc.select_flux("sap_flux").to_periodogram("lombscargle").plot() # doctest: +SKIP
"""
# Input validation
if flux_column not in self.columns:
raise ValueError(f"'{flux_column}' is not a column")
if flux_err_column and flux_err_column not in self.columns:
raise ValueError(f"'{flux_err_column}' is not a column")
lc = self.copy()
lc["flux"] = lc[flux_column]
if flux_err_column: # not None
lc["flux_err"] = lc[flux_err_column]
else:
# if `flux_err_column` is unspecified, we attempt to use
# f"{flux_column}_err" if it exists
flux_err_column = f"{flux_column}_err"
if flux_err_column in lc.columns:
lc["flux_err"] = lc[flux_err_column]
else:
lc["flux_err"][:] = np.nan
lc.meta['FLUX_ORIGIN'] = flux_column
normalized_new_flux = lc["flux"].unit is None or lc["flux"].unit is u.dimensionless_unscaled
        # Note: here we assume that unitless flux means the light curve is
        # normalized. This is not strictly true for many light curves
        # constructed in unit tests, but the assumption should hold for
        # real-world use cases, e.g. TESS QLP.
if normalized_new_flux:
lc.meta["NORMALIZED"] = normalized_new_flux
else:
            # Remove the keyword altogether: setting it to False would suffice,
            # but in a typical non-normalized light curve the header keyword
            # is absent entirely.
lc.meta.pop("NORMALIZED", None)
return lc
# Define deprecated attributes for compatibility with Lightkurve v1.x:
@property
@deprecated(
"2.0", alternative="time.format", warning_type=LightkurveDeprecationWarning
)
def time_format(self):
return self.time.format
@property
@deprecated(
"2.0", alternative="time.scale", warning_type=LightkurveDeprecationWarning
)
def time_scale(self):
return self.time.scale
@property
@deprecated("2.0", alternative="time", warning_type=LightkurveDeprecationWarning)
def astropy_time(self):
return self.time
@property
@deprecated(
"2.0", alternative="flux.unit", warning_type=LightkurveDeprecationWarning
)
def flux_unit(self):
return self.flux.unit
@property
@deprecated("2.0", alternative="flux", warning_type=LightkurveDeprecationWarning)
def flux_quantity(self):
return self.flux
@property
@deprecated(
"2.0",
alternative="fits.open(lc.filename)",
warning_type=LightkurveDeprecationWarning,
)
def hdu(self):
with fits.open(self.filename) as hdulist:
hdulist = hdulist.copy()
return hdulist
@property
@deprecated("2.0", warning_type=LightkurveDeprecationWarning)
def SAP_FLUX(self):
"""A copy of the light curve in which `lc.flux = lc.sap_flux`
and `lc.flux_err = lc.sap_flux_err`. It is provided for backwards-
compatibility with Lightkurve v1.x and will be removed soon."""
lc = self.copy()
lc["flux"] = lc["sap_flux"]
lc["flux_err"] = lc["sap_flux_err"]
return lc
@property
@deprecated("2.0", warning_type=LightkurveDeprecationWarning)
def PDCSAP_FLUX(self):
"""A copy of the light curve in which `lc.flux = lc.pdcsap_flux`
and `lc.flux_err = lc.pdcsap_flux_err`. It is provided for backwards-
compatibility with Lightkurve v1.x and will be removed soon."""
lc = self.copy()
lc["flux"] = lc["pdcsap_flux"]
lc["flux_err"] = lc["pdcsap_flux_err"]
return lc
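    # The arithmetic operators below combine light curves element-wise and
    # propagate flux uncertainties using standard error-propagation rules,
    # e.g. uncertainties add in quadrature (`np.hypot`) under addition.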
def __add__(self, other):
newlc = self.copy()
if isinstance(other, LightCurve):
if len(self) != len(other):
raise ValueError(
"Cannot add LightCurve objects because "
"they do not have equal length ({} vs {})."
"".format(len(self), len(other))
)
if np.any(self.time != other.time):
warnings.warn(
"Two LightCurve objects with inconsistent time "
"values are being added.",
LightkurveWarning,
)
newlc.flux = self.flux + other.flux
newlc.flux_err = np.hypot(self.flux_err, other.flux_err)
else:
newlc.flux = self.flux + other
return newlc
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return self.__add__(-1 * other)
def __rsub__(self, other):
return (-1 * self).__add__(other)
def __mul__(self, other):
newlc = self.copy()
if isinstance(other, LightCurve):
if len(self) != len(other):
raise ValueError(
"Cannot multiply LightCurve objects because "
"they do not have equal length ({} vs {})."
"".format(len(self), len(other))
)
if np.any(self.time != other.time):
warnings.warn(
"Two LightCurve objects with inconsistent time "
"values are being multiplied.",
LightkurveWarning,
)
newlc.flux = self.flux * other.flux
# Applying standard uncertainty propagation, cf.
# https://en.wikipedia.org/wiki/Propagation_of_uncertainty#Example_formulae
newlc.flux_err = abs(newlc.flux) * np.hypot(
self.flux_err / self.flux, other.flux_err / other.flux
)
elif isinstance(
other, (u.UnitBase, u.FunctionUnitBase)
): # cf. astropy/issues/6517
newlc.flux = other * self.flux
newlc.flux_err = other * self.flux_err
else:
newlc.flux = other * self.flux
newlc.flux_err = abs(other) * self.flux_err
return newlc
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.__mul__(1.0 / other)
def __rtruediv__(self, other):
newlc = self.copy()
if isinstance(other, LightCurve):
if len(self) != len(other):
raise ValueError(
"Cannot divide LightCurve objects because "
"they do not have equal length ({} vs {})."
"".format(len(self), len(other))
)
if np.any(self.time != other.time):
warnings.warn(
"Two LightCurve objects with inconsistent time "
"values are being divided.",
LightkurveWarning,
)
newlc.flux = other.flux / self.flux
newlc.flux_err = abs(newlc.flux) * np.hypot(
self.flux_err / self.flux, other.flux_err / other.flux
)
else:
newlc.flux = other / self.flux
newlc.flux_err = abs((other * self.flux_err) / (self.flux ** 2))
return newlc
def __div__(self, other):
return self.__truediv__(other)
def __rdiv__(self, other):
return self.__rtruediv__(other)
def show_properties(self):
"""Prints a description of all non-callable attributes.
Prints in order of type (ints, strings, lists, arrays, others).
"""
attrs = {}
deprecated_properties = list(self._deprecated_keywords)
deprecated_properties += [
"flux_quantity",
"SAP_FLUX",
"PDCSAP_FLUX",
"astropy_time",
"hdu",
]
for attr in dir(self):
if not attr.startswith("_") and attr not in deprecated_properties:
try:
res = getattr(self, attr)
except Exception:
continue
if callable(res):
continue
attrs[attr] = {"res": res}
if isinstance(res, int):
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "int"
elif isinstance(res, np.ndarray):
attrs[attr]["print"] = "array {}".format(res.shape)
attrs[attr]["type"] = "array"
elif isinstance(res, list):
attrs[attr]["print"] = "list length {}".format(len(res))
attrs[attr]["type"] = "list"
elif isinstance(res, str):
if res == "":
attrs[attr]["print"] = "{}".format("None")
else:
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "str"
elif attr == "wcs":
attrs[attr]["print"] = "astropy.wcs.wcs.WCS"
attrs[attr]["type"] = "other"
else:
attrs[attr]["print"] = "{}".format(type(res))
attrs[attr]["type"] = "other"
output = Table(names=["Attribute", "Description"], dtype=[object, object])
idx = 0
types = ["int", "str", "list", "array", "other"]
for typ in types:
for attr, dic in attrs.items():
if dic["type"] == typ:
output.add_row([attr, dic["print"]])
idx += 1
output.pprint(max_lines=-1, max_width=-1)
def append(self, others, inplace=False):
"""Append one or more other `LightCurve` object(s) to this one.
Parameters
----------
others : `LightCurve`, or list of `LightCurve`
Light curve(s) to be appended to the current one.
inplace : bool
If True, change the current `LightCurve` instance in place instead
of creating and returning a new one. Defaults to False.
Returns
-------
new_lc : `LightCurve`
            Light curve which has the other light curves appended to it.
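        Examples
        --------
        A minimal sketch (assuming ``lc1`` and ``lc2`` are existing
        `LightCurve` objects observing the same target):

        >>> combined_lc = lc1.append(lc2)  # doctest: +SKIP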
"""
if inplace:
raise ValueError(
"the `inplace` parameter is no longer supported "
"as of Lightkurve v2.0"
)
if not hasattr(others, "__iter__"):
others = (others,)
# Re-use LightCurveCollection.stitch() to avoid code duplication
from .collections import LightCurveCollection # avoid circular import
return LightCurveCollection((self, *others)).stitch(corrector_func=None)
def flatten(
self,
window_length=101,
polyorder=2,
return_trend=False,
break_tolerance=5,
niters=3,
sigma=3,
mask=None,
**kwargs,
):
"""Removes the low frequency trend using scipy's Savitzky-Golay filter.
This method wraps `scipy.signal.savgol_filter`.
Parameters
----------
window_length : int
The length of the filter window (i.e. the number of coefficients).
``window_length`` must be a positive odd integer.
polyorder : int
The order of the polynomial used to fit the samples. ``polyorder``
must be less than window_length.
return_trend : bool
If `True`, the method will return a tuple of two elements
(flattened_lc, trend_lc) where trend_lc is the removed trend.
break_tolerance : int
If there are large gaps in time, flatten will split the flux into
several sub-lightcurves and apply `savgol_filter` to each
individually. A gap is defined as a period in time larger than
`break_tolerance` times the median gap. To disable this feature,
set `break_tolerance` to None.
niters : int
            Number of iterations to iteratively sigma-clip and flatten. If greater
            than one, the flattening is performed multiple times, removing
            outliers each time.
sigma : int
            Number of sigma above which to remove outliers during flattening.
mask : boolean array with length of self.time
Boolean array to mask data with before flattening. Flux values where
mask is True will not be used to flatten the data. An interpolated
result will be provided for these points. Use this mask to remove
data you want to preserve, e.g. transits.
**kwargs : dict
Dictionary of arguments to be passed to `scipy.signal.savgol_filter`.
Returns
-------
flatten_lc : `LightCurve`
New light curve object with long-term trends removed.
If ``return_trend`` is set to ``True``, this method will also return:
trend_lc : `LightCurve`
New light curve object containing the trend that was removed.
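        Examples
        --------
        A typical call (assuming ``lc`` is an existing `LightCurve`); the
        window length of 401 cadences is an arbitrary illustrative choice:

        >>> flat_lc, trend_lc = lc.flatten(window_length=401, return_trend=True)  # doctest: +SKIP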
"""
if mask is None:
mask = np.ones(len(self.time), dtype=bool)
else:
# Deep copy ensures we don't change the original.
mask = deepcopy(~mask)
# Add NaNs & outliers to the mask
extra_mask = np.isfinite(self.flux)
extra_mask &= np.nan_to_num(np.abs(self.flux - np.nanmedian(self.flux))) <= (
np.nanstd(self.flux) * sigma
)
# In astropy>=5.0, extra_mask is a masked array
if hasattr(extra_mask, 'mask'):
mask &= extra_mask.filled(False)
else: # support astropy<5.0
mask &= extra_mask
        for _ in range(niters):
if break_tolerance is None:
break_tolerance = np.nan
if polyorder >= window_length:
polyorder = window_length - 1
log.warning(
"polyorder must be smaller than window_length, "
"using polyorder={}.".format(polyorder)
)
# Split the lightcurve into segments by finding large gaps in time
dt = self.time.value[mask][1:] - self.time.value[mask][0:-1]
with warnings.catch_warnings(): # Ignore warnings due to NaNs
warnings.simplefilter("ignore", RuntimeWarning)
cut = np.where(dt > break_tolerance * np.nanmedian(dt))[0] + 1
low = np.append([0], cut)
high = np.append(cut, len(self.time[mask]))
# Then, apply the savgol_filter to each segment separately
trend_signal = Quantity(np.zeros(len(self.time[mask])), unit=self.flux.unit)
for l, h in zip(low, high):
# Reduce `window_length` and `polyorder` for short segments;
# this prevents `savgol_filter` from raising an exception
# If the segment is too short, just take the median
if np.any([window_length > (h - l), (h - l) < break_tolerance]):
trend_signal[l:h] = np.nanmedian(self.flux[mask][l:h])
else:
# Scipy outputs a warning here that is not useful, will be fixed in version 1.2
with warnings.catch_warnings():
warnings.simplefilter("ignore", FutureWarning)
trsig = savgol_filter(
x=self.flux.value[mask][l:h],
window_length=window_length,
polyorder=polyorder,
**kwargs,
)
trend_signal[l:h] = Quantity(trsig, trend_signal.unit)
# Ignore outliers; note we add `1e-14` below to avoid detecting
# outliers which are merely caused by numerical noise.
mask1 = np.nan_to_num(np.abs(self.flux[mask] - trend_signal)) < (
np.nanstd(self.flux[mask] - trend_signal) * sigma
+ Quantity(1e-14, self.flux.unit)
)
f = interp1d(
self.time.value[mask][mask1],
trend_signal[mask1],
fill_value="extrapolate",
)
trend_signal = Quantity(f(self.time.value), self.flux.unit)
# In astropy>=5.0, mask1 is a masked array
if hasattr(mask1, 'mask'):
mask[mask] &= mask1.filled(False)
else: # support astropy<5.0
mask[mask] &= mask1
flatten_lc = self.copy()
with warnings.catch_warnings():
# ignore invalid division warnings
warnings.simplefilter("ignore", RuntimeWarning)
flatten_lc.flux = flatten_lc.flux / trend_signal
flatten_lc.flux_err = flatten_lc.flux_err / trend_signal
flatten_lc.meta["NORMALIZED"] = True
if return_trend:
trend_lc = self.copy()
trend_lc.flux = trend_signal
return flatten_lc, trend_lc
return flatten_lc
@deprecated_renamed_argument(
"transit_midpoint",
"epoch_time",
"2.0",
warning_type=LightkurveDeprecationWarning,
)
@deprecated_renamed_argument(
"t0", "epoch_time", "2.0", warning_type=LightkurveDeprecationWarning
)
def fold(
self,
period=None,
epoch_time=None,
epoch_phase=0,
wrap_phase=None,
normalize_phase=False,
):
"""Returns a `FoldedLightCurve` object folded on a period and epoch.
This method is identical to AstroPy's `~astropy.timeseries.TimeSeries.fold()`
method, except it returns a `FoldedLightCurve` object which offers
convenient plotting methods.
Parameters
----------
period : float `~astropy.units.Quantity`
The period to use for folding. If a ``float`` is passed we'll
assume it is in units of days.
epoch_time : `~astropy.time.Time`
The time to use as the reference epoch, at which the relative time
offset / phase will be ``epoch_phase``. Defaults to the first time
in the time series.
epoch_phase : float or `~astropy.units.Quantity`
Phase of ``epoch_time``. If ``normalize_phase`` is `True`, this
should be a dimensionless value, while if ``normalize_phase`` is
``False``, this should be a `~astropy.units.Quantity` with time
units. Defaults to 0.
wrap_phase : float or `~astropy.units.Quantity`
The value of the phase above which values are wrapped back by one
period. If ``normalize_phase`` is `True`, this should be a
dimensionless value, while if ``normalize_phase`` is ``False``,
this should be a `~astropy.units.Quantity` with time units.
Defaults to half the period, so that the resulting time series goes
from ``-period / 2`` to ``period / 2`` (if ``normalize_phase`` is
`False`) or -0.5 to 0.5 (if ``normalize_phase`` is `True`).
normalize_phase : bool
If `False` phase is returned as `~astropy.time.TimeDelta`,
otherwise as a dimensionless `~astropy.units.Quantity`.
Returns
-------
folded_lightcurve : `FoldedLightCurve`
The folded light curve object in which the ``time`` column
holds the phase values.
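        Examples
        --------
        A minimal sketch (assuming ``lc`` is an existing `LightCurve` whose
        time is in JD; the period and epoch below are hypothetical values,
        e.g. obtained from a BLS periodogram):

        >>> folded_lc = lc.fold(period=3.52, epoch_time=2454953.5)  # doctest: +SKIP
        >>> folded_lc.scatter()  # doctest: +SKIP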
"""
# Lightkurve v1.x assumed that `period` was given in days if no unit
# was specified. We maintain this behavior for backwards-compatibility.
if period is not None and not isinstance(period, Quantity):
period *= u.day
if epoch_time is not None and not isinstance(epoch_time, Time):
epoch_time = Time(
epoch_time, format=self.time.format, scale=self.time.scale
)
if (
epoch_phase is not None
and not isinstance(epoch_phase, Quantity)
and not normalize_phase
):
epoch_phase *= u.day
if wrap_phase is not None and not isinstance(wrap_phase, Quantity):
wrap_phase *= u.day
# Warn if `epoch_time` appears to use the wrong format
if epoch_time is not None and epoch_time.value > 2450000:
if self.time.format == "bkjd":
warnings.warn(
"`epoch_time` appears to be given in JD, "
"however the light curve time uses BKJD "
"(i.e. JD - 2454833).",
LightkurveWarning,
)
elif self.time.format == "btjd":
warnings.warn(
"`epoch_time` appears to be given in JD, "
"however the light curve time uses BTJD "
"(i.e. JD - 2457000).",
LightkurveWarning,
)
ts = super().fold(
period=period,
epoch_time=epoch_time,
epoch_phase=epoch_phase,
wrap_phase=wrap_phase,
normalize_phase=normalize_phase,
)
        # The folded time would not pass the `TimeSeries` validation check if
# `normalize_phase=True`, so creating a `FoldedLightCurve` object
# requires the following three-step workaround:
# 1. Give the folded light curve a valid time column again
with ts._delay_required_column_checks():
folded_time = ts.time.copy()
ts.remove_column("time")
ts.add_column(self.time, name="time", index=0)
# 2. Create the folded object
lc = FoldedLightCurve(data=ts)
# 3. Restore the folded time
with lc._delay_required_column_checks():
lc.remove_column("time")
lc.add_column(folded_time, name="time", index=0)
# Add extra column and meta data specific to FoldedLightCurve
lc.add_column(
self.time.copy(), name="time_original", index=len(self._required_columns)
)
lc.meta["PERIOD"] = period
lc.meta["EPOCH_TIME"] = epoch_time
lc.meta["EPOCH_PHASE"] = epoch_phase
lc.meta["WRAP_PHASE"] = wrap_phase
lc.meta["NORMALIZE_PHASE"] = normalize_phase
lc.sort("time")
return lc
def normalize(self, unit="unscaled"):
"""Returns a normalized version of the light curve.
The normalized light curve is obtained by dividing the ``flux`` and
``flux_err`` object attributes by the median flux.
Optionally, the result will be multiplied by 1e2 (if `unit='percent'`),
1e3 (`unit='ppt'`), or 1e6 (`unit='ppm'`).
Parameters
----------
unit : 'unscaled', 'percent', 'ppt', 'ppm'
The desired relative units of the normalized light curve;
'ppt' means 'parts per thousand', 'ppm' means 'parts per million'.
Examples
--------
>>> import lightkurve as lk
>>> lc = lk.LightCurve(time=[1, 2, 3], flux=[25945.7, 25901.5, 25931.2], flux_err=[6.8, 4.6, 6.2])
>>> normalized_lc = lc.normalize()
>>> normalized_lc.flux
<Quantity [1.00055917, 0.99885466, 1. ]>
>>> normalized_lc.flux_err
<Quantity [0.00026223, 0.00017739, 0.00023909]>
Returns
-------
normalized_lightcurve : `LightCurve`
A new light curve object in which ``flux`` and ``flux_err`` have
been divided by the median flux.
Warns
-----
LightkurveWarning
If the median flux is negative or within half a standard deviation
from zero.
"""
validate_method(unit, ["unscaled", "percent", "ppt", "ppm"])
median_flux = np.nanmedian(self.flux)
std_flux = np.nanstd(self.flux)
# If the median flux is within half a standard deviation from zero, the
# light curve is likely zero-centered and normalization makes no sense.
if (median_flux == 0) or (
np.isfinite(std_flux) and (np.abs(median_flux) < 0.5 * std_flux)
):
warnings.warn(
"The light curve appears to be zero-centered "
"(median={:.2e} +/- {:.2e}); `normalize()` will divide "
"the light curve by a value close to zero, which is "
"probably not what you want."
"".format(median_flux, std_flux),
LightkurveWarning,
)
# If the median flux is negative, normalization will invert the light
# curve and makes no sense.
if median_flux < 0:
            warnings.warn(
                "The light curve has a negative median flux ({:.2e});"
                " `normalize()` will therefore divide by a negative "
                "number and invert the light curve, which is probably "
                "not what you want.".format(median_flux),
LightkurveWarning,
)
# Create a new light curve instance and normalize its values
lc = self.copy()
lc.flux = lc.flux / median_flux
lc.flux_err = lc.flux_err / median_flux
if not lc.flux.unit:
lc.flux *= u.dimensionless_unscaled
if not lc.flux_err.unit:
lc.flux_err *= u.dimensionless_unscaled
# Set the desired relative (dimensionless) units
if unit == "percent":
lc.flux = lc.flux.to(u.percent)
lc.flux_err = lc.flux_err.to(u.percent)
elif unit in ("ppt", "ppm"):
lc.flux = lc.flux.to(unit)
lc.flux_err = lc.flux_err.to(unit)
lc.meta["NORMALIZED"] = True
return lc
def remove_nans(self, column: str = "flux"):
"""Removes cadences where ``column`` is a NaN.
Parameters
----------
column : str
Column to check for NaNs. Defaults to ``'flux'``.
Returns
-------
clean_lightcurve : `LightCurve`
A new light curve object from which NaNs fluxes have been removed.
Examples
--------
>>> import lightkurve as lk
>>> import numpy as np
>>> lc = lk.LightCurve({'time': [1, 2, 3], 'flux': [1., np.nan, 1.]})
>>> lc.remove_nans()
<LightCurve length=2>
time flux flux_err
<BLANKLINE>
Time float64 float64
---- ------- --------
1.0 1.0 nan
3.0 1.0 nan
"""
return self[~np.isnan(self[column])] # This will return a sliced copy
def fill_gaps(self, method: str = "gaussian_noise"):
r"""Fill in gaps in time.
By default, the gaps will be filled with random white Gaussian noise
distributed according to
:math:`\mathcal{N} (\mu=\overline{\mathrm{flux}}, \sigma=\mathrm{CDPP})`.
No other methods are supported at this time.
Parameters
----------
method : string {'gaussian_noise'}
Method to use for gap filling. Fills with Gaussian noise by default.
Returns
-------
filled_lightcurve : `LightCurve`
A new light curve object in which all NaN values and gaps in time
have been filled.
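        Examples
        --------
        A minimal sketch (assuming ``lc`` is an existing `LightCurve` with
        gaps in time). Cadences inserted by this method are flagged by
        adding 65536 to the ``quality`` column, if one is present:

        >>> filled_lc = lc.fill_gaps()  # doctest: +SKIP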
"""
lc = self.copy().remove_nans()
newdata = {}
# Find missing time points
# Most precise method, taking into account time variation due to orbit
if hasattr(lc, "cadenceno"):
dt = lc.time.value - np.median(np.diff(lc.time.value)) * lc.cadenceno.value
ncad = np.arange(lc.cadenceno.value[0], lc.cadenceno.value[-1] + 1, 1)
in_original = np.in1d(ncad, lc.cadenceno.value)
ncad = ncad[~in_original]
ndt = np.interp(ncad, lc.cadenceno.value, dt)
ncad = np.append(ncad, lc.cadenceno.value)
ndt = np.append(ndt, dt)
ncad, ndt = ncad[np.argsort(ncad)], ndt[np.argsort(ncad)]
ntime = ndt + np.median(np.diff(lc.time.value)) * ncad
newdata["cadenceno"] = ncad
else:
# Less precise method
dt = np.nanmedian(lc.time.value[1::] - lc.time.value[:-1:])
ntime = [lc.time.value[0]]
for t in lc.time.value[1::]:
prevtime = ntime[-1]
while (t - prevtime) > 1.2 * dt:
ntime.append(prevtime + dt)
prevtime = ntime[-1]
ntime.append(t)
ntime = np.asarray(ntime, float)
in_original = np.in1d(ntime, lc.time.value)
# Fill in time points
newdata["time"] = Time(ntime, format=lc.time.format, scale=lc.time.scale)
f = np.zeros(len(ntime))
f[in_original] = np.copy(lc.flux)
fe = np.zeros(len(ntime))
fe[in_original] = np.copy(lc.flux_err)
        # Temporary workaround for issue #1172. TODO: remove the `if` statement
# below once we adopt AstroPy >=5.0.3 as a minimum dependency.
if hasattr(lc.flux_err, 'mask'):
fe[~in_original] = np.interp(ntime[~in_original], lc.time.value, lc.flux_err.unmasked)
else:
fe[~in_original] = np.interp(ntime[~in_original], lc.time.value, lc.flux_err)
if method == "gaussian_noise":
try:
std = lc.estimate_cdpp().to(lc.flux.unit).value
            except Exception:
std = np.nanstd(lc.flux.value)
f[~in_original] = np.random.normal(
np.nanmean(lc.flux.value), std, (~in_original).sum()
)
else:
raise NotImplementedError("No such method as {}".format(method))
newdata["flux"] = Quantity(f, lc.flux.unit)
newdata["flux_err"] = Quantity(fe, lc.flux_err.unit)
if hasattr(lc, "quality"):
quality = np.zeros(len(ntime), dtype=lc.quality.dtype)
quality[in_original] = np.copy(lc.quality)
quality[~in_original] += 65536
newdata["quality"] = quality
"""
# TODO: add support for other columns
for column in lc.columns:
if column in ("time", "flux", "flux_err", "quality"):
continue
old_values = lc[column]
new_values = np.empty(len(ntime), dtype=old_values.dtype)
new_values[~in_original] = np.nan
new_values[in_original] = np.copy(old_values)
newdata[column] = new_values
"""
return LightCurve(data=newdata, meta=self.meta)
def remove_outliers(
self, sigma=5.0, sigma_lower=None, sigma_upper=None, return_mask=False, **kwargs
):
"""Removes outlier data points using sigma-clipping.
This method returns a new `LightCurve` object from which data points
are removed if their flux values are greater or smaller than the median
flux by at least ``sigma`` times the standard deviation.
Sigma-clipping works by iterating over data points, each time rejecting
values that are discrepant by more than a specified number of standard
deviations from a center value. If the data contains invalid values
(NaNs or infs), they are automatically masked before performing the
sigma clipping.
.. note::
This function is a convenience wrapper around
`astropy.stats.sigma_clip()` and provides the same functionality.
Any extra arguments passed to this method will be passed on to
``sigma_clip``.
Parameters
----------
sigma : float
The number of standard deviations to use for both the lower and
upper clipping limit. These limits are overridden by
``sigma_lower`` and ``sigma_upper``, if input. Defaults to 5.
sigma_lower : float or None
The number of standard deviations to use as the lower bound for
the clipping limit. Can be set to float('inf') in order to avoid
clipping outliers below the median at all. If `None` then the
value of ``sigma`` is used. Defaults to `None`.
sigma_upper : float or None
The number of standard deviations to use as the upper bound for
the clipping limit. Can be set to float('inf') in order to avoid
clipping outliers above the median at all. If `None` then the
value of ``sigma`` is used. Defaults to `None`.
return_mask : bool
Whether or not to return a mask (i.e. a boolean array) indicating
which data points were removed. Entries marked as `True` in the
mask are considered outliers. This mask is not returned by default.
**kwargs : dict
Dictionary of arguments to be passed to `astropy.stats.sigma_clip`.
Returns
-------
clean_lc : `LightCurve`
A new light curve object from which outlier data points have been
removed.
outlier_mask : NumPy array, optional
Boolean array flagging which cadences were removed.
Only returned if `return_mask=True`.
Examples
--------
This example generates a new light curve in which all points
that are more than 1 standard deviation from the median are removed::
>>> lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1, 1000, 1, -1000, 1])
>>> lc_clean = lc.remove_outliers(sigma=1)
>>> lc_clean.time
<Time object: scale='tdb' format='jd' value=[1. 3. 5.]>
>>> lc_clean.flux
<Quantity [1., 1., 1.]>
Instead of specifying `sigma`, you may specify separate `sigma_lower`
and `sigma_upper` parameters to remove only outliers above or below
the median. For example::
>>> lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1, 1000, 1, -1000, 1])
>>> lc_clean = lc.remove_outliers(sigma_lower=float('inf'), sigma_upper=1)
>>> lc_clean.time
<Time object: scale='tdb' format='jd' value=[1. 3. 4. 5.]>
>>> lc_clean.flux
<Quantity [ 1., 1., -1000., 1.]>
Optionally, you may use the `return_mask` parameter to return a boolean
array which flags the outliers identified by the method. For example::
>>> lc_clean, mask = lc.remove_outliers(sigma=1, return_mask=True)
>>> mask
array([False, True, False, True, False])
"""
# The import time for `sigma_clip` is somehow very slow, so we use
# a local import here.
from astropy.stats.sigma_clipping import sigma_clip
# First, we create the outlier mask using AstroPy's sigma_clip function
with warnings.catch_warnings(): # Ignore warnings due to NaNs or Infs
warnings.simplefilter("ignore")
flux = self.flux
if isinstance(flux, Masked):
# Workaround for https://github.com/astropy/astropy/issues/14360
# in passing MaskedQuantity to sigma_clip, by converting it to Quantity.
# We explicitly fill masked values with `np.nan` here to ensure they are masked during sigma clipping.
            # To handle an unlikely edge case, convert int to float to ensure filling `np.nan` works.
# The conversion is acceptable because only the mask of the sigma_clip() result is used.
if np.issubdtype(flux.dtype, np.int_):
flux = flux.astype(float)
flux = flux.filled(np.nan)
outlier_mask = sigma_clip(
data=flux,
sigma=sigma,
sigma_lower=sigma_lower,
sigma_upper=sigma_upper,
**kwargs,
).mask
# Second, we return the masked light curve and optionally the mask itself
if return_mask:
return self.copy()[~outlier_mask], outlier_mask
return self.copy()[~outlier_mask]
@deprecated_renamed_argument(
"binsize",
new_name=None,
since="2.0",
warning_type=LightkurveDeprecationWarning,
alternative="time_bin_size",
)
def bin(
self,
time_bin_size=None,
time_bin_start=None,
time_bin_end=None,
n_bins=None,
aggregate_func=None,
bins=None,
binsize=None,
):
"""Bins a lightcurve in equally-spaced bins in time.
If the original light curve contains flux uncertainties (``flux_err``),
the binned lightcurve will report the root-mean-square error.
If no uncertainties are included, the binned curve will return the
standard deviation of the data.
Parameters
----------
time_bin_size : `~astropy.units.Quantity` or `~astropy.time.TimeDelta`, optional
The time interval for the binned time series - this is either a scalar
value (in which case all time bins will be assumed to have the same
duration) or as an array of values (in which case each time bin can
have a different duration). If this argument is provided,
``time_bin_end`` should not be provided.
(Default: 0.5 days; default unit: days.)
time_bin_start : `~astropy.time.Time` or iterable, optional
The start time for the binned time series - this can be either given
directly as a `~astropy.time.Time` array or as any iterable that
initializes the `~astropy.time.Time` class. This can also be a scalar
value if ``time_bin_size`` is provided. Defaults to the first
time in the sampled time series.
time_bin_end : `~astropy.time.Time` or iterable, optional
The times of the end of each bin - this can be either given directly as
a `~astropy.time.Time` array or as any iterable that initializes the
`~astropy.time.Time` class. This can only be given if ``time_bin_start``
is an array of values. If ``time_bin_end`` is a scalar, time bins are
assumed to be contiguous, such that the end of each bin is the start
of the next one, and ``time_bin_end`` gives the end time for the last
bin. If ``time_bin_end`` is an array, the time bins do not need to be
contiguous. If this argument is provided, ``time_bin_size`` should not
be provided. This option, like the iterable form of ``time_bin_start``,
requires Astropy 5.0.
n_bins : int, optional
The number of bins to use. Defaults to the number needed to fit all
the original points. Note that this will create this number of bins
            of length ``time_bin_size`` independent of the light curve length.
aggregate_func : callable, optional
The function to use for combining points in the same bin. Defaults
to np.nanmean.
bins : int, iterable or str, optional
            If an int, this gives the number of bins to divide the light curve into.
In contrast to ``n_bins`` this adjusts the length of ``time_bin_size``
to accommodate the input time series length.
If it is an iterable of ints, it specifies the indices of the bin edges.
If a string, it must be one of 'blocks', 'knuth', 'scott' or 'freedman'
defining a method of automatically determining an optimal bin size.
See `~astropy.stats.histogram` for a description of each method.
Note that 'blocks' is not a useful method for regularly sampled data.
binsize : int
In Lightkurve v1.x, the default behavior of `bin()` was to create
            bins which contained an equal number of data points in each bin.
This type of binning is discouraged because it usually makes more sense to
create equally-sized bins in time duration, which is the new default
behavior in Lightkurve v2.x. Nevertheless, this `binsize` parameter
allows users to simulate the old behavior of Lightkurve v1.x.
For ease of implementation, setting this parameter is identical to passing
            ``time_bin_size = lc.time[binsize] - lc.time[0]``, which means that
the bins are not guaranteed to contain an identical number of
data points.
Returns
-------
binned_lc : `LightCurve`
A new light curve which has been binned.
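        Examples
        --------
        A minimal sketch (assuming ``lc`` is an existing `LightCurve`)
        which bins the data into 6-hour bins:

        >>> import astropy.units as u  # doctest: +SKIP
        >>> binned_lc = lc.bin(time_bin_size=6 * u.hour)  # doctest: +SKIP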
"""
kwargs = dict()
if binsize is not None and bins is not None:
raise ValueError("Only one of ``bins`` and ``binsize`` can be specified.")
elif (binsize is not None or bins is not None) and (
time_bin_size is not None or n_bins is not None
):
raise ValueError(
"``bins`` or ``binsize`` conflicts with "
"``n_bins`` or ``time_bin_size``."
)
elif bins is not None:
if (bins not in ('blocks', 'knuth', 'scott', 'freedman') and
np.array(bins).dtype != np.int_):
raise TypeError("``bins`` must have integer type.")
elif (isinstance(bins, str) or np.size(bins) != 1) and not _HAS_VAR_BINS:
raise ValueError("Sequence or method for ``bins`` requires Astropy 5.0.")
if time_bin_start is None:
time_bin_start = self.time[0]
if not isinstance(time_bin_start, (Time, TimeDelta)):
if isinstance(self.time, TimeDelta):
time_bin_start = TimeDelta(
time_bin_start, format=self.time.format, scale=self.time.scale
)
else:
time_bin_start = Time(
time_bin_start, format=self.time.format, scale=self.time.scale
)
# Backwards compatibility with Lightkurve v1.x
if time_bin_size is None:
if bins is not None:
if np.size(bins) == 1 and _HAS_VAR_BINS:
# This actually calculates equal-length bins just as the method below;
# should it instead set equal-number bins with binsize=int(len(self) / bins)?
# Get start times in mjd and convert back to original format
bin_starts = calculate_bin_edges(self.time.mjd, bins=bins)[:-1]
time_bin_start = Time(Time(bin_starts, format='mjd'), format=self.time.format)
elif np.size(bins) == 1:
warnings.warn(
'"classic" `bins` require Astropy 5.0; will use constant lengths in time.',
LightkurveWarning)
# Odd memory error in np.searchsorted with pytest-memtest?
if self.time[0] >= time_bin_start:
i = len(self.time)
else:
i = len(self.time) - np.searchsorted(self.time, time_bin_start)
time_bin_size = ((self.time[-1] - time_bin_start) * i /
((i - 1) * bins)).to(u.day)
else:
time_bin_start = self.time[bins[:-1]]
kwargs['time_bin_end'] = self.time[bins[1:]]
elif binsize is not None:
if _HAS_VAR_BINS:
time_bin_start = self.time[::binsize]
else:
warnings.warn(
'`binsize` requires Astropy 5.0 to guarantee equal number of points; '
'will use estimated time lengths for bins.', LightkurveWarning)
if self.time[0] >= time_bin_start:
i = 0
else:
i = np.searchsorted(self.time, time_bin_start)
time_bin_size = (self.time[i + binsize] - self.time[i]).to(u.day)
else:
time_bin_size = 0.5 * u.day
elif not isinstance(time_bin_size, Quantity):
time_bin_size *= u.day
# Call AstroPy's aggregate_downsample
with warnings.catch_warnings():
# ignore uninteresting empty slice warnings
warnings.simplefilter("ignore", (RuntimeWarning, AstropyUserWarning))
ts = aggregate_downsample(
self,
time_bin_size=time_bin_size,
n_bins=n_bins,
time_bin_start=time_bin_start,
aggregate_func=aggregate_func,
**kwargs
)
# If `flux_err` is populated, assume the errors combine as the root-mean-square
if np.any(np.isfinite(self.flux_err)):
rmse_func = (
lambda x: np.sqrt(np.nansum(x ** 2)) / len(np.atleast_1d(x))
if np.any(np.isfinite(x))
else np.nan
)
ts_err = aggregate_downsample(
self,
time_bin_size=time_bin_size,
n_bins=n_bins,
time_bin_start=time_bin_start,
aggregate_func=rmse_func,
)
ts["flux_err"] = ts_err["flux_err"]
# If `flux_err` is unavailable, populate `flux_err` as nanstd(flux)
else:
ts_err = aggregate_downsample(
self,
time_bin_size=time_bin_size,
n_bins=n_bins,
time_bin_start=time_bin_start,
aggregate_func=np.nanstd,
)
ts["flux_err"] = ts_err["flux"]
# Prepare a LightCurve object by ensuring there is a time column
ts._required_columns = []
ts.add_column(ts.time_bin_start + ts.time_bin_size / 2.0, name="time")
# Ensure the required columns appear in the correct order
for idx, colname in enumerate(self.__class__._required_columns):
tmpcol = ts[colname]
ts.remove_column(colname)
ts.add_column(tmpcol, name=colname, index=idx)
return self.__class__(ts, meta=self.meta)
def estimate_cdpp(
self, transit_duration=13, savgol_window=101, savgol_polyorder=2, sigma=5.0
) -> float:
"""Estimate the CDPP noise metric using the Savitzky-Golay (SG) method.
A common estimate of the noise in a lightcurve is the scatter that
remains after all long term trends have been removed. This is the idea
behind the Combined Differential Photometric Precision (CDPP) metric.
The official Kepler Pipeline computes this metric using a wavelet-based
algorithm to calculate the signal-to-noise of the specific waveform of
transits of various durations. In this implementation, we use the
simpler "sgCDPP proxy algorithm" discussed by Gilliland et al
(2011ApJS..197....6G) and Van Cleve et al (2016PASP..128g5002V).
The steps of this algorithm are:
1. Remove low frequency signals using a Savitzky-Golay filter with
window length `savgol_window` and polynomial order `savgol_polyorder`.
2. Remove outliers by rejecting data points which are separated from
the mean by `sigma` times the standard deviation.
3. Compute the standard deviation of a running mean with
a configurable window length equal to `transit_duration`.
We use a running mean (as opposed to block averaging) to strongly
attenuate the signal above 1/transit_duration whilst retaining
the original frequency sampling. Block averaging would set the Nyquist
limit to 1/transit_duration.
Parameters
----------
transit_duration : int, optional
The transit duration in units of number of cadences. This is the
length of the window used to compute the running mean. The default
is 13, which corresponds to a 6.5 hour transit in data sampled at
30-min cadence.
savgol_window : int, optional
Width of Savitsky-Golay filter in cadences (odd number).
Default value 101 (2.0 days in Kepler Long Cadence mode).
savgol_polyorder : int, optional
Polynomial order of the Savitsky-Golay filter.
The recommended value is 2.
sigma : float, optional
The number of standard deviations to use for clipping outliers.
The default is 5.
Returns
-------
cdpp : float
Savitzky-Golay CDPP noise metric in units parts-per-million (ppm).
Notes
-----
This implementation is adapted from the Matlab version used by
Jeff van Cleve but lacks the normalization factor used there:
svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/compute_SG_noise.m
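        Examples
        --------
        A minimal sketch (assuming ``lc`` is an existing Kepler long-cadence
        `LightCurve`), estimating the noise on a 6.5-hour (13-cadence)
        transit timescale:

        >>> cdpp = lc.estimate_cdpp(transit_duration=13)  # doctest: +SKIP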
"""
if not isinstance(transit_duration, int):
raise ValueError(
"transit_duration must be an integer in units "
"number of cadences, got {}.".format(transit_duration)
)
detrended_lc = self.flatten(
window_length=savgol_window, polyorder=savgol_polyorder
)
cleaned_lc = detrended_lc.remove_outliers(sigma=sigma)
with warnings.catch_warnings(): # ignore "already normalized" message
warnings.filterwarnings("ignore", message=".*already.*")
normalized_lc = cleaned_lc.normalize("ppm")
mean = running_mean(data=normalized_lc.flux, window_size=transit_duration)
return np.std(mean)
def query_solar_system_objects(
self,
cadence_mask="outliers",
radius=None,
sigma=3,
location=None,
cache=True,
return_mask=False,
show_progress=True,
):
"""Returns a list of asteroids or comets which affected the light curve.
Light curves of stars or galaxies are frequently affected by solar
system bodies (e.g. asteroids, comets, planets). These objects can move
across a target's photometric aperture mask on time scales of hours to
days. When they pass through a mask, they tend to cause a brief spike
in the brightness of the target. They can also cause dips by moving
through a local background aperture mask (if any is used).
        The artificial spikes and dips introduced by asteroids are frequently
        confused with stellar flares, planet transits, etc. This method helps
        to identify false signals injected by asteroids by providing a list of
the solar system objects (name, brightness, time) that passed in the
vicinity of the target during the span of the light curve.
This method queries the `SkyBot API <http://vo.imcce.fr/webservices/skybot/>`_,
which returns a list of asteroids/comets/planets given a location, time,
and search cone.
Notes
-----
* This method will use the `ra` and `dec` properties of the `LightCurve`
object to determine the position of the search cone.
* The size of the search cone is 15 spacecraft pixels by default. You
can change this by passing the `radius` parameter (unit: degrees).
* By default, this method will only search points in time during which the light
curve showed 3-sigma outliers in flux. You can override this behavior
and search for specific times by passing `cadence_mask`. See examples for details.
Parameters
----------
cadence_mask : str, or boolean array with length of self.time
mask in time to select which frames or points should be searched for SSOs.
Default "outliers" will search for SSOs at points that are `sigma` from the mean.
"all" will search all cadences. Alternatively, pass a boolean array with values of "True"
for times to search for SSOs.
radius : optional, float
Radius in degrees to search for bodies. If None, will search for
SSOs within 15 pixels.
sigma : optional, float
            If `cadence_mask` is set to `"outliers"`, `sigma` will be used to identify
outliers.
location : optional, str
Spacecraft location. Options include `'kepler'` and `'tess'`. Default: `self.mission`
cache : optional, bool
If True will cache the search result in the astropy cache. Set to False
to request the search again.
        return_mask : optional, bool
            If True, will return a boolean mask in time alongside the result.
        show_progress : optional, bool
            If True, will display a progress bar during the download.
Returns
-------
result : `pandas.DataFrame`
DataFrame object which lists the Solar System objects in frames
that were identified to contain SSOs. Returns `None` if no objects
were found.
Examples
--------
Find if there are SSOs affecting the lightcurve for the given time frame:
>>> df_sso = lc.query_solar_system_objects(cadence_mask=(lc.time.value >= 2014.1) & (lc.time.value <= 2014.9)) # doctest: +SKIP
Find if there are SSOs affecting the lightcurve for all times, but it will be much slower:
>>> df_sso = lc.query_solar_system_objects(cadence_mask='all') # doctest: +SKIP
"""
for attr in ["ra", "dec"]:
if not hasattr(self, "{}".format(attr)):
raise ValueError("Input does not have a `{}` attribute.".format(attr))
# Validate `cadence_mask`
if isinstance(cadence_mask, str):
if cadence_mask == "outliers":
cadence_mask = self.remove_outliers(sigma=sigma, return_mask=True)[1]
elif cadence_mask == "all":
cadence_mask = np.ones(len(self.time)).astype(bool)
else:
raise ValueError("invalid `cadence_mask` string argument")
elif isinstance(cadence_mask, collections.abc.Sequence):
cadence_mask = np.array(cadence_mask)
        elif isinstance(cadence_mask, bool):
# for boundary case of a single element tuple, e.g., (True)
cadence_mask = np.array([cadence_mask])
elif not isinstance(cadence_mask, np.ndarray):
raise ValueError("the `cadence_mask` argument is missing or invalid")
# Avoid searching times with NaN flux; this is necessary because e.g.
# `remove_outliers` includes NaNs in its mask.
if hasattr(self.flux, 'mask'):
            # Temporary workaround for issue #1172. TODO: remove this `if` statement
# once we adopt AstroPy >=5.0.3 as a minimum dependency
cadence_mask &= ~np.isnan(self.flux.unmasked)
else:
cadence_mask &= ~np.isnan(self.flux)
# Validate `location`
if location is None:
if hasattr(self, "mission") and self.mission:
location = self.mission.lower()
else:
raise ValueError("you must pass a value for `location`.")
# Validate `radius`
if radius is None:
# 15 pixels has been chosen as a reasonable default.
# Comets have long tails which have tripped up users.
if (location == "kepler") | (location == "k2"):
radius = (4 * 15) * u.arcsecond.to(u.deg)
elif location == "tess":
radius = (21 * 15) * u.arcsecond.to(u.deg)
else:
radius = 15 * u.arcsecond.to(u.deg)
res = _query_solar_system_objects(
ra=self.ra,
dec=self.dec,
times=self.time.jd[cadence_mask],
location=location,
radius=radius,
cache=cache,
show_progress=show_progress,
)
if return_mask:
return res, np.in1d(self.time.jd, res.epoch)
return res
def _create_plot(
self,
method="plot",
column="flux",
ax=None,
normalize=False,
xlabel=None,
ylabel=None,
title="",
style="lightkurve",
show_colorbar=True,
colorbar_label="",
offset=None,
clip_outliers=False,
**kwargs,
) -> matplotlib.axes.Axes:
"""Implements `plot()`, `scatter()`, and `errorbar()` to avoid code duplication.
Parameters
----------
method : str
One of 'plot', 'scatter', or 'errorbar'.
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
clip_outliers : bool
            If ``True``, clip the y-axis limits to the central 95% percentile
            range of the flux values.
kwargs : dict
Dictionary of arguments to be passed to Matplotlib's `plot`,
`scatter`, or `errorbar` methods.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
# Configure the default style
if style is None or style == "lightkurve":
style = MPLSTYLE
# Default xlabel
if xlabel is None:
if not hasattr(self.time, "format"):
xlabel = "Phase"
elif self.time.format == "bkjd":
xlabel = "Time - 2454833 [BKJD days]"
elif self.time.format == "btjd":
xlabel = "Time - 2457000 [BTJD days]"
elif self.time.format == "jd":
xlabel = "Time [JD]"
else:
xlabel = "Time"
# Default ylabel
if ylabel is None:
if "flux" == column:
ylabel = "Flux"
else:
ylabel = f"{column}"
if normalize or (column == "flux" and self.meta.get("NORMALIZED")):
ylabel = "Normalized " + ylabel
elif (self[column].unit) and (self[column].unit.to_string() != ""):
ylabel += f" [{self[column].unit.to_string('latex_inline')}]"
# Default legend label
if "label" not in kwargs:
kwargs["label"] = self.meta.get("LABEL")
# Workaround for AstroPy v5.0.0 issue #12481: the 'c' argument
# in matplotlib's scatter does not work with masked quantities.
if "c" in kwargs and hasattr(kwargs["c"], 'mask'):
kwargs["c"] = kwargs["c"].unmasked
flux = self[column]
try:
flux_err = self[f"{column}_err"]
except KeyError:
flux_err = np.full(len(flux), np.nan)
# Second workaround for AstroPy v5.0.0 issue #12481:
# matplotlib does not work well with `MaskedNDArray` arrays.
if hasattr(flux, 'mask'):
flux = flux.filled(np.nan)
if hasattr(flux_err, 'mask'):
flux_err = flux_err.filled(np.nan)
# Normalize the data if requested
if normalize:
# ignore "light curve is already normalized" message because
            # the user explicitly asked for normalization here
with warnings.catch_warnings():
warnings.filterwarnings("ignore", message=".*already.*")
if column == "flux":
lc_normed = self.normalize()
else:
# Code below is a temporary hack because `normalize()`
# does not have a `column` argument yet
lc_tmp = self.copy()
lc_tmp["flux"] = flux
lc_tmp["flux_err"] = flux_err
lc_normed = lc_tmp.normalize()
flux, flux_err = lc_normed.flux, lc_normed.flux_err
# Apply offset if requested
if offset:
flux = flux.copy() + offset * flux.unit
# Make the plot
with plt.style.context(style):
if ax is None:
fig, ax = plt.subplots(1)
if method == "scatter":
sc = ax.scatter(self.time.value, flux, **kwargs)
            # Colorbars should only be plotted if the user requests one, and there
            # is a color specified that is not a string (e.g. 'C1') and is iterable.
if (
show_colorbar
and ("c" in kwargs)
and (not isinstance(kwargs["c"], str))
and hasattr(kwargs["c"], "__iter__")
):
cbar = plt.colorbar(sc, ax=ax)
cbar.set_label(colorbar_label)
cbar.ax.yaxis.set_tick_params(tick1On=False, tick2On=False)
cbar.ax.minorticks_off()
elif method == "errorbar":
if np.any(~np.isnan(flux_err)):
ax.errorbar(
x=self.time.value, y=flux.value, yerr=flux_err.value, **kwargs
)
else:
log.warning(f"Column `{column}` has no associated errors.")
else:
ax.plot(self.time.value, flux.value, **kwargs)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# Show the legend if labels were set
legend_labels = ax.get_legend_handles_labels()
if np.sum([len(a) for a in legend_labels]) != 0:
ax.legend(loc="best")
if clip_outliers and len(flux) > 0:
ymin, ymax = np.percentile(flux.value, [2.5, 97.5])
margin = 0.05 * (ymax - ymin)
ax.set_ylim(ymin - margin, ymax + margin)
return ax
def plot(self, **kwargs) -> matplotlib.axes.Axes:
"""Plot the light curve using Matplotlib's `~matplotlib.pyplot.plot` method.
Parameters
----------
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
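        Examples
        --------
        A minimal sketch (assuming ``lc`` is an existing `LightCurve`);
        the label shown in the legend is an arbitrary example:

        >>> ax = lc.plot(normalize=True, label="My target")  # doctest: +SKIP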
"""
return self._create_plot(method="plot", **kwargs)
def scatter(
self, colorbar_label="", show_colorbar=True, **kwargs
) -> matplotlib.axes.Axes:
"""Plots the light curve using Matplotlib's `~matplotlib.pyplot.scatter` method.
Parameters
----------
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.scatter`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
return self._create_plot(
method="scatter",
colorbar_label=colorbar_label,
show_colorbar=show_colorbar,
**kwargs,
)
def errorbar(self, linestyle="", **kwargs) -> matplotlib.axes.Axes:
"""Plots the light curve using Matplotlib's `~matplotlib.pyplot.errorbar` method.
Parameters
----------
linestyle : str
Connect the error bars using a line?
column : str
Name of data column to plot. Default `flux`.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
normalize : bool
Normalize the lightcurve before plotting?
xlabel : str
X axis label.
ylabel : str
Y axis label.
title : str
Title shown at the top using matplotlib `set_title`.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
show_colorbar : boolean
Show the colorbar if colors are given using the `c` argument?
colorbar_label : str
Label to show next to the colorbar (if `c` is given).
offset : float
Offset value to apply to the Y axis values before plotting. Use this
to avoid light curves from overlapping on the same plot. By default,
no offset is applied.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.errorbar`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
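        Examples
        --------
        A minimal sketch (assuming ``lc`` is an existing `LightCurve` with
        a populated ``flux_err`` column):

        >>> ax = lc.errorbar()  # doctest: +SKIP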
"""
if "ls" not in kwargs:
kwargs["linestyle"] = linestyle
return self._create_plot(method="errorbar", **kwargs)
def interact_bls(
self,
notebook_url="localhost:8888",
minimum_period=None,
maximum_period=None,
resolution=2000,
):
"""Display an interactive Jupyter Notebook widget to find planets.
The Box Least Squares (BLS) periodogram is a statistical tool used
for detecting transiting exoplanets and eclipsing binaries in
light curves. This method will display a Jupyter Notebook Widget
which enables the BLS algorithm to be used interactively.
Behind the scenes, the widget uses the AstroPy implementation of BLS [1]_.
This feature only works inside an active Jupyter Notebook.
It requires Bokeh v1.0 (or later). An error message will be shown
if these dependencies are not available.
Parameters
----------
notebook_url: str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
minimum_period : float or None
Minimum period at which to evaluate the BLS. If None, a default
value of 0.3 days will be used.
maximum_period : float or None
Maximum period at which to evaluate the BLS. If None, half of the
light curve's time coverage will be used.
resolution : int
Number of points to use in the BLS panel. Lower this value for faster
but less accurate performance. You can also vary this value using the
widget's Resolution Slider.
Examples
--------
Load the light curve for Kepler-10, remove long-term trends, and
display the BLS tool as follows:
>>> import lightkurve as lk
>>> lc = lk.search_lightcurve('kepler-10', quarter=3).download() # doctest: +SKIP
>>> lc = lc.normalize().flatten() # doctest: +SKIP
>>> lc.interact_bls() # doctest: +SKIP
References
----------
.. [1] https://docs.astropy.org/en/stable/timeseries/bls.html
"""
from .interact_bls import show_interact_widget
return show_interact_widget(
self,
notebook_url=notebook_url,
minimum_period=minimum_period,
maximum_period=maximum_period,
resolution=resolution,
)
def to_table(self) -> Table:
return Table(self)
@deprecated(
"2.0",
message="`to_timeseries()` has been deprecated. `LightCurve` is a "
"sub-class of Astropy TimeSeries as of Lightkurve v2.0 "
"and no longer needs to be converted.",
warning_type=LightkurveDeprecationWarning,
)
def to_timeseries(self):
return self
@staticmethod
def from_timeseries(ts):
"""Creates a new `LightCurve` from an AstroPy
`~astropy.timeseries.TimeSeries` object.
Parameters
----------
ts : `~astropy.timeseries.TimeSeries`
The AstroPy TimeSeries object. The object must contain columns
named 'time', 'flux', and 'flux_err'.
"""
return LightCurve(
time=ts["time"].value, flux=ts["flux"], flux_err=ts["flux_err"]
)
def to_stingray(self):
"""Returns a `stingray.Lightcurve` object.
This feature requires `Stingray <https://stingraysoftware.github.io/>`_
to be installed (e.g. ``pip install stingray``). An `ImportError` will
be raised if this package is not available.
Returns
-------
lightcurve : `stingray.Lightcurve`
A Stingray Lightcurve object.
"""
try:
from stingray import Lightcurve as StingrayLightcurve
except ImportError:
raise ImportError(
"You need to install Stingray to use "
"the LightCurve.to_stringray() method."
)
return StingrayLightcurve(
time=self.time.value,
counts=self.flux,
err=self.flux_err,
input_counts=False,
)
@staticmethod
def from_stingray(lc):
"""Create a new `LightCurve` from a `stingray.Lightcurve`.
Parameters
----------
lc : `stingray.Lightcurve`
A stingray Lightcurve object.
"""
return LightCurve(time=lc.time, flux=lc.counts, flux_err=lc.counts_err)
def to_csv(self, path_or_buf=None, **kwargs):
"""Writes the light curve to a CSV file.
This method will convert the light curve into the Comma-Separated Values
(CSV) text format. By default this method will return the result as a
string, but you can also write the string directly to disk by providing
a file name or handle via the `path_or_buf` parameter.
Parameters
----------
path_or_buf : string or file handle
File path or object. By default, the result is returned as a string.
**kwargs : dict
Dictionary of arguments to be passed to `TimeSeries.write()`.
Returns
-------
csv : str or None
Returns a csv-formatted string if ``path_or_buf=None``.
Returns `None` otherwise.
"""
use_stringio = False
if path_or_buf is None:
use_stringio = True
from io import StringIO
path_or_buf = StringIO()
result = self.write(path_or_buf, format="ascii.csv", **kwargs)
if use_stringio:
return path_or_buf.getvalue()
return result
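# Illustrative usage sketch (not part of the original source): `lc` is
# assumed to be an existing LightCurve. Calling `to_csv()` without
# arguments returns the CSV text; passing a path writes it to disk.
#
#     >>> csv_text = lc.to_csv()  # doctest: +SKIP
#     >>> lc.to_csv("lightcurve.csv")  # doctest: +SKIP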
def to_pandas(self, **kwargs):
"""Converts the light curve to a Pandas `~pandas.DataFrame` object.
The data frame will be indexed by `time` using values corresponding
to the light curve's time format. This is different from the
default behavior of `Table.to_pandas()` in AstroPy, which converts
time values into ISO timestamps.
Returns
-------
dataframe : `pandas.DataFrame`
A data frame indexed by `time`.
"""
df = super().to_pandas(**kwargs)
# Default AstroPy behavior is to change the time column into ``np.datetime64``
# We override it here because it confuses Kepler/TESS users who are used
# to working in BTJD and BKJD rather than ISO timestamps.
df.index = self.time.value
df.index.name = "time"
return df
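# Illustrative sketch (assumes an existing LightCurve `lc`): the returned
# frame is indexed by the raw time values (e.g. BKJD/BTJD) rather than
# ISO timestamps, and the index is named "time".
#
#     >>> df = lc.to_pandas()  # doctest: +SKIP
#     >>> df.index.name  # doctest: +SKIP
#     'time'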
def to_excel(self, path_or_buf, **kwargs) -> None:
"""Shorthand for `to_pandas().to_excel()`.
Parameters
----------
path_or_buf : string or file handle
File path or object.
**kwargs : dict
Dictionary of arguments to be passed to `to_pandas().to_excel(**kwargs)`.
"""
try:
import openpyxl # optional dependency
except ModuleNotFoundError:
raise ModuleNotFoundError(
"You need to install `openpyxl` to use this feature, e.g. use `pip install openpyxl`."
)
self.to_pandas().to_excel(path_or_buf, **kwargs)
def to_periodogram(self, method="lombscargle", **kwargs):
"""Converts the light curve to a `~lightkurve.periodogram.Periodogram`
power spectrum object.
This method will call either
`LombScarglePeriodogram.from_lightcurve() <lightkurve.periodogram.LombScarglePeriodogram.from_lightcurve>` or
`BoxLeastSquaresPeriodogram.from_lightcurve() <lightkurve.periodogram.BoxLeastSquaresPeriodogram.from_lightcurve>`,
which in turn wrap `astropy`'s `~astropy.timeseries.LombScargle` and `~astropy.timeseries.BoxLeastSquares`.
Optional keywords accepted if ``method='lombscargle'`` are:
``minimum_frequency``, ``maximum_frequency``, ``minimum_period``,
``maximum_period``, ``frequency``, ``period``, ``nterms``,
``nyquist_factor``, ``oversample_factor``, ``freq_unit``,
``normalization``, ``ls_method``.
Optional keywords accepted if ``method='bls'`` are
``minimum_period``, ``maximum_period``, ``period``,
``frequency_factor``, ``duration``.
Parameters
----------
method : {'lombscargle', 'boxleastsquares', 'ls', 'bls'}
Use the Lomb Scargle or Box Least Squares (BLS) method to
extract the power spectrum. Defaults to ``'lombscargle'``.
``'ls'`` and ``'bls'`` are shorthands for ``'lombscargle'``
and ``'boxleastsquares'``.
kwargs : dict
Keyword arguments passed to either
`LombScarglePeriodogram <lightkurve.periodogram.LombScarglePeriodogram.from_lightcurve>` or
`BoxLeastSquaresPeriodogram <lightkurve.periodogram.BoxLeastSquaresPeriodogram.from_lightcurve>`.
Returns
-------
Periodogram : `~lightkurve.periodogram.Periodogram` object
The power spectrum object extracted from the light curve.
"""
supported_methods = ["ls", "bls", "lombscargle", "boxleastsquares"]
method = validate_method(method.replace(" ", ""), supported_methods)
if method in ["bls", "boxleastsquares"]:
from .periodogram import BoxLeastSquaresPeriodogram
return BoxLeastSquaresPeriodogram.from_lightcurve(lc=self, **kwargs)
else:
from .periodogram import LombScarglePeriodogram
return LombScarglePeriodogram.from_lightcurve(lc=self, **kwargs)
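# Illustrative sketch (assumes an existing LightCurve `lc`): the short
# aliases 'ls' and 'bls' dispatch to the same implementations as the
# long method names; the keyword values below are hypothetical.
#
#     >>> pg = lc.to_periodogram("ls", nterms=2)  # doctest: +SKIP
#     >>> pg = lc.to_periodogram("bls", duration=0.1)  # doctest: +SKIP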
def to_seismology(self, **kwargs):
"""Returns a `~lightkurve.seismology.Seismology` object for estimating
quick-look asteroseismic quantities.
All `**kwargs` will be passed to the `to_periodogram()` method.
Returns
-------
seismology : `~lightkurve.seismology.Seismology` object
Object which can be used to estimate quick-look asteroseismic quantities.
"""
from .seismology import Seismology
return Seismology.from_lightcurve(self, **kwargs)
def to_fits(
self, path=None, overwrite=False, flux_column_name="FLUX", **extra_data
):
"""Converts the light curve to a FITS file in the Kepler/TESS file format.
The FITS file will be returned as a `~astropy.io.fits.HDUList` object.
If a `path` is specified then the file will also be written to disk.
Parameters
----------
path : str or None
Location where the FITS file will be written, which is optional.
overwrite : bool
Whether or not to overwrite the file, if `path` is set.
flux_column_name : str
The column name in the FITS file where the light curve flux data
should be stored. Typical values are `FLUX` or `SAP_FLUX`.
extra_data : dict
Extra keywords or columns to include in the FITS file.
Arguments of type str, int, float, or bool will be stored as
keywords in the primary header.
Arguments of type np.array or list will be stored as columns
in the first extension.
Returns
-------
hdu : `~astropy.io.fits.HDUList`
Returns an `~astropy.io.fits.HDUList` object.
"""
typedir = {
int: "J",
str: "A",
float: "D",
bool: "L",
np.int32: "J",
np.int64: "K",
np.float32: "E",
np.float64: "D",
}
def _header_template(extension):
"""Returns a template `fits.Header` object for a given extension."""
template_fn = os.path.join(
PACKAGEDIR, "data", "lc-ext{}-header.txt".format(extension)
)
return fits.Header.fromtextfile(template_fn)
def _make_primary_hdu(extra_data=None):
"""Returns the primary extension (#0)."""
if extra_data is None:
extra_data = {}
hdu = fits.PrimaryHDU()
# Copy the default keywords from a template file from the MAST archive
tmpl = _header_template(0)
for kw in tmpl:
hdu.header[kw] = (tmpl[kw], tmpl.comments[kw])
# Override the defaults where necessary
from . import __version__
default = {
"ORIGIN": "Unofficial data product",
"DATE": datetime.datetime.now().strftime("%Y-%m-%d"),
"CREATOR": "lightkurve.LightCurve.to_fits()",
"PROCVER": str(__version__),
}
for kw in default:
hdu.header["{}".format(kw).upper()] = default[kw]
if default[kw] is None:
log.warning("Value for {} is None.".format(kw))
for kw in extra_data:
if isinstance(extra_data[kw], (str, float, int, bool, type(None))):
hdu.header["{}".format(kw).upper()] = extra_data[kw]
if extra_data[kw] is None:
log.warning("Value for {} is None.".format(kw))
return hdu
def _make_lightcurve_extension(extra_data=None):
"""Create the 'LIGHTCURVE' extension (i.e. extension #1)."""
# Turn the data arrays into fits columns and initialize the HDU
if extra_data is None:
extra_data = {}
cols = []
if ~np.asarray(["TIME" in k.upper() for k in extra_data.keys()]).any():
cols.append(
fits.Column(
name="TIME",
format="D",
unit=self.time.format,
array=self.time.value,
)
)
if ~np.asarray(
[flux_column_name in k.upper() for k in extra_data.keys()]
).any():
cols.append(
fits.Column(
name=flux_column_name, format="E", unit="e-/s", array=self.flux
)
)
if hasattr(self,'flux_err'):
if (flux_column_name.upper() + "_ERR") not in extra_data.keys():
cols.append(
fits.Column(
name=flux_column_name.upper() + "_ERR",
format="E",
unit="e-/s",
array=self.flux_err,
)
)
if hasattr(self,'cadenceno'):
if ~np.asarray(
["CADENCENO" in k.upper() for k in extra_data.keys()]
).any():
cols.append(
fits.Column(name="CADENCENO", format="J", array=self.cadenceno)
)
for kw in extra_data:
if isinstance(extra_data[kw], (np.ndarray, list)):
cols.append(
fits.Column(
name="{}".format(kw).upper(),
# Use np.asarray so plain lists (which lack a .dtype) are handled too
format=typedir[np.asarray(extra_data[kw]).dtype.type],
array=extra_data[kw],
)
)
if "SAP_QUALITY" not in extra_data:
cols.append(
fits.Column(
name="SAP_QUALITY", format="J", array=np.zeros(len(self.flux))
)
)
coldefs = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(coldefs)
hdu.header["EXTNAME"] = "LIGHTCURVE"
return hdu
def _hdulist(**extra_data):
"""Returns an astropy.io.fits.HDUList object."""
list_out = fits.HDUList(
[
_make_primary_hdu(extra_data=extra_data),
_make_lightcurve_extension(extra_data=extra_data),
]
)
return list_out
hdu = _hdulist(**extra_data)
if path is not None:
hdu.writeto(path, overwrite=overwrite, checksum=True)
return hdu
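# Illustrative sketch (assumes an existing LightCurve `lc`): scalar extra
# keywords land in the primary header, while array-like values become
# columns in the LIGHTCURVE extension. The `QUARTER` keyword below is
# hypothetical.
#
#     >>> hdul = lc.to_fits(QUARTER=4)  # doctest: +SKIP
#     >>> lc.to_fits(path="lc.fits", overwrite=True)  # doctest: +SKIP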
def to_corrector(self, method="sff", **kwargs):
"""Returns a corrector object to remove instrument systematics.
Parameters
----------
method : str
Currently, "sff" and "cbv" are supported. These return a
`~correctors.SFFCorrector` or `~correctors.CBVCorrector`
class instance, respectively.
**kwargs : dict
Extra keyword arguments to be passed to the corrector class.
Returns
-------
corrector : `~correctors.corrector.Corrector`
Instance of a Corrector class, which typically provides
`~correctors.corrector.Corrector.correct()`
and `~correctors.corrector.Corrector.diagnose()` methods.
"""
if method == "pld":
raise ValueError(
"The 'pld' method can only be used on "
"`TargetPixelFile` objects, not `LightCurve` objects."
)
method = validate_method(method, supported_methods=["sff", "cbv"])
if method == "sff":
from .correctors import SFFCorrector
return SFFCorrector(self, **kwargs)
elif method == "cbv":
from .correctors import CBVCorrector
return CBVCorrector(self, **kwargs)
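# Illustrative sketch (assumes a K2 LightCurve `lc`): the returned
# corrector object exposes `correct()` and `diagnose()` methods.
#
#     >>> corrected_lc = lc.to_corrector("sff").correct()  # doctest: +SKIP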
@deprecated_renamed_argument(
"t0", "epoch_time", "2.0", warning_type=LightkurveDeprecationWarning
)
def plot_river(
self,
period,
epoch_time=None,
ax=None,
bin_points=1,
minimum_phase=-0.5,
maximum_phase=0.5,
method="mean",
**kwargs,
) -> matplotlib.axes.Axes:
"""Plot the light curve as a river plot.
A river plot uses colors to represent the light curve values in
chronological order, relative to the period of an interesting signal.
Each row in the plot represents a full period cycle, and each column
represents a fixed phase. This type of plot is often used to visualize
Transit Timing Variations (TTVs) in the light curves of exoplanets, but
it can be used to visualize periodic signals of any origin.
All extra keywords supplied are passed on to Matplotlib's
`~matplotlib.pyplot.pcolormesh` function.
Parameters
----------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
period : float
Period at which to fold the light curve.
epoch_time : float
Phase mid point for plotting. Defaults to the first time value.
bin_points : int
How many points should be in each bin.
minimum_phase : float
The minimum phase to plot.
maximum_phase : float
The maximum phase to plot.
method : str
The river method. Choose from `'mean'` or `'median'` or `'sigma'`.
If `'mean'` or `'median'`, the plot will display the average value in each bin.
If `'sigma'`, the plot will display the average in the bin divided by
the error in each bin, in order to show the data in terms of standard
deviation.
kwargs : dict
Dictionary of arguments to be passed on to Matplotlib's
`~matplotlib.pyplot.pcolormesh` function.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if hasattr(self, "time_original"): # folded light curve
time = self.time_original
else:
time = self.time
# epoch_time defaults to the first time value
if epoch_time is None:
epoch_time = time[0]
# Lightkurve v1.x assumed that `period` was given in days if no unit
# was specified. We maintain this behavior for backwards-compatibility.
if period is not None and not isinstance(period, Quantity):
period *= u.day
if epoch_time is not None and not isinstance(epoch_time, (Time, Quantity)):
epoch_time = Time(epoch_time, format=time.format, scale=time.scale)
method = validate_method(method, supported_methods=["mean", "median", "sigma"])
if (bin_points == 1) and (method in ["mean", "median"]):
bin_func = lambda y, e: (y[0], e[0])
elif (bin_points == 1) and (method in ["sigma"]):
bin_func = lambda y, e: ((y[0] - 1) / e[0], np.nan)
elif method == "mean":
bin_func = lambda y, e: (np.nanmean(y), np.nansum(e ** 2) ** 0.5 / len(e))
elif method == "median":
bin_func = lambda y, e: (np.nanmedian(y), np.nansum(e ** 2) ** 0.5 / len(e))
elif method == "sigma":
bin_func = lambda y, e: (
(np.nanmean(y) - 1) / (np.nansum(e ** 2) ** 0.5 / len(e)),
np.nan,
)
s = np.argsort(time.value)
x, y, e = time.value[s], self.flux[s], self.flux_err[s]
med = np.nanmedian(self.flux)
e /= med
y /= med
# Here `ph` is the phase of each time point x.
# `cyc` is the number of cycles that have occurred at each time point x
# since the phase 0 before x[0].
n = int(
period.value
/ np.nanmedian(np.diff(x))
* (maximum_phase - minimum_phase)
/ bin_points
)
if n == 1:
bin_points = int(maximum_phase - minimum_phase) / (
2 / int(period.value / np.nanmedian(np.diff(x)))
)
warnings.warn(
"`bin_points` is too high to plot a phase curve, resetting to {}".format(
bin_points
),
LightkurveWarning,
)
n = 2
phase = (epoch_time.value % period.value) / period.value
ph = ((x - (phase * period.value)) / period.value) % 1
cyc = np.asarray(
(x - ((x - phase * period.value) % period.value)) / period.value, int
)
cyc -= np.min(cyc)
ph[ph > 0.5] -= 1
ar = np.empty((n, np.max(cyc) + 1))
ar[:] = np.nan
bs = np.linspace(minimum_phase, maximum_phase, n + 1)
cycs = np.arange(0, np.max(cyc) + 2)
ph_masks = [(ph > bs[jdx]) & (ph <= bs[jdx + 1]) for jdx in range(n)]
qual_mask = np.isfinite(y)
for cyc1 in np.unique(cyc):
cyc_mask = cyc == cyc1
if not np.any(cyc_mask):
continue
for jdx, ph_mask in enumerate(ph_masks):
if not np.any(cyc_mask & ph_mask & qual_mask):
ar[jdx, cyc1] = np.nan
else:
ar[jdx, cyc1] = bin_func(
y[cyc_mask & ph_mask], e[cyc_mask & ph_mask]
)[0]
# If the method is average we need to denormalize the plot
if method in ["mean", "median"]:
median = np.nanmedian(self.flux.value)
if hasattr(median, 'mask'):
median = median.filled(np.nan)
ar *= median
d = np.max(
[
np.abs(np.nanmedian(ar) - np.nanpercentile(ar, 5)),
np.abs(np.nanmedian(ar) - np.nanpercentile(ar, 95)),
]
)
vmin = kwargs.pop("vmin", np.nanmedian(ar) - d)
vmax = kwargs.pop("vmax", np.nanmedian(ar) + d)
if method in ["mean", "median"]:
cmap = kwargs.pop("cmap", "viridis")
elif method == "sigma":
cmap = kwargs.pop("cmap", "coolwarm")
with plt.style.context(MPLSTYLE):
if ax is None:
_, ax = plt.subplots(figsize=(12, cyc.max() * 0.1))
im = ax.pcolormesh(
bs, cycs, ar.T, vmin=vmin, vmax=vmax, cmap=cmap, **kwargs
)
cbar = plt.colorbar(im, ax=ax)
if method in ["mean", "median"]:
unit = "[Normalized Flux]"
if self.flux.unit is not None:
if self.flux.unit != u.dimensionless_unscaled:
unit = "[{}]".format(self.flux.unit.to_string("latex"))
if bin_points == 1:
cbar.set_label("Flux {}".format(unit))
else:
cbar.set_label("Average Flux in Bin {}".format(unit))
elif method == "sigma":
if bin_points == 1:
cbar.set_label(
"Flux in units of Standard Deviation "
r"$(f - \overline{f})/(\sigma_f)$"
)
else:
cbar.set_label(
"Average Flux in Bin in units of Standard Deviation "
r"$(f - \overline{f})/(\sigma_f)$"
)
ax.set_xlabel("Phase")
ax.set_ylabel("Cycle")
ax.set_ylim(cyc.max(), 0)
ax.set_title(self.meta.get("LABEL"))
a = cyc.max() * 0.1 / 12.0
b = (cyc.max() - cyc.min()) / (bs.max() - bs.min())
ax.set_aspect(a / b)
return ax
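# Illustrative sketch (assumes an existing LightCurve `lc`; the 3.5-day
# period is hypothetical): fold at the period of interest and render each
# bin in units of standard deviation.
#
#     >>> lc.plot_river(period=3.5, method="sigma")  # doctest: +SKIP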
def create_transit_mask(self, period, transit_time, duration):
"""Returns a boolean array that is ``True`` during transits and
``False`` elsewhere.
This method supports multi-planet systems by allowing ``period``,
``transit_time``, and ``duration`` to be array-like lists of parameters.
Parameters
----------
period : `~astropy.units.Quantity`, float, or array-like
Period(s) of the transits.
duration : `~astropy.units.Quantity`, float, or array-like
Duration(s) of the transits.
transit_time : `~astropy.time.Time`, float, or array-like
Transit midpoint(s) of the transits.
Returns
-------
transit_mask : np.array of bool
Mask that flags transits. Mask is ``True`` where there are transits.
Examples
--------
You can create a transit mask for a single-planet system as follows::
>>> import lightkurve as lk
>>> lc = lk.LightCurve({'time': [1, 2, 3, 4, 5], 'flux': [1, 1, 1, 1, 1]})
>>> lc.create_transit_mask(transit_time=2., period=2., duration=0.1)
array([False, True, False, True, False])
The method accepts lists of parameters to support multi-planet systems::
>>> lc.create_transit_mask(transit_time=[2., 3.], period=[2., 10.], duration=[0.1, 0.1])
array([False, True, True, True, False])
"""
# Convert Quantity objects to floats in units "day"
period = _to_unitless_day(period)
duration = _to_unitless_day(duration)
# If ``transit_time`` is a ``Quantity``, attempt to convert it to a ``Time`` object
if isinstance(transit_time, Quantity):
transit_time = Time(transit_time, format=self.time.format, scale=self.time.scale)
# Ensure all parameters are 1D-arrays
period = np.atleast_1d(period)
duration = np.atleast_1d(duration)
transit_time = np.atleast_1d(transit_time)
# Make sure all params have the same number of entries
n_planets = len(period)
if any(len(param) != n_planets for param in [duration, transit_time]):
raise ValueError(
"period, duration, and transit_time must have "
"the same number of values."
)
# Initialize an empty cadence mask
in_transit = np.empty(len(self), dtype=bool)
in_transit[:] = False
# Create the transit mask
for per, dur, tt in zip(period, duration, transit_time):
if isinstance(tt, Time):
# If a `Time` is passed, ensure it has the right format & scale
tt = Time(tt, format=self.time.format, scale=self.time.scale).value
hp = per / 2.0
in_transit |= np.abs((self.time.value - tt + hp) % per - hp) < 0.5 * dur
return in_transit
def search_neighbors(
self, limit: int = 10, radius: float = 3600.0, **search_criteria
):
"""Search the data archive at MAST for the most nearby light curves.
By default, the 10 nearest neighbors located within 3600 arcseconds
are returned. You can override these defaults by changing the `limit`
and `radius` parameters.
If the LightCurve object is a Kepler, K2, or TESS light curve,
the default behavior of this method is to only return light curves
obtained during the exact same quarter, campaign, or sector.
This is useful to enable coeval light curves to be inspected for
spurious noise signals in common between multiple neighboring targets.
You can override this default behavior by passing a `mission`,
`quarter`, `campaign`, or `sector` argument yourself.
Please refer to the docstring of `search_lightcurve` for a complete
list of search parameters accepted.
Parameters
----------
limit : int
Maximum number of results to return.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds.
**search_criteria : kwargs
Extra criteria to be passed to `search_lightcurve`.
Returns
-------
result : :class:`SearchResult` object
Object detailing the neighbor light curves found, sorted by
distance from the current light curve.
"""
# Local import to avoid circular dependency
from .search import search_lightcurve
# By default, only return results from the same sector/quarter/campaign
if (
"mission" not in search_criteria
and "sector" not in search_criteria
and "quarter" not in search_criteria
and "campaign" not in search_criteria
):
mission = self.meta.get("MISSION", None)
if mission == "TESS":
search_criteria["sector"] = self.sector
elif mission == "Kepler":
search_criteria["quarter"] = self.quarter
elif mission == "K2":
search_criteria["campaign"] = self.campaign
# Note: we increase `limit` by one below to account for the fact that the
# current light curve will be returned by the search operation
log.info(
f"Started searching for up to {limit} neighbors within {radius} arcseconds."
)
result = search_lightcurve(
f"{self.ra} {self.dec}", radius=radius, limit=limit + 1, **search_criteria
)
# Filter by distance > 0 to avoid returning the current light curve
result = result[result.distance > 0]
log.info(f"Found {len(result)} neighbors.")
return result
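# Illustrative sketch (assumes `lc` carries valid RA/Dec metadata; the
# values below are hypothetical): find up to five neighbors within two
# arcminutes.
#
#     >>> neighbors = lc.search_neighbors(limit=5, radius=120.)  # doctest: +SKIP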
def head(self, n: int = 5):
"""Return the first n rows.
Parameters
----------
n : int
Number of rows to return.
Returns
-------
lc : LightCurve
Light curve containing the first n rows.
"""
return self[:n]
def tail(self, n: int = 5):
"""Return the last n rows.
Parameters
----------
n : int
Number of rows to return.
Returns
-------
lc : LightCurve
Light curve containing the last n rows.
"""
return self[-n:]
def truncate(self, before: float = None, after: float = None, column: str = "time"):
"""Truncates the light curve before and after some time value.
Parameters
----------
before : float
Truncate all rows before this time value.
after : float
Truncate all rows after this time value.
column : str, optional
The name of the column on which the truncation is based. Defaults to 'time'.
Returns
-------
truncated_lc : LightCurve
The truncated light curve.
"""
def _to_unitless(data):
return np.asarray(getattr(data, "value", data))
mask = np.ones(len(self), dtype=bool)
if before:
mask &= _to_unitless(getattr(self, column)) >= before
if after:
mask &= _to_unitless(getattr(self, column)) <= after
return self[mask]
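# Illustrative sketch (assumes an existing LightCurve `lc`; the time
# values are hypothetical): keep only the cadences between two times.
#
#     >>> lc_clipped = lc.truncate(before=1000., after=2000.)  # doctest: +SKIP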
class FoldedLightCurve(LightCurve):
"""Subclass of `LightCurve` in which the ``time`` parameter represents phase values.
Compared to the `~lightkurve.lightcurve.LightCurve` base class, this class
has extra meta data entries (``period``, ``epoch_time``, ``epoch_phase``,
``wrap_phase``, ``normalize_phase``), an extra column (``time_original``),
extra properties (``phase``, ``odd_mask``, ``even_mask``),
and implements different plotting defaults.
"""
@property
def phase(self):
"""Alias for `LightCurve.time`."""
return self.time
@property
def cycle(self):
"""The cycle of the correspond `time_original`.
The first cycle is cycle 0, irrespective of whether it is a complete one or not.
"""
cycle_epoch_start = self.epoch_time - self.period / 2
result = np.asarray(np.floor(((self.time_original - cycle_epoch_start) / self.period).value), dtype=int)
result = result - result.min()
return result
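# Worked example of the cycle computation (hypothetical numbers): with
# period=2 and epoch_time=5, cycles are counted from epoch_time - period/2 = 4,
# so time_original values 4.5, 6.1, and 8.3 fall in raw cycles 0, 1, and 2;
# subtracting the minimum then makes the first observed cycle number 0.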
@property
def odd_mask(self):
"""Boolean mask which flags the odd-numbered cycles (1, 3, 5, etc).
This is useful for studying every second occurrence of a signal.
For example, in exoplanet searches, comparisons of odd and even transits
can help confirm the planetary nature of a signal. Differences in the
depth, duration, or shape of the odd- and even-numbered transits would
indicate that the 'transits' are being caused by a near-equal mass
eclipsing background binary, rather than a true transiting exoplanet.
Examples
--------
You can visualize the odd- and even-numbered transits separately as
follows:
>>> f = lc.fold(...) # doctest: +SKIP
>>> f[f.odd_mask].scatter() # doctest: +SKIP
>>> f[f.even_mask].scatter() # doctest: +SKIP
"""
return self.cycle % 2 == 1
@property
def even_mask(self):
"""Boolean mask which flags the even-numbered cycles (2, 4, 6, etc).
See the documentation of `odd_mask` for examples.
"""
return ~self.odd_mask
def _set_xlabel(self, kwargs):
"""Helper function for plot, scatter, and errorbar.
Ensures the xlabel is correctly set for folded light curves.
"""
if "xlabel" not in kwargs:
kwargs["xlabel"] = "Phase"
if isinstance(self.time, TimeDelta):
kwargs["xlabel"] += f" [{self.time.format.upper()}]"
return kwargs
def plot(self, **kwargs):
"""Plot the folded light curve using matplotlib's
`~matplotlib.pyplot.plot` method.
See `LightCurve.plot` for details on the accepted arguments.
Parameters
----------
kwargs : dict
Dictionary of arguments to be passed to `LightCurve.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
kwargs = self._set_xlabel(kwargs)
return super(FoldedLightCurve, self).plot(**kwargs)
def scatter(self, **kwargs):
"""Plot the folded light curve using matplotlib's `~matplotlib.pyplot.scatter` method.
See `LightCurve.scatter` for details on the accepted arguments.
Parameters
----------
kwargs : dict
Dictionary of arguments to be passed to `LightCurve.scatter`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
kwargs = self._set_xlabel(kwargs)
return super(FoldedLightCurve, self).scatter(**kwargs)
def errorbar(self, **kwargs):
"""Plot the folded light curve using matplotlib's
`~matplotlib.pyplot.errorbar` method.
See `LightCurve.errorbar` for details on the accepted arguments.
Parameters
----------
kwargs : dict
Dictionary of arguments to be passed to `LightCurve.errorbar`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
kwargs = self._set_xlabel(kwargs)
return super(FoldedLightCurve, self).errorbar(**kwargs)
def plot_river(self, **kwargs):
"""Plot the folded light curve in a river style.
See `~LightCurve.plot_river` for details on the accepted arguments.
Parameters
----------
kwargs : dict
Dictionary of arguments to be passed to `~LightCurve.plot_river`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
ax = super(FoldedLightCurve, self).plot_river(
period=self.period, epoch_time=self.epoch_time, **kwargs
)
return ax
class KeplerLightCurve(LightCurve):
"""Subclass of :class:`LightCurve <lightkurve.lightcurve.LightCurve>`
to represent data from NASA's Kepler and K2 mission."""
_deprecated_keywords = (
"targetid",
"label",
"time_format",
"time_scale",
"flux_unit",
"quality_bitmask",
"channel",
"campaign",
"quarter",
"mission",
"ra",
"dec",
)
_default_time_format = "bkjd"
@classmethod
def read(cls, *args, **kwargs):
"""Returns a `KeplerLightCurve` by reading the given file.
Parameters
----------
filename : str
Local path or remote url of a Kepler light curve FITS file.
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`KeplerQualityFlags <lightkurve.utils.KeplerQualityFlags>` class for details on the bitmasks.
format : str, optional
The format of the Kepler FITS file. Should be one of 'kepler', 'k2sff', 'everest'. Defaults to 'kepler'.
"""
# Default to Kepler file format
if kwargs.get("format") is None:
kwargs["format"] = "kepler"
return super().read(*args, **kwargs)
def to_fits(
self,
path=None,
overwrite=False,
flux_column_name="FLUX",
aperture_mask=None,
**extra_data,
):
"""Writes the KeplerLightCurve to a FITS file.
Parameters
----------
path : string, default None
File path, if `None` returns an astropy.io.fits.HDUList object.
overwrite : bool
Whether or not to overwrite the file
flux_column_name : str
The name of the label for the FITS extension, e.g. SAP_FLUX or FLUX
aperture_mask : array-like
Optional 2D aperture mask to save with this lightcurve object, if
defined. The mask can be either a boolean mask or an integer mask
mimicking the Kepler/TESS convention; boolean masks are
automatically converted to the Kepler/TESS conventions
extra_data : dict
Extra keywords or columns to include in the FITS file.
Arguments of type str, int, float, or bool will be stored as
keywords in the primary header.
Arguments of type np.array or list will be stored as columns
in the first extension.
Returns
-------
hdu : astropy.io.fits
Returns an astropy.io.fits object if path is None
"""
kepler_specific_data = {
"TELESCOP": "KEPLER",
"INSTRUME": "Kepler Photometer",
"OBJECT": "{}".format(self.targetid),
"KEPLERID": self.targetid,
"CHANNEL": self.channel,
"MISSION": self.mission,
"RA_OBJ": self.ra,
"DEC_OBJ": self.dec,
"EQUINOX": 2000,
"DATE-OBS": Time(self.time[0] + 2454833.0, format=("jd")).isot,
"SAP_QUALITY": self.quality,
"MOM_CENTR1": self.centroid_col,
"MOM_CENTR2": self.centroid_row,
}
for kw in kepler_specific_data:
if not np.asarray([kw.lower() == k.lower() for k in extra_data]).any():
extra_data[kw] = kepler_specific_data[kw]
hdu = super(KeplerLightCurve, self).to_fits(
path=None, overwrite=overwrite, **extra_data
)
hdu[0].header["QUARTER"] = self.meta.get("QUARTER")
hdu[0].header["CAMPAIGN"] = self.meta.get("CAMPAIGN")
hdu = _make_aperture_extension(hdu, aperture_mask)
if path is not None:
hdu.writeto(path, overwrite=overwrite, checksum=True)
else:
return hdu
class TessLightCurve(LightCurve):
"""Subclass of :class:`LightCurve <lightkurve.lightcurve.LightCurve>`
to represent data from NASA's TESS mission."""
_deprecated_keywords = (
"targetid",
"label",
"time_format",
"time_scale",
"flux_unit",
"quality_bitmask",
"sector",
"camera",
"ccd",
"mission",
"ra",
"dec",
)
_default_time_format = "btjd"
@classmethod
def read(cls, *args, **kwargs):
"""Returns a `TessLightCurve` by reading the given file.
Parameters
----------
filename : str
Local path or remote url of a TESS light curve FITS file.
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`TessQualityFlags <lightkurve.utils.TessQualityFlags>` class for details on the bitmasks.
"""
# Default to TESS file format
if kwargs.get("format") is None:
kwargs["format"] = "tess"
return super().read(*args, **kwargs)
def to_fits(
self,
path=None,
overwrite=False,
flux_column_name="FLUX",
aperture_mask=None,
**extra_data,
):
"""Writes the KeplerLightCurve to a FITS file.
Parameters
----------
path : string, default None
File path, if `None` returns an astropy.io.fits.HDUList object.
overwrite : bool
Whether or not to overwrite the file
flux_column_name : str
The name of the label for the FITS extension, e.g. SAP_FLUX or FLUX
aperture_mask : array-like
Optional 2D aperture mask to save with this lightcurve object, if
defined. The mask can be either a boolean mask or an integer mask
mimicking the Kepler/TESS convention; boolean masks are
automatically converted to the Kepler/TESS conventions
extra_data : dict
Extra keywords or columns to include in the FITS file.
Arguments of type str, int, float, or bool will be stored as
keywords in the primary header.
Arguments of type np.array or list will be stored as columns
in the first extension.
Returns
-------
hdu : astropy.io.fits
Returns an astropy.io.fits object if path is None
"""
tess_specific_data = {
"OBJECT": "{}".format(self.targetid),
"MISSION": self.meta.get("MISSION"),
"RA_OBJ": self.meta.get("RA"),
"TELESCOP": self.meta.get("MISSION"),
"CAMERA": self.meta.get("CAMERA"),
"CCD": self.meta.get("CCD"),
"SECTOR": self.meta.get("SECTOR"),
"TARGETID": self.meta.get("TARGETID"),
"DEC_OBJ": self.meta.get("DEC"),
"MOM_CENTR1": self.centroid_col,
"MOM_CENTR2": self.centroid_row,
}
for kw in tess_specific_data:
if not np.asarray([kw.lower() == k.lower() for k in extra_data]).any():
extra_data[kw] = tess_specific_data[kw]
hdu = super(TessLightCurve, self).to_fits(
path=None, overwrite=overwrite, **extra_data
)
# We do this because the TESS file format is subtly different in the
# name of this column.
hdu[1].columns.change_name("SAP_QUALITY", "QUALITY")
hdu = _make_aperture_extension(hdu, aperture_mask)
if path is not None:
hdu.writeto(path, overwrite=overwrite, checksum=True)
else:
return hdu
# Helper functions
def _boolean_mask_to_bitmask(aperture_mask):
"""Takes in an aperture_mask and returns a Kepler-style bitmask
Parameters
----------
aperture_mask : array-like
2D aperture mask. The mask can be either a boolean mask or an integer
mask mimicking the Kepler/TESS convention; boolean or boolean-like masks
are converted to the Kepler/TESS conventions. Kepler bitmasks are
returned unchanged except for possible datatype conversion.
Returns
-------
bitmask : numpy uint8 array
A bitmask incompletely mimicking the Kepler/TESS convention: Bit 2,
value = 3, means "pixel was part of the custom aperture". The other
bits have no meaning and are currently assigned a value of 1.
"""
# Masks can either be boolean input or Kepler pipeline style
clean_mask = np.nan_to_num(aperture_mask)
contains_bit2 = (clean_mask.astype(np.int_) & 2).any()
all_zeros_or_ones = (clean_mask.dtype in ["float", "int"]) & (
(set(np.unique(clean_mask)) - {0, 1}) == set()
)
is_bool_mask = (aperture_mask.dtype == "bool") | all_zeros_or_ones
if is_bool_mask:
out_mask = np.ones(aperture_mask.shape, dtype=np.uint8)
out_mask[aperture_mask == 1] = 3
out_mask = out_mask.astype(np.uint8)
elif contains_bit2:
out_mask = aperture_mask.astype(np.uint8)
else:
log.warning(
"The input aperture mask must be boolean or follow the "
"Kepler-pipeline standard; returning None."
)
out_mask = None
return out_mask
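# Illustrative sketch of the conversion: a boolean mask maps True -> 3
# ("pixel part of the custom aperture") and False -> 1.
#
#     >>> _boolean_mask_to_bitmask(np.array([[True, False]]))  # doctest: +SKIP
#     array([[3, 1]], dtype=uint8)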
def _make_aperture_extension(hdu_list, aperture_mask):
"""Returns an `ImageHDU` object containing the 'APERTURE' extension
of a light curve file."""
if aperture_mask is not None:
bitmask = _boolean_mask_to_bitmask(aperture_mask)
hdu = fits.ImageHDU(bitmask)
hdu.header["EXTNAME"] = "APERTURE"
hdu_list.append(hdu)
return hdu_list
| 138,768 | 39.754479 | 140 | py |
| lightkurve | lightkurve-main/src/lightkurve/units.py |
"""Define custom AstroPy units commonly used by the Kepler/TESS community."""
from astropy import units as u
__all__ = ["ppt", "ppm"]
ppt = u.def_unit(["ppt", "parts per thousand"], u.Unit(1e-3))
ppm = u.def_unit(["ppm", "parts per million"], u.Unit(1e-6))
u.add_enabled_units([ppt, ppm])
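# Illustrative sketch: once enabled, the units behave like any other
# astropy unit; for example, converting half a part per thousand:
#
#     >>> (0.5 * ppt).to(ppm)  # doctest: +SKIP
#     <Quantity 500. ppm>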
| 291 | 31.444444 | 77 | py |
| lightkurve | lightkurve-main/src/lightkurve/interact.py |
"""Provides tools for interactive visualizations.
Example use
-----------
The functions in this module are used to create Bokeh-based visualization
widgets. For example, the following code will create an interactive
visualization widget showing the pixel data and a lightcurve::
# SN 2018 oh Supernova example
from lightkurve import KeplerTargetPixelFile
tpf = KeplerTargetPixelFile.from_archive(228682548)
tpf.interact()
Note that this will only work inside a Jupyter notebook at this time.
"""
from __future__ import division, print_function
import os
import logging
import warnings
import numpy as np
from astropy.coordinates import SkyCoord, Angle
from astropy.io import ascii
from astropy.stats import sigma_clip
from astropy.time import Time
import astropy.units as u
from astropy.utils.exceptions import AstropyUserWarning
import pandas as pd
from pandas import Series
from .utils import KeplerQualityFlags, LightkurveWarning, LightkurveError
log = logging.getLogger(__name__)
# Import the optional Bokeh dependency, or print a friendly error otherwise.
try:
import bokeh # Import bokeh first so we get an ImportError we can catch
from bokeh.io import show, output_notebook, push_notebook
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import (
LogColorMapper,
Slider,
RangeSlider,
Span,
ColorBar,
LogTicker,
Range1d,
LinearColorMapper,
BasicTicker,
Arrow,
VeeHead,
)
from bokeh.layouts import layout, Spacer
from bokeh.models.tools import HoverTool
from bokeh.models.widgets import Button, Div
from bokeh.models.formatters import PrintfTickFormatter
except ImportError:
# We will print a nice error message in the `show_interact_widget` function
pass
def _search_nearby_of_tess_target(tic_id):
# To avoid warnings / overflow error in attempting to convert GAIA DR2, TIC ID, TOI
# as int32 (the default) in some cases
return ascii.read(f"https://exofop.ipac.caltech.edu/tess/download_nearbytarget.php?id={tic_id}&output=csv",
format="csv",
fast_reader=False,
converters={
"GAIA DR2": [ascii.convert_numpy(str)],
"TIC ID": [ascii.convert_numpy(str)],
"TOI": [ascii.convert_numpy(str)],
})
def _get_tic_meta_of_gaia_in_nearby(tab, nearby_gaia_id, key, default=None):
res = tab[tab['GAIA DR2'] == str(nearby_gaia_id)]
if len(res) > 0:
return res[0][key]
else:
return default
def _correct_with_proper_motion(ra, dec, pm_ra, pm_dec, equinox, new_time):
"""Return proper-motion corrected RA / Dec.
It also returns whether proper motion correction was applied or not."""
# all parameters have units
if ra is None or dec is None or \
pm_ra is None or pm_dec is None or (np.all(pm_ra == 0) and np.all(pm_dec == 0)) or \
equinox is None:
return ra, dec, False
# To be more accurate, we should have supplied a distance to SkyCoord.
# In theory, for Gaia DR2 data, we can infer the distance from the parallax provided.
# It is not done for 2 reasons:
# 1. Gaia DR2 data occasionally has negative parallax values. Correctly handling them could be tricky. See:
# https://www.cosmos.esa.int/documents/29201/1773953/Gaia+DR2+primer+version+1.3.pdf/a4459741-6732-7a98-1406-a1bea243df79
# 2. For our purpose (plotting in various interact usages) here, the added distance does not make a
# noticeable difference. E.g., applying it to Proxima Cen, a target with large parallax
# and huge proper motion, does not change the result in any noticeable way.
#
c = SkyCoord(ra, dec, pm_ra_cosdec=pm_ra, pm_dec=pm_dec,
frame='icrs', obstime=equinox)
# Suppress ErfaWarning temporarily as a workaround for:
# https://github.com/astropy/astropy/issues/11747
with warnings.catch_warnings():
# the same warning appears both as an ErfaWarning and a astropy warning
# so we filter by the message instead
warnings.filterwarnings("ignore", message="ERFA function")
new_c = c.apply_space_motion(new_obstime=new_time)
return new_c.ra, new_c.dec, True
def _get_corrected_coordinate(tpf_or_lc):
"""Extract coordinate from Kepler/TESS FITS, with proper motion corrected
to the start of observation if proper motion is available."""
h = tpf_or_lc.meta
new_time = tpf_or_lc.time[0]
ra = h.get("RA_OBJ")
dec = h.get("DEC_OBJ")
pm_ra = h.get("PMRA")
pm_dec = h.get("PMDEC")
equinox = h.get("EQUINOX")
if ra is None or dec is None or pm_ra is None or pm_dec is None or equinox is None:
# case cannot apply proper motion due to missing parameters
return ra, dec, False
# Note: it'd be better / extensible if the unit is a property of the tpf or lc
if tpf_or_lc.meta.get("TICID") is not None:
pm_unit = u.milliarcsecond / u.year
else:  # assumed to be Kepler / K2
pm_unit = u.arcsecond / u.year
ra_corrected, dec_corrected, pm_corrected = _correct_with_proper_motion(
ra * u.deg, dec *u.deg,
pm_ra * pm_unit, pm_dec * pm_unit,
# e.g., equinox 2000 is treated as J2000, which is defined as noon of 2000-01-01 TT
Time(equinox, format="decimalyear", scale="tt") + 0.5,
new_time)
return ra_corrected.to(u.deg).value, dec_corrected.to(u.deg).value, pm_corrected
def _to_unitless(items):
"""Convert the values in the item list to unitless one"""
return [getattr(item, "value", item) for item in items]
def prepare_lightcurve_datasource(lc):
"""Prepare a bokeh ColumnDataSource object for tool tips.
Parameters
----------
lc : LightCurve object
The light curve to be shown.
Returns
-------
lc_source : bokeh.plotting.ColumnDataSource
"""
# Convert time into human readable strings, breaks with NaN time
# See https://github.com/lightkurve/lightkurve/issues/116
if (lc.time == lc.time).all():
human_time = lc.time.isot
else:
human_time = [" "] * len(lc.flux)
# Convert binary quality numbers into human readable strings
qual_strings = []
for bitmask in lc.quality:
if isinstance(bitmask, u.Quantity):
bitmask = bitmask.value
flag_str_list = KeplerQualityFlags.decode(bitmask)
if len(flag_str_list) == 0:
qual_strings.append(" ")
if len(flag_str_list) == 1:
qual_strings.append(flag_str_list[0])
if len(flag_str_list) > 1:
qual_strings.append("; ".join(flag_str_list))
lc_source = ColumnDataSource(
data=dict(
time=lc.time.value,
time_iso=human_time,
flux=lc.flux.value,
cadence=lc.cadenceno.value,
quality_code=lc.quality.value,
quality=np.array(qual_strings),
)
)
return lc_source
def aperture_mask_to_selected_indices(aperture_mask):
"""Convert the 2D aperture mask to 1D selection indices, for the use with bokeh ColumnDataSource."""
npix = aperture_mask.size
pixel_index_array = np.arange(0, npix, 1)
return pixel_index_array[aperture_mask.reshape(-1)]
def aperture_mask_from_selected_indices(selected_pixel_indices, tpf):
"""Convert an aperture mask in 1D selection indices back to 2D (in the shape of the given TPF)."""
npix = tpf.flux[0, :, :].size
pixel_index_array = np.arange(0, npix, 1).reshape(tpf.flux[0].shape)
selected_indices = np.array(selected_pixel_indices)
selected_mask_1d = np.isin(pixel_index_array, selected_indices)
return selected_mask_1d.reshape(tpf.flux[0].shape)
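# Illustrative round-trip sketch (assumes an existing TargetPixelFile
# `tpf` and a boolean `mask` matching its shape): the two helpers invert
# each other.
#
#     >>> idx = aperture_mask_to_selected_indices(mask)  # doctest: +SKIP
#     >>> np.array_equal(aperture_mask_from_selected_indices(idx, tpf), mask)  # doctest: +SKIP
#     True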
def prepare_tpf_datasource(tpf, aperture_mask):
"""Prepare a bokeh DataSource object for selection glyphs
Parameters
----------
tpf : TargetPixelFile
TPF to be shown.
aperture_mask : boolean numpy array
The Aperture mask applied at the startup of interact
Returns
-------
tpf_source : bokeh.plotting.ColumnDataSource
Bokeh object to be shown.
"""
_, ny, nx = tpf.shape
# (xa, ya) pair enumerates all pixels of the tpf
xx = tpf.column + np.arange(nx)
yy = tpf.row + np.arange(ny)
xa, ya = np.meshgrid(xx, yy)
# flatten them, as column data source requires 1d data
xa = xa.flatten()
ya = ya.flatten()
tpf_source = ColumnDataSource(data=dict(xx=xa.astype(float), yy=ya.astype(float)))
# convert the ndarray from aperture_mask_to_selected_indices() to plain list
# because bokeh v3.0.2 does not accept ndarray (and causes js error)
# see https://github.com/bokeh/bokeh/issues/12624
tpf_source.selected.indices = list(aperture_mask_to_selected_indices(aperture_mask))
return tpf_source
def get_lightcurve_y_limits(lc_source):
"""Compute sensible defaults for the Y axis limits of the lightcurve plot.
Parameters
----------
lc_source : bokeh.plotting.ColumnDataSource
The lightcurve being shown.
Returns
-------
ymin, ymax : float, float
Flux min and max limits.
"""
with warnings.catch_warnings(): # Ignore warnings due to NaNs
warnings.simplefilter("ignore", AstropyUserWarning)
flux = sigma_clip(lc_source.data["flux"], sigma=5, masked=False)
low, high = np.nanpercentile(flux, (1, 99))
margin = 0.10 * (high - low)
return low - margin, high + margin
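# Worked example of the margin logic (hypothetical numbers): if the
# 1st/99th percentiles of the sigma-clipped flux are 0.9 and 1.1, the
# margin is 0.10 * 0.2 = 0.02 and the returned limits are (0.88, 1.12).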
def make_lightcurve_figure_elements(lc, lc_source, ylim_func=None):
"""Make the lightcurve figure elements.
Parameters
----------
lc : LightCurve
Lightcurve to be shown.
lc_source : bokeh.plotting.ColumnDataSource
Bokeh object that enables the visualization.
Returns
----------
fig : `bokeh.plotting.figure` instance
step_renderer : GlyphRenderer
vertical_line : Span
"""
mission = lc.meta.get("MISSION")
if mission == "K2":
title = "Lightcurve for {} (K2 C{})".format(lc.label, lc.campaign)
elif mission == "Kepler":
title = "Lightcurve for {} (Kepler Q{})".format(lc.label, lc.quarter)
elif mission == "TESS":
title = "Lightcurve for {} (TESS Sec. {})".format(lc.label, lc.sector)
else:
title = "Lightcurve for target {}".format(lc.label)
fig = figure(
title=title,
height=340,
width=600,
tools="pan,wheel_zoom,box_zoom,tap,reset",
toolbar_location="below",
border_fill_color="whitesmoke",
)
fig.title.offset = -10
fig.yaxis.axis_label = "Flux (e/s)"
fig.xaxis.axis_label = "Time (days)"
try:
if (lc.mission == "K2") or (lc.mission == "Kepler"):
fig.xaxis.axis_label = "Time - 2454833 (days)"
elif lc.mission == "TESS":
fig.xaxis.axis_label = "Time - 2457000 (days)"
except AttributeError: # no mission keyword available
pass
if ylim_func is None:
ylims = get_lightcurve_y_limits(lc_source)
else:
ylims = _to_unitless(ylim_func(lc))
fig.y_range = Range1d(start=ylims[0], end=ylims[1])
# Add step lines, circles, and hover-over tooltips
fig.step(
"time",
"flux",
line_width=1,
color="gray",
source=lc_source,
nonselection_line_color="gray",
nonselection_line_alpha=1.0,
)
circ = fig.circle(
"time",
"flux",
source=lc_source,
fill_alpha=0.3,
size=8,
line_color=None,
selection_color="firebrick",
nonselection_fill_alpha=0.0,
nonselection_fill_color="grey",
nonselection_line_color=None,
nonselection_line_alpha=0.0,
fill_color=None,
hover_fill_color="firebrick",
hover_alpha=0.9,
hover_line_color="white",
)
tooltips = [
("Cadence", "@cadence"),
("Time ({})".format(lc.time.format.upper()), "@time{0,0.000}"),
("Time (ISO)", "@time_iso"),
("Flux", "@flux"),
("Quality Code", "@quality_code"),
("Quality Flag", "@quality"),
]
fig.add_tools(
HoverTool(
tooltips=tooltips,
renderers=[circ],
mode="mouse",
point_policy="snap_to_data",
)
)
# Vertical line to indicate the cadence
vertical_line = Span(
location=lc.time[0].value,
dimension="height",
line_color="firebrick",
line_width=4,
line_alpha=0.5,
)
fig.add_layout(vertical_line)
return fig, vertical_line
def _add_tics_with_matching_gaia_ids_to(result, tab, gaia_ids):
# use pandas Series rather than plain list, so they look like the existing columns in the source
#
# Note: we convert all the data to string to better handle cases when a star has no TIC
# In such cases, if we supply None as a value in a pandas Series,
# bokeh's tooltip template will render it as NaN (rather than empty string)
# To avoid NaN display, we force the Series to use string dtype, and for stars with missing TICs,
# empty string will be used as the value. bokeh's tooltip template can correctly render it as empty string
col_tic_id = Series(data=[_get_tic_meta_of_gaia_in_nearby(tab, id, 'TIC ID', "") for id in gaia_ids],
dtype=str)
col_tess_mag = Series(data=[_get_tic_meta_of_gaia_in_nearby(tab, id, 'TESS Mag', "") for id in gaia_ids],
dtype=str)
col_separation = Series(data=[_get_tic_meta_of_gaia_in_nearby(tab, id, 'Separation (arcsec)', "") for id in gaia_ids],
dtype=str)
result['tic'] = col_tic_id
result['TESSmag'] = col_tess_mag
result['separation'] = col_separation
return result
# use case: signify Gaia ID (Source, int type) as missing
_MISSING_INT_VAL = 0
def _add_tics_with_no_matching_gaia_ids_to(result, tab, gaia_ids, magnitude_limit):
def _add_to(data_dict, dest_colname, src):
# the data_dict should ultimately have the same columns/dtype as the result,
# as it will be appended to the result at the end
data_dict[dest_colname] = Series(data=src, dtype=result[dest_colname].dtype)
def _dummy_like(ary, dtype):
dummy_val = None
if pd.api.types.is_integer_dtype(dtype):
dummy_val = _MISSING_INT_VAL
elif pd.api.types.is_float_dtype(dtype):
dummy_val = np.nan
return [dummy_val for i in range(len(ary))]
# filter out those with matching gaia ids
# (handled in `_add_tics_with_matching_gaia_ids_to()`)
gaia_str_ids = [str(id) for id in gaia_ids]
tab = tab[np.isin(tab['GAIA DR2'], gaia_str_ids, invert=True)]
# filter out those with gaia ids, but Gaia Mag is smaller than magnitude_limit
# (they won't appear in the given gaia_ids list)
tab = tab[tab['GAIA Mag'] < magnitude_limit]
# apply magnitude_limit filter for those with no Gaia data using TESS mag
tab = tab[tab['TESS Mag'] < magnitude_limit]
# convert the filtered tab to a dataframe, so as to append to the existing result
data = dict()
_add_to(data, 'tic', tab['TIC ID'])
_add_to(data, 'TESSmag', tab['TESS Mag'])
_add_to(data, 'magForSize', tab['TESS Mag'])
_add_to(data, 'separation', tab['Separation (arcsec)'])
# convert the string Ra/Dec to float
# we assume the equinox is the same as those from Gaia DR2
coords = SkyCoord(tab['RA'], tab['Dec'], unit=(u.hourangle, u.deg), frame='icrs')
_add_to(data, 'RA_ICRS', coords.ra.value)
_add_to(data, 'DE_ICRS', coords.dec.value)
_add_to(data, 'pmRA', tab['PM RA (mas/yr)'])
_add_to(data, 'e_pmRA', tab['PM RA Err (mas/yr)'])
_add_to(data, 'pmDE', tab['PM Dec (mas/yr)'])
_add_to(data, 'e_pmDE', tab['PM Dec Err (mas/yr)'])
# add dummy columns so that the resulting data frame would match the existing one
nontic_colnames = [c for c in result.keys() if c not in data.keys()]
for c in nontic_colnames:
data[c] = Series(data=_dummy_like(tab, result[c].dtype), dtype=result[c].dtype)
# finally, append the entries to existing result dataframe
return pd.concat([result, pd.DataFrame(data)])
def _add_nearby_tics_if_tess(tpf, magnitude_limit, result):
tic_id = tpf.meta.get('TICID', None)
# handle 3 cases:
# - TESS tpf has a valid id, type integer
# - some TESSCut products have an empty string while others have None
# - Kepler tpf does not have the header
if tic_id is None or tic_id == "":
return result, [], []
if isinstance(tic_id, str):
# for cases tpf is from tpf.cutout() call in #1089
tic_id = tic_id.replace("_CUTOUT", "")
# nearby TICs from ExoFOP
tab = _search_nearby_of_tess_target(tic_id)
gaia_ids = result['Source'].array
# merge the TICs with matching Gaia entries
result = _add_tics_with_matching_gaia_ids_to(result, tab, gaia_ids)
# add new entries for the TICs with no matching Gaia ones
result = _add_tics_with_no_matching_gaia_ids_to(result, tab, gaia_ids, magnitude_limit)
source_colnames_extras = ['tic', 'TESSmag', 'separation']
tooltips_extras = [("TIC", "@tic"), ("TESS Mag", "@TESSmag"), ("Separation (\")", "@separation")]
return result, source_colnames_extras, tooltips_extras
def _to_display(series):
def _format(val):
if val == _MISSING_INT_VAL or np.isnan(val):
return ""
else:
return str(val)
return pd.Series(data=[_format(v) for v in series], dtype=str)
def _get_nearby_gaia_objects(tpf, magnitude_limit=18):
"""Get nearby objects (of the target defined in tpf) from Gaia.
The result is formatted for use in plotting."""
# Get the positions of the Gaia sources
try:
c1 = SkyCoord(tpf.ra, tpf.dec, frame="icrs", unit="deg")
except Exception as err:
msg = ("Cannot get nearby stars in GAIA because TargetPixelFile has no valid coordinate. "
f"ra: {tpf.ra}, dec: {tpf.dec}")
raise LightkurveError(msg) from err
# Use pixel scale for query size
pix_scale = 4.0 # arcseconds / pixel for Kepler, default
if tpf.mission == "TESS":
pix_scale = 21.0
# We are querying with a diameter as the radius, overfilling by 2x.
from astroquery.vizier import Vizier
Vizier.ROW_LIMIT = -1
with warnings.catch_warnings():
# suppress useless warning to workaround https://github.com/astropy/astroquery/issues/2352
warnings.filterwarnings(
"ignore", category=u.UnitsWarning, message="Unit 'e' not supported by the VOUnit standard"
)
result = Vizier.query_region(
c1,
catalog=["I/345/gaia2"],
radius=Angle(np.max(tpf.shape[1:]) * pix_scale, "arcsec"),
)
no_targets_found_message = ValueError(
"Either no sources were found in the query region " "or Vizier is unavailable"
)
too_few_found_message = ValueError(
"No sources found brighter than {:0.1f}".format(magnitude_limit)
)
if result is None:
raise no_targets_found_message
elif len(result) == 0:
raise too_few_found_message
result = result["I/345/gaia2"].to_pandas()
result = result[result.Gmag < magnitude_limit]
if len(result) == 0:
raise no_targets_found_message
# drop all the filtered rows, it makes subsequent TESS-specific processing easier (to add rows/columns)
result.reset_index(drop=True, inplace=True)
result['magForSize'] = result['Gmag'] # to be used as the basis for sizing the dots in plots
return result
def add_gaia_figure_elements(tpf, fig, magnitude_limit=18):
"""Make the Gaia Figure Elements"""
result = _get_nearby_gaia_objects(tpf, magnitude_limit)
source_colnames_extras = []
tooltips_extras = []
try:
result, source_colnames_extras, tooltips_extras = _add_nearby_tics_if_tess(tpf, magnitude_limit, result)
except Exception as err:
warnings.warn(
f"interact_sky() - cannot obtain nearby TICs. Skip it. The error: {err}",
LightkurveWarning,
)
ra_corrected, dec_corrected, _ = _correct_with_proper_motion(
np.nan_to_num(np.asarray(result.RA_ICRS)) * u.deg, np.nan_to_num(np.asarray(result.DE_ICRS)) * u.deg,
np.nan_to_num(np.asarray(result.pmRA)) * u.milliarcsecond / u.year,
np.nan_to_num(np.asarray(result.pmDE)) * u.milliarcsecond / u.year,
Time(2457206.375, format="jd", scale="tdb"),
tpf.time[0])
result.RA_ICRS = ra_corrected.to(u.deg).value
result.DE_ICRS = dec_corrected.to(u.deg).value
# Convert to pixel coordinates
radecs = np.vstack([result["RA_ICRS"], result["DE_ICRS"]]).T
coords = tpf.wcs.all_world2pix(radecs, 0)
# Gently size the points by their Gaia magnitude
sizes = 64.0 / 2 ** (result["magForSize"] / 5.0)
one_over_parallax = 1.0 / (result["Plx"] / 1000.0)
source = ColumnDataSource(
data=dict(
ra=result["RA_ICRS"],
dec=result["DE_ICRS"],
pmra=result["pmRA"],
pmde=result["pmDE"],
source=_to_display(result["Source"]),
Gmag=result["Gmag"],
plx=result["Plx"],
one_over_plx=one_over_parallax,
x=coords[:, 0] + tpf.column,
y=coords[:, 1] + tpf.row,
size=sizes,
)
)
for c in source_colnames_extras:
source.data[c] = result[c]
tooltips = [
("Gaia source", "@source"),
("G", "@Gmag"),
("Parallax (mas)", "@plx (~@one_over_plx{0,0} pc)"),
("RA", "@ra{0,0.00000000}"),
("DEC", "@dec{0,0.00000000}"),
("pmRA", "@pmra{0,0.000} mas/yr"),
("pmDE", "@pmde{0,0.000} mas/yr"),
("column", "@x{0.0}"),
("row", "@y{0.0}"),
]
tooltips = tooltips_extras + tooltips
r = fig.circle(
"x",
"y",
source=source,
fill_alpha=0.3,
size="size",
line_color=None,
selection_color="firebrick",
nonselection_fill_alpha=0.3,
nonselection_line_color=None,
nonselection_line_alpha=1.0,
fill_color="firebrick",
hover_fill_color="firebrick",
hover_alpha=0.9,
hover_line_color="white",
)
fig.add_tools(
HoverTool(
tooltips=tooltips,
renderers=[r],
mode="mouse",
point_policy="snap_to_data",
)
)
# mark the target's position too
target_ra, target_dec, pm_corrected = _get_corrected_coordinate(tpf)
target_x, target_y = None, None
if target_ra is not None and target_dec is not None:
pix_x, pix_y = tpf.wcs.all_world2pix([(target_ra, target_dec)], 0)[0]
target_x, target_y = tpf.column + pix_x, tpf.row + pix_y
fig.cross(x=target_x, y=target_y, size=20, color="black", line_width=1)
if not pm_corrected:
warnings.warn(("Proper motion correction cannot be applied to the target, as none is available. "
"Thus the target (the cross) might be noticeably away from its actual position, "
"if it has large proper motion."),
category=LightkurveWarning)
# display an arrow on the selected target
arrow_head = VeeHead(size=16)
arrow_4_selected = Arrow(end=arrow_head, line_color="red", line_width=4,
x_start=0, y_start=0, x_end=0, y_end=0, tags=["selected"],
visible=False)
fig.add_layout(arrow_4_selected)
def show_arrow_at_target(attr, old, new):
if len(new) > 0:
x, y = source.data["x"][new[0]], source.data["y"][new[0]]
# workaround: the arrow_head colors should have been specified once
# at creation time, but doing so seems to hit a bokeh bug, resulting in an error
# of the form ValueError("expected ..., got {'value': 'red'}"):
# in the actual websocket call, the color value appears to be
# sent as "{'value': 'red'}" while "red" is expected.
# The error is somehow bypassed if the colors are specified here instead.
#
# The issue is present in bokeh 2.2.3 / 2.1.1, but not in bokeh 2.3.1;
# no specific issue/PR on GitHub could be identified for it.
arrow_head.fill_color = "red"
arrow_head.line_color = "black"
# place the arrow near (x,y), taking care of boundary cases (at the edge of the plot)
if x < fig.x_range.start + 1:
# boundary case: the point is at the left edge of the plot
arrow_4_selected.x_start = x + 0.85
arrow_4_selected.x_end = x + 0.2
elif x > fig.x_range.end - 1:
# boundary case: the point is at the right edge of the plot
arrow_4_selected.x_start = x - 0.85
arrow_4_selected.x_end = x - 0.2
elif target_x is None or x < target_x:
# normal case 1 : point is to the left of the target
arrow_4_selected.x_start = x - 0.85
arrow_4_selected.x_end = x - 0.2
else:
# normal case 2 : point is to the right of the target
# flip arrow's direction so that it won't overlap with the target
arrow_4_selected.x_start = x + 0.85
arrow_4_selected.x_end = x + 0.2
if y > fig.y_range.end - 0.5:
# boundary case: the point is near the top of the plot
arrow_4_selected.y_start = y - 0.4
arrow_4_selected.y_end = y - 0.1
elif y < fig.y_range.start + 0.5:
# boundary case: the point is near the bottom of the plot
arrow_4_selected.y_start = y + 0.4
arrow_4_selected.y_end = y + 0.1
else: # normal case
arrow_4_selected.y_start = y
arrow_4_selected.y_end = y
arrow_4_selected.visible = True
else:
arrow_4_selected.visible = False
source.selected.on_change("indices", show_arrow_at_target)
# a widget that displays some of the selected star's metadata
# so that it can be copied (e.g., the Gaia ID).
# It is a workaround for the fact that bokeh's hover tooltip disappears as soon as the mouse moves away from the star.
message_selected_target = Div(text="")
def show_target_info(attr, old, new):
# the following essentially redoes the bokeh tooltip template above in plain HTML,
# with some slight tweaks, mainly to add some helpful links.
#
# Note: in source, columns "x" and "y" are ndarrays while the other columns are pandas Series,
# so the access API is slightly different.
if len(new) > 0:
msg = "Selected:<br><table>"
for idx in new:
tic_id = source.data['tic'].iat[idx] if source.data.get('tic') is not None else None
if tic_id is not None and tic_id != "": # TESS-specific meta data, if available
msg += f"""
<tr><td>TIC</td><td>{tic_id}
(<a target="_blank" href="https://exofop.ipac.caltech.edu/tess/target.php?id={tic_id}">ExoFOP</a>)</td></tr>
<tr><td>TESS Mag</td><td>{source.data['TESSmag'].iat[idx]}</td></tr>
<tr><td>Separation (")</td><td>{source.data['separation'].iat[idx]}</td></tr>
"""
# the main meta data
msg += f"""
<tr><td>Gaia source</td><td>{source.data['source'].iat[idx]}
(<a target="_blank"
href="http://vizier.u-strasbg.fr/viz-bin/VizieR-S?Gaia DR2 {source.data['source'].iat[idx]}">Vizier</a>)</td></tr>
<tr><td>G</td><td>{source.data['Gmag'].iat[idx]:.3f}</td></tr>
<tr><td>Parallax (mas)</td>
<td>{source.data['plx'].iat[idx]:,.3f} (~ {source.data['one_over_plx'].iat[idx]:,.0f} pc)</td>
</tr>
<tr><td>RA</td><td>{source.data['ra'].iat[idx]:,.8f}</td></tr>
<tr><td>DEC</td><td>{source.data['dec'].iat[idx]:,.8f}</td></tr>
<tr><td>pmRA</td><td>{source.data['pmra'].iat[idx]} mas/yr</td></tr>
<tr><td>pmDE</td><td>{source.data['pmde'].iat[idx]} mas/yr</td></tr>
<tr><td>column</td><td>{source.data['x'][idx]:.1f}</td></tr>
<tr><td>row</td><td>{source.data['y'][idx]:.1f}</td></tr>
<tr><td colspan="2">Search
<a target="_blank"
href="http://simbad.u-strasbg.fr/simbad/sim-id?Ident=Gaia DR2 {source.data['source'].iat[idx]}">
SIMBAD by Gaia ID</a></td></tr>
<tr><td colspan="2">
<a target="_blank"
href="http://simbad.u-strasbg.fr/simbad/sim-coo?Coord={source.data['ra'].iat[idx]}+{source.data['dec'].iat[idx]}&Radius=2&Radius.unit=arcmin">
SIMBAD by coordinate</a></td></tr>
<tr><td colspan="2"> </td></tr>
"""
msg += "\n</table>"  # close the table opened above
message_selected_target.text = msg
# else do nothing (not clearing the widget) for now.
def on_selected_change(*args):
show_arrow_at_target(*args)
show_target_info(*args)
source.selected.on_change("indices", show_target_info)
return fig, r, message_selected_target
def to_selected_pixels_source(tpf_source):
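"""Return a ColumnDataSource containing only the pixels currently selected in the given TPF data source."""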
xx = tpf_source.data["xx"].flatten()
yy = tpf_source.data["yy"].flatten()
selected_indices = tpf_source.selected.indices
return ColumnDataSource(dict(
xx=xx[selected_indices],
yy=yy[selected_indices],
))
def make_tpf_figure_elements(
tpf,
tpf_source,
tpf_source_selectable=True,
pedestal=None,
fiducial_frame=None,
width=370,
height=340,
scale="log",
vmin=None,
vmax=None,
cmap="Viridis256",
tools="tap,box_select,wheel_zoom,reset",
):
"""Returns the target pixel file (TPF) figure elements.
Parameters
----------
tpf : TargetPixelFile
TPF to show.
tpf_source : bokeh.plotting.ColumnDataSource
TPF data source.
tpf_source_selectable : boolean
True if the tpf_source is selectable. False to show the selected pixels
in the tpf_source only. Default is True.
pedestal: float
A scalar value to be added to the TPF flux values, often to avoid
taking the log of a negative number in colorbars.
Defaults to `-min(tpf.flux) + 1`
fiducial_frame: int
The TPF frame to display by default; the WCS is assumed to be
exact for this frame.
scale: str
Color scale for tpf figure. Default is 'log'
vmin: int [optional]
Minimum color scale for tpf figure
vmax: int [optional]
Maximum color scale for tpf figure
cmap: str
Colormap to use for tpf plot. Default is 'Viridis256'
tools: str
Bokeh tool list
Returns
-------
fig, stretch_slider : bokeh.plotting.figure.Figure, RangeSlider
"""
if pedestal is None:
pedestal = -np.nanmin(tpf.flux.value) + 1
if scale == "linear":
pedestal = 0
if tpf.mission in ["Kepler", "K2"]:
title = "Pixel data (CCD {}.{})".format(tpf.module, tpf.output)
elif tpf.mission == "TESS":
title = "Pixel data (Camera {}.{})".format(tpf.camera, tpf.ccd)
else:
title = "Pixel data"
# We subtract 0.5 from the range below because pixel coordinates refer to
# the middle of a pixel, e.g. (col, row) = (10.0, 20.0) is a pixel center.
fig = figure(
width=width,
height=height,
x_range=(tpf.column - 0.5, tpf.column + tpf.shape[2] - 0.5),
y_range=(tpf.row - 0.5, tpf.row + tpf.shape[1] - 0.5),
title=title,
tools=tools,
toolbar_location="below",
border_fill_color="whitesmoke",
)
fig.yaxis.axis_label = "Pixel Row Number"
fig.xaxis.axis_label = "Pixel Column Number"
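# vlo / vhi (the 0.2 and 99.8 percentiles) bound the stretch slider below,
# while lo / hi (the 1 and 95 percentiles) set the initial screen stretch.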
vlo, lo, hi, vhi = np.nanpercentile(tpf.flux.value + pedestal, [0.2, 1, 95, 99.8])
if vmin is not None:
vlo, lo = vmin, vmin
if vmax is not None:
vhi, hi = vmax, vmax
if scale == "log":
vstep = (np.log10(vhi) - np.log10(vlo)) / 300.0 # assumes counts >> 1.0!
if scale == "linear":
vstep = (vhi - vlo) / 300.0 # assumes counts >> 1.0!
if scale == "log":
color_mapper = LogColorMapper(palette=cmap, low=lo, high=hi)
elif scale == "linear":
color_mapper = LinearColorMapper(palette=cmap, low=lo, high=hi)
else:
raise ValueError("Please specify either `linear` or `log` scale for color.")
fig.image(
[tpf.flux.value[fiducial_frame, :, :] + pedestal],
x=tpf.column - 0.5,
y=tpf.row - 0.5,
dw=tpf.shape[2],
dh=tpf.shape[1],
dilate=True,
color_mapper=color_mapper,
name="tpfimg",
)
# The colorbar will update with the screen stretch slider.
# As the tick labels grow longer, the colorbar's margin increases and its
# share of the plot window grows, shrinking the plot area.
# This effect is known; possible workarounds are discussed at:
# https://github.com/bokeh/bokeh/issues/5186
if scale == "log":
ticker = LogTicker(desired_num_ticks=8)
elif scale == "linear":
ticker = BasicTicker(desired_num_ticks=8)
color_bar = ColorBar(
color_mapper=color_mapper,
ticker=ticker,
label_standoff=-10,
border_line_color=None,
location=(0, 0),
background_fill_color="whitesmoke",
major_label_text_align="left",
major_label_text_baseline="middle",
title="e/s",
margin=0,
)
fig.add_layout(color_bar, "right")
color_bar.formatter = PrintfTickFormatter(format="%14i")
if tpf_source is not None:
if tpf_source_selectable:
fig.rect(
"xx",
"yy",
1,
1,
source=tpf_source,
fill_color="gray",
fill_alpha=0.4,
line_color="white",
)
else:
# Paint the selected pixels such that they cannot be selected / deselected.
# Used to show specified aperture pixels without letting users
# change them in ``interact_sky``.
selected_pixels_source = to_selected_pixels_source(tpf_source)
r_selected = fig.rect(
"xx",
"yy",
1,
1,
source=selected_pixels_source,
fill_color="gray",
fill_alpha=0.0,
line_color="white",
)
r_selected.nonselection_glyph = None
# Configure the stretch slider and its callback function
if scale == "log":
start, end = np.log10(vlo), np.log10(vhi)
values = (np.log10(lo), np.log10(hi))
elif scale == "linear":
start, end = vlo, vhi
values = (lo, hi)
stretch_slider = RangeSlider(
start=start,
end=end,
step=vstep,
title="Screen Stretch ({})".format(scale),
value=values,
orientation="horizontal",
width=200,
direction="ltr",
show_value=True,
sizing_mode="fixed",
height=15,
name="tpfstretch",
)
def stretch_change_callback_log(attr, old, new):
"""TPF stretch slider callback."""
fig.select("tpfimg")[0].glyph.color_mapper.high = 10 ** new[1]
fig.select("tpfimg")[0].glyph.color_mapper.low = 10 ** new[0]
def stretch_change_callback_linear(attr, old, new):
"""TPF stretch slider callback."""
fig.select("tpfimg")[0].glyph.color_mapper.high = new[1]
fig.select("tpfimg")[0].glyph.color_mapper.low = new[0]
if scale == "log":
stretch_slider.on_change("value", stretch_change_callback_log)
if scale == "linear":
stretch_slider.on_change("value", stretch_change_callback_linear)
return fig, stretch_slider
def make_default_export_name(tpf, suffix="custom-lc"):
"""Make the default filename used to save a custom interact mask."""
fn = tpf.hdu.filename()
if fn is None:
outname = "{}_{}_{}.fits".format(tpf.mission, tpf.targetid, suffix)
else:
base = os.path.basename(fn)
outname = base.rsplit(".fits")[0] + "-{}.fits".format(suffix)
return outname
def show_interact_widget(
tpf,
notebook_url="localhost:8888",
lc=None,
max_cadences=200000,
aperture_mask="default",
exported_filename=None,
transform_func=None,
ylim_func=None,
vmin=None,
vmax=None,
scale="log",
cmap="Viridis256",
):
"""Display an interactive Jupyter Notebook widget to inspect the pixel data.
The widget will show both the lightcurve and pixel data. The pixel data
supports pixel selection via Bokeh tap and box select tools in an
interactive javascript user interface.
Note: at this time, this feature only works inside an active Jupyter
Notebook, and tends to be too slow when more than ~30,000 cadences
are contained in the TPF (e.g. short cadence data).
Parameters
----------
tpf : lightkurve.TargetPixelFile
Target Pixel File to interact with
notebook_url: str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
max_cadences: int
Raise a RuntimeError if the number of cadences shown is larger than
this value. This limit helps keep browsers from becoming unresponsive.
aperture_mask : array-like, 'pipeline', 'threshold', 'default', or 'all'
A boolean array describing the aperture such that `True` means
that the pixel will be used.
If None or 'all' are passed, all pixels will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
If 'default' is passed, 'pipeline' mask will be used when available,
with 'threshold' as the fallback.
exported_filename: str
An optional filename to assign to exported fits files containing
the custom aperture mask generated by clicking on pixels in interact.
The default adds a suffix '-custom-aperture-mask.fits' to the
TargetPixelFile basename.
transform_func: function
A function that transforms the lightcurve. The function takes in a
LightCurve object as input and returns a LightCurve object as output.
The function can be complex, such as detrending the lightcurve. In this
way, the interactive selection of aperture mask can be evaluated after
inspection of the transformed lightcurve. The transform_func is applied
before saving a fits file. Default: None (no transform is applied).
ylim_func: function
A function that returns ylimits (low, high) given a LightCurve object.
The default is to return an expanded window around the 10-90th
percentile of lightcurve flux values.
scale: str
Color scale for tpf figure. Default is 'log'
vmin: int [optional]
Minimum color scale for tpf figure
vmax: int [optional]
Maximum color scale for tpf figure
cmap: str
Colormap to use for tpf plot. Default is 'Viridis256'
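Examples
--------
A minimal usage sketch (the target and quarter below are only illustrative):
>>> import lightkurve as lk  # doctest: +SKIP
>>> tpf = lk.search_targetpixelfile("Kepler-10", quarter=6).download()  # doctest: +SKIP
>>> tpf.interact()  # doctest: +SKIP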
"""
try:
import bokeh
if bokeh.__version__[0] == "0":
warnings.warn(
"interact() requires Bokeh version 1.0 or later", LightkurveWarning
)
except ImportError:
log.error(
"The interact() tool requires the `bokeh` Python package; "
"you can install bokeh using e.g. `conda install bokeh`."
)
return None
aperture_mask = tpf._parse_aperture_mask(aperture_mask)
if ~aperture_mask.any():
log.error(
"No pixels in `aperture_mask`, finding optimum aperture using `tpf.create_threshold_mask`."
)
aperture_mask = tpf.create_threshold_mask()
if ~aperture_mask.any():
log.error("No pixels in `aperture_mask`, using all pixels.")
aperture_mask = tpf._parse_aperture_mask("all")
if exported_filename is None:
exported_filename = make_default_export_name(tpf)
try:
exported_filename = str(exported_filename)
except:
log.error("Invalid input filename type for interact()")
raise
if ".fits" not in exported_filename.lower():
exported_filename += ".fits"
if lc is None:
lc = tpf.to_lightcurve(aperture_mask=aperture_mask)
tools = "tap,box_select,wheel_zoom,reset"
else:
lc = lc.copy()
tools = "wheel_zoom,reset"
aperture_mask = np.zeros(tpf.flux.shape[1:]).astype(bool)
aperture_mask[0, 0] = True
lc.meta["APERTURE_MASK"] = aperture_mask
if transform_func is not None:
lc = transform_func(lc)
# Bokeh cannot handle many data points
# https://github.com/bokeh/bokeh/issues/7490
n_cadences = len(lc.cadenceno)
if n_cadences > max_cadences:
log.error(
f"Error: interact cannot display more than {max_cadences} "
"cadences without suffering significant performance issues. "
"You can limit the number of cadences shown using slicing, e.g. "
"`tpf[0:1000].interact()`. Alternatively, you can override "
"this limitation by passing the `max_cadences` argument."
)
elif n_cadences > 30000:
log.warning(
f"Warning: the pixel file contains {n_cadences} cadences. "
"The performance of interact() is very slow for such a "
"large number of frames. Consider using slicing, e.g. "
"`tpf[0:1000].interact()`, to make interact run faster."
)
def create_interact_ui(doc):
# The data source includes metadata for hover-over tooltips
lc_source = prepare_lightcurve_datasource(lc)
tpf_source = prepare_tpf_datasource(tpf, aperture_mask)
# Create the lightcurve figure and its vertical marker
fig_lc, vertical_line = make_lightcurve_figure_elements(
lc, lc_source, ylim_func=ylim_func
)
# Create the TPF figure and its stretch slider
pedestal = -np.nanmin(tpf.flux.value) + 1
if scale == "linear":
pedestal = 0
fig_tpf, stretch_slider = make_tpf_figure_elements(
tpf,
tpf_source,
pedestal=pedestal,
fiducial_frame=0,
vmin=vmin,
vmax=vmax,
scale=scale,
cmap=cmap,
tools=tools,
)
# Helper lookup table which maps cadence number onto flux array index.
tpf_index_lookup = {cad: idx for idx, cad in enumerate(tpf.cadenceno)}
# Interactive slider widgets and buttons to select the cadence number
cadence_slider = Slider(
start=np.min(tpf.cadenceno),
end=np.max(tpf.cadenceno),
value=np.min(tpf.cadenceno),
step=1,
title="Cadence Number",
width=490,
)
r_button = Button(label=">", button_type="default", width=30)
l_button = Button(label="<", button_type="default", width=30)
export_button = Button(
label="Save Lightcurve", button_type="success", width=120
)
message_on_save = Div(text=" ", width=600, height=15)
# Callbacks
def _create_lightcurve_from_pixels(
tpf, selected_pixel_indices, transform_func=transform_func
):
"""Create the lightcurve from the selected pixel index list"""
selected_mask = aperture_mask_from_selected_indices(selected_pixel_indices, tpf)
lc_new = tpf.to_lightcurve(aperture_mask=selected_mask)
lc_new.meta["APERTURE_MASK"] = selected_mask
if transform_func is not None:
lc_transformed = transform_func(lc_new)
if len(lc_transformed) != len(lc_new):
warnings.warn(
"Dropping cadences in `transform_func` is not "
"yet supported due to fixed time coordinates. "
"Skipping the transformation...",
LightkurveWarning,
)
else:
lc_new = lc_transformed
lc_new.meta["APERTURE_MASK"] = selected_mask
return lc_new
def update_upon_pixel_selection(attr, old, new):
"""Callback to take action when pixels are selected."""
# Check if a selection was "re-clicked", then de-select
if (sorted(old) == sorted(new)) & (new != []):
# Trigger recursion
tpf_source.selected.indices = new[1:]
if new != []:
lc_new = _create_lightcurve_from_pixels(
tpf, new, transform_func=transform_func
)
lc_source.data["flux"] = lc_new.flux.value
if ylim_func is None:
ylims = get_lightcurve_y_limits(lc_source)
else:
ylims = _to_unitless(ylim_func(lc_new))
fig_lc.y_range.start = ylims[0]
fig_lc.y_range.end = ylims[1]
else:
lc_source.data["flux"] = lc.flux.value * 0.0
fig_lc.y_range.start = -1
fig_lc.y_range.end = 1
message_on_save.text = " "
export_button.button_type = "success"
def update_upon_cadence_change(attr, old, new):
"""Callback to take action when cadence slider changes"""
if new in tpf.cadenceno:
frameno = tpf_index_lookup[new]
fig_tpf.select("tpfimg")[0].data_source.data["image"] = [
tpf.flux.value[frameno, :, :] + pedestal
]
vertical_line.update(location=tpf.time.value[frameno])
else:
fig_tpf.select("tpfimg")[0].data_source.data["image"] = [
tpf.flux.value[0, :, :] * np.NaN
]
lc_source.selected.indices = []
def go_right_by_one():
"""Step forward in time by a single cadence"""
existing_value = cadence_slider.value
if existing_value < np.max(tpf.cadenceno):
cadence_slider.value = existing_value + 1
def go_left_by_one():
"""Step back in time by a single cadence"""
existing_value = cadence_slider.value
if existing_value > np.min(tpf.cadenceno):
cadence_slider.value = existing_value - 1
def save_lightcurve():
"""Save the lightcurve as a fits file with mask as HDU extension"""
if tpf_source.selected.indices != []:
lc_new = _create_lightcurve_from_pixels(
tpf, tpf_source.selected.indices, transform_func=transform_func
)
lc_new.to_fits(
exported_filename,
overwrite=True,
flux_column_name="SAP_FLUX",
aperture_mask=lc_new.meta["APERTURE_MASK"].astype(np.int_),
SOURCE="lightkurve interact",
NOTE="custom mask",
MASKNPIX=np.nansum(lc_new.meta["APERTURE_MASK"]),
)
if message_on_save.text == " ":
text = '<font color="black"><i>Saved file {} </i></font>'
message_on_save.text = text.format(exported_filename)
export_button.button_type = "success"
else:
text = '<font color="gray"><i>Saved file {} </i></font>'
message_on_save.text = text.format(exported_filename)
else:
text = (
'<font color="gray"><i>No pixels selected, no mask saved</i></font>'
)
export_button.button_type = "warning"
message_on_save.text = text
def jump_to_lightcurve_position(attr, old, new):
if new != []:
cadence_slider.value = lc.cadenceno[new[0]]
# Map changes to callbacks
r_button.on_click(go_right_by_one)
l_button.on_click(go_left_by_one)
tpf_source.selected.on_change("indices", update_upon_pixel_selection)
lc_source.selected.on_change("indices", jump_to_lightcurve_position)
export_button.on_click(save_lightcurve)
cadence_slider.on_change("value", update_upon_cadence_change)
# Layout all of the plots
sp1, sp2, sp3, sp4 = (
Spacer(width=15),
Spacer(width=30),
Spacer(width=80),
Spacer(width=60),
)
widgets_and_figures = layout(
[fig_lc, fig_tpf],
[l_button, sp1, r_button, sp2, cadence_slider, sp3, stretch_slider],
[export_button, sp4, message_on_save],
)
doc.add_root(widgets_and_figures)
output_notebook(verbose=False, hide_banner=True)
return show(create_interact_ui, notebook_url=notebook_url)
def show_skyview_widget(tpf, notebook_url="localhost:8888", aperture_mask="empty", magnitude_limit=18):
"""Display an interactive widget showing nearby Gaia sources overlaid on the TPF ("skyview").
Parameters
----------
tpf : lightkurve.TargetPixelFile
Target Pixel File to interact with
notebook_url: str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
aperture_mask : array-like, 'pipeline', 'threshold', 'default', 'background', or 'empty'
Highlight pixels selected by aperture_mask.
Default is 'empty': no pixel is highlighted.
magnitude_limit : float
Only include Gaia sources brighter than this G-band magnitude. Default is 18.
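Examples
--------
A minimal sketch (the target and sector below are only illustrative):
>>> import lightkurve as lk  # doctest: +SKIP
>>> tpf = lk.search_targetpixelfile("TIC 261136679", sector=1).download()  # doctest: +SKIP
>>> tpf.interact_sky(magnitude_limit=16)  # doctest: +SKIP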
"""
try:
import bokeh
if bokeh.__version__[0] == "0":
warnings.warn(
"interact_sky() requires Bokeh version 1.0 or later", LightkurveWarning
)
except ImportError:
log.error(
"The interact_sky() tool requires the `bokeh` Python package; "
"you can install bokeh using e.g. `conda install bokeh`."
)
return None
# Try to identify the "fiducial frame", for which the TPF WCS is exact
zp = (tpf.pos_corr1 == 0) & (tpf.pos_corr2 == 0)
(zp_loc,) = np.where(zp)
if len(zp_loc) == 1:
fiducial_frame = zp_loc[0]
else:
fiducial_frame = 0
aperture_mask = tpf._parse_aperture_mask(aperture_mask)
def create_interact_ui(doc):
tpf_source = prepare_tpf_datasource(tpf, aperture_mask)
# The data source includes metadata for hover-over tooltips
# Create the TPF figure and its stretch slider
fig_tpf, stretch_slider = make_tpf_figure_elements(
tpf,
tpf_source,
tpf_source_selectable=False,
fiducial_frame=fiducial_frame,
width=640,
height=600,
tools="tap,box_zoom,wheel_zoom,reset"
)
fig_tpf, r, message_selected_target = add_gaia_figure_elements(
tpf, fig_tpf, magnitude_limit=magnitude_limit
)
# Optionally override the default title
if tpf.mission == "K2":
fig_tpf.title.text = (
"Skyview for EPIC {}, K2 Campaign {}, CCD {}.{}".format(
tpf.targetid, tpf.campaign, tpf.module, tpf.output
)
)
elif tpf.mission == "Kepler":
fig_tpf.title.text = (
"Skyview for KIC {}, Kepler Quarter {}, CCD {}.{}".format(
tpf.targetid, tpf.quarter, tpf.module, tpf.output
)
)
elif tpf.mission == "TESS":
fig_tpf.title.text = "Skyview for TESS {} Sector {}, Camera {}.{}".format(
tpf.targetid, tpf.sector, tpf.camera, tpf.ccd
)
# Layout all of the plots
widgets_and_figures = layout([fig_tpf, message_selected_target], [stretch_slider])
doc.add_root(widgets_and_figures)
output_notebook(verbose=False, hide_banner=True)
return show(create_interact_ui, notebook_url=notebook_url)
| 53,474
| 37.471223
| 145
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/lightcurvefile.py
|
"""DEPRECATED: `LightCurveFile` classes were removed in Lightkurve v2.0 in
favor of only having `LightCurve` classes. To minimize breaking code, we
retain the `LightCurveFile` classes here as wrappers around the new
`LightCurve` objects, but will remove these wrappers in a future version.
"""
from . import LightCurve, KeplerLightCurve, TessLightCurve
LightCurveFile = LightCurve
KeplerLightCurveFile = KeplerLightCurve.read
TessLightCurveFile = TessLightCurve.read
| 470
| 41.818182
| 74
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/time.py
|
"""Adds the BKJD and BTJD time format for use by Astropy's `Time` object.
Caution: AstroPy time objects make a distinction between a time's format
(e.g. ISO, JD, MJD) and its scale (e.g. UTC, TDB). This can be confusing
because the acronym "BTJD" refers both to a format (TJD) and to a scale (TDB).
Note: the classes below derive from an AstroPy meta class which will automatically
register the formats for use in AstroPy Time objects.
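Example (a minimal sketch): once this module has been imported, the formats
below can be used directly with `Time` objects:
>>> from astropy.time import Time
>>> Time(1000.0, format='btjd', scale='tdb').jd
2458000.0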
"""
from astropy.time.formats import TimeFromEpoch
class TimeBKJD(TimeFromEpoch):
"""
Barycentric Kepler Julian Date (BKJD): days since JD 2454833.0.
For example, 0 in BKJD is noon on January 1, 2009.
BKJD is the format in which times are recorded in data products from
NASA's Kepler Space Telescope, where it is always given in the
Barycentric Dynamical Time (TDB) scale by convention.
"""
name = 'bkjd'
unit = 1.0
epoch_val = 2454833
epoch_val2 = None
epoch_scale = 'tdb'
epoch_format = 'jd'
class TimeBTJD(TimeFromEpoch):
"""
Barycentric TESS Julian Date (BTJD): days since JD 2457000.0.
For example, 0 in BTJD is noon on December 8, 2014.
BTJD is the format in which times are recorded in data products from
NASA's Transiting Exoplanet Survey Satellite (TESS), where it is
always given in the Barycentric Dynamical Time (TDB) scale by convention.
"""
name = 'btjd'
unit = 1.0
epoch_val = 2457000
epoch_val2 = None
epoch_scale = 'tdb'
epoch_format = 'jd'
| 1,509
| 31.12766
| 82
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/version.py
|
# It is important to store the version number in a separate file
# so that we can read it from setup.py without importing the package
__version__ = "2.4.1dev"
| 159
| 39
| 68
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/interact_bls.py
|
"""This module provides helper functions for the `LightCurve.interact_bls()` feature."""
import logging
import warnings
import numpy as np
from astropy.convolution import convolve, Box1DKernel
from astropy.time import Time, TimeDelta
from astropy.timeseries import BoxLeastSquares
import astropy.units as u
from .utils import LightkurveWarning
log = logging.getLogger(__name__)
# Import the optional Bokeh dependency, or print a friendly error otherwise.
try:
import bokeh # Import bokeh first so we get an ImportError we can catch
from bokeh.io import show, output_notebook
from bokeh.plotting import figure, ColumnDataSource
from bokeh.models import Slider, Span, Range1d
from bokeh.models import Text
from bokeh.layouts import layout, Spacer
from bokeh.models.tools import HoverTool
from bokeh.models.widgets import Button, Paragraph
from bokeh.events import PanEnd, Reset
except ImportError:
pass # we will print an error message in `show_interact_widget` instead
from .interact import prepare_lightcurve_datasource
from .lightcurve import LightCurve
__all__ = ["show_interact_widget"]
#
# Convert Time / Quantity to unitless ones for the use with bokeh
# Otherwise, Bokeh will complain:
# - NotImplementedError('cannot make a list of Quantities. Get list of values with q.value.tolist()')
# - TypeError('Object of type Time is not JSON serializable')
# - TypeError('Object of type TimeDelta is not JSON serializable')
#
def _to_unitless(data):
"""Convert the values in the data dict to unitless ones"""
return {key: getattr(val, "value", val) for key, val in data.items()}
def _to_ColumnDataSource(data):
"""Convert the values in the data dict to unitless ones for use with a ColumnDataSource"""
return ColumnDataSource(data=_to_unitless(data))
def _update_source(source, data):
"""Convert the values in the data dict to unitless ones and use them to update the given DataSource"""
source.data = _to_unitless(data)
return source
def _at_ratio(values, ratio):
"""Return a unitless scalar value that is at the given ratio in the range of the given values.
Conceptually, it is `(max(values) - min(values)) * ratio + min(values)`.
It is frequently used to position the help icon of a plot based on the axis values.
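For example, with values spanning 0 to 10 and a ratio of 0.25, the result is 2.5.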
"""
if getattr(values, "end", None) is not None:
# case: a Range, with start / end
return (values.end - values.start) * ratio + values.start
else:
# case plain number array, Time, or Quantity (with .value)
result = (np.max(values) - np.min(values)) * ratio + np.min(values)
# return raw value without unit
return getattr(result, "value", result)
def _isfinite(val):
val_actual = getattr(val, "value", val)
return np.isfinite(val_actual)
def prepare_bls_datasource(result, loc):
"""Prepare a bls result for bokeh plotting
Parameters
----------
result : BLS.model result
The BLS model result to use
loc : int
Index of the "best" period. (Usually the max power)
Returns
-------
bls_source : Bokeh.plotting.ColumnDataSource
Bokeh style source for plotting
"""
bls_source = _to_ColumnDataSource(
data=dict(
period=result["period"],
power=result["power"],
depth=result["depth"],
duration=result["duration"],
transit_time=result["transit_time"],
)
)
bls_source.selected.indices = [loc]
return bls_source
def prepare_folded_datasource(folded_lc):
"""Prepare a FoldedLightCurve object for bokeh plotting.
Parameters
----------
folded_lc : lightkurve.FoldedLightCurve
The folded lightcurve
Returns
-------
folded_source : Bokeh.plotting.ColumnDataSource
Bokeh style source for plotting
"""
folded_src = _to_ColumnDataSource(
data=dict(phase=folded_lc.time, flux=folded_lc.flux)
)
return folded_src
# Helper functions for help text...
def prepare_lc_help_source(lc):
data = dict(
time=[_at_ratio(lc.time, 0.98)],
flux=[_at_ratio(lc.flux, 0.95)],
boxicon=["https://bokeh.pydata.org/en/latest/_images/BoxZoom.png"],
panicon=["https://bokeh.pydata.org/en/latest/_images/Pan.png"],
reseticon=["https://bokeh.pydata.org/en/latest/_images/Reset.png"],
tapicon=["https://bokeh.pydata.org/en/latest/_images/Tap.png"],
hovericon=["https://bokeh.pydata.org/en/latest/_images/Hover.png"],
helpme=["?"],
help=[
"""
<div style="width: 550px;">
<div>
<span style="font-size: 12px; font-weight: bold;">Light Curve</span>
</div>
<div>
<span style="font-size: 11px;">This panel shows the full light curve, with the BLS model overlaid in red. The period of the model is the period
currently selected in the BLS panel [top, left], indicated by the vertical red line. The duration of the transit model is given by the duration slider below.</span>
<br></br>
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Bokeh Tools</span>
</div>
<div>
<span style="font-size: 11px;">Each of the three panels has Bokeh tools to navigate it.
You can turn each tool on/off by clicking its icon in the tray below each panel.
You can zoom in using the Box Zoom Tool, move about the panel using the Pan Tool,
or reset the panel back to the original view using the Reset Tool.</span>
<br></br>
<center>
<table>
<tr>
<td><img src="@boxicon" height="20" width="20"></td><td><span style="font-size: 11px;">Box Zoom Tool</span></td>
</tr>
<tr>
<td><img src="@panicon" height="20" width="20"></td><td><span style="font-size: 11px;">Pan Tool</span></td>
</tr>
<tr>
<td><img src="@reseticon" height="20" width="20"></td><td><span style="font-size: 11px;">Reset Tool</span></td>
</tr>
<tr>
<td><img src="@tapicon" height="20" width="20"></td><td><span style="font-size: 11px;">Tap Tool (select periods in BLS Panel only)</span></td>
</tr>
<tr>
<td><img src="@hovericon" height="20" width="20"></td><td><span style="font-size: 11px;">Help Messages (click to disable/enable help)</span></td>
</tr>
</table>
</center>
</div>
</div>
"""
],
)
return _to_ColumnDataSource(data=data)
def prepare_bls_help_source(bls_source, slider_value):
data = dict(
period=[bls_source.data["period"][int(slider_value * 0.95)]],
power=[_at_ratio(bls_source.data["power"], 0.98)],
helpme=["?"],
help=[
"""
<div style="width: 375px;">
<div style="height: 190px;">
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Box Least Squares Periodogram</span>
</div>
<div>
<span style="font-size: 11px;">This panel shows the BLS periodogram for
the light curve shown in the lower panel.
The currently selected period is highlighted by the red line;
by default, it is the peak period within the range.
The Folded Light Curve panel [right] will update when a new period
is selected in the BLS Panel. You can select a new period either by
using the Box Zoom tool to select a smaller range, or by clicking on the peak you want to select. </span>
<br></br>
<span style="font-size: 11px;">The panel is set at the resolution
given by the Resolution Slider [bottom]. This value is the number
of points in the BLS Periodogram panel.
Increasing the resolution will make the BLS Periodogram more accurate,
but slower to render. To increase the resolution for a given peak,
simply zoom in with the Box Zoom Tool.</span>
</div>
</div>
"""
],
)
return _to_ColumnDataSource(data=data)
def prepare_f_help_source(f):
data = dict(
phase=[_at_ratio(f.time, 0.98)],
flux=[_at_ratio(f.flux, 0.98)],
helpme=["?"],
help=[
"""
<div style="width: 375px;">
<div style="height: 190px;">
</div>
<div>
<span style="font-size: 12px; font-weight: bold;">Folded Light Curve</span>
</div>
<div>
<span style="font-size: 11px;">This panel shows the folded light curve,
using the period currently selected in the BLS panel [left], indicated by the red line.
The transit model is shown in red, and the duration of the transit model
is given by the duration slider below. Update the slider to change the duration.
The period and transit midpoint values of the model are given above this panel.</span>
<br></br>
<span style="font-size: 11px;">If the folded transit looks like a near miss of
the true period, try zooming in on the peak in the BLS Periodogram panel [left]
with the Box Zoom tool. This will increase the resolution of the peak and provide
a better period solution. You can also vary the transit duration for a better fit.
If the transit model is too shallow, you may have selected a harmonic.
Look in the BLS Periodogram for a peak at a multiple of the current period (e.g. 0.25x, 0.5x, 2x, or 4x).</span>
</div>
</div>
"""
],
)
return _to_ColumnDataSource(data=data)
def _to_axis_label(label_base, unit):
if (unit) and (unit.to_string() != ""):
# bokeh does not support LaTeX rendering, and astropy's default string tends to be too verbose,
# so we use a short form for the typical use case
unit_str = "e/s" if unit == u.electron / u.second else unit.to_string()
return f"{label_base} [{unit_str}]"
else:
return label_base
def make_lightcurve_figure_elements(
lc, model_lc, lc_source, model_lc_source, help_source
):
"""Make a figure with a simple light curve scatter and model light curve line.
Parameters
----------
lc : lightkurve.LightCurve
Light curve to plot
model_lc : lightkurve.LightCurve
Model light curve to plot
lc_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for plotting light curve
model_lc_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for plotting model light curve
help_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for rendering help button
Returns
-------
fig : bokeh.plotting.figure
Bokeh figure object
"""
# Make figure
fig = figure(
title="Light Curve",
height=300,
width=900,
tools="pan,box_zoom,wheel_zoom,reset",
toolbar_location="below",
border_fill_color="#FFFFFF",
active_drag="box_zoom",
)
fig.title.offset = -10
fig.yaxis.axis_label = _to_axis_label("Flux", lc.flux.unit)
if lc.time.format == "bkjd":
fig.xaxis.axis_label = "Time - 2454833 (days)"
elif lc.time.format == "btjd":
fig.xaxis.axis_label = "Time - 2457000 (days)"
else:
fig.xaxis.axis_label = "Time (days)"
ylims = [np.nanmin(lc.flux.value), np.nanmax(lc.flux.value)]
fig.y_range = Range1d(start=float(ylims[0]), end=float(ylims[1]))
# Add light curve
fig.circle(
"time",
"flux",
line_width=1,
color="#191919",
source=lc_source,
nonselection_line_color="#191919",
size=0.5,
nonselection_line_alpha=1.0,
)
# Add model
fig.step(
"time",
"flux",
line_width=1,
color="firebrick",
source=model_lc_source,
nonselection_line_color="firebrick",
nonselection_line_alpha=1.0,
)
# Help button
question_mark = Text(
x="time",
y="flux",
text="helpme",
text_color="grey",
text_align="center",
text_baseline="middle",
text_font_size="12px",
text_font_style="bold",
text_alpha=0.6,
)
fig.add_glyph(help_source, question_mark)
help = fig.circle(
"time",
"flux",
alpha=0.0,
size=15,
source=help_source,
line_width=2,
line_color="grey",
line_alpha=0.6,
)
tooltips = help_source.data["help"][0]
fig.add_tools(
HoverTool(
tooltips=tooltips,
renderers=[help],
mode="mouse",
point_policy="snap_to_data",
)
)
return fig
def make_folded_figure_elements(
f, f_model_lc, f_source, f_model_lc_source, help_source
):
"""Make a scatter plot of a FoldedLightCurve.
Parameters
----------
f : lightkurve.LightCurve
Folded light curve to plot
f_model_lc : lightkurve.LightCurve
Model folded light curve to plot
f_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for plotting folded light curve
f_model_lc_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for plotting model folded light curve
help_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for rendering help button
Returns
-------
fig : bokeh.plotting.figure
Bokeh figure object
"""
# Build Figure
fig = figure(
title="Folded Light Curve",
height=340,
width=450,
tools="pan,box_zoom,wheel_zoom,reset",
toolbar_location="below",
border_fill_color="#FFFFFF",
active_drag="box_zoom",
)
fig.title.offset = -10
fig.yaxis.axis_label = _to_axis_label("Flux", f.flux.unit)
fig.xaxis.axis_label = f"Phase [{f.time.format.upper()}]"
# Scatter point for data
fig.circle(
"phase",
"flux",
line_width=1,
color="#191919",
source=f_source,
nonselection_line_color="#191919",
nonselection_line_alpha=1.0,
size=0.1,
)
# Line plot for model
fig.step(
"phase",
"flux",
line_width=3,
color="firebrick",
source=f_model_lc_source,
nonselection_line_color="firebrick",
nonselection_line_alpha=1.0,
)
# Help button
question_mark = Text(
x="phase",
y="flux",
text="helpme",
text_color="grey",
text_align="center",
text_baseline="middle",
text_font_size="12px",
text_font_style="bold",
text_alpha=0.6,
)
fig.add_glyph(help_source, question_mark)
help = fig.circle(
"phase",
"flux",
alpha=0.0,
size=15,
source=help_source,
line_width=2,
line_color="grey",
line_alpha=0.6,
)
tooltips = help_source.data["help"][0]
fig.add_tools(
HoverTool(
tooltips=tooltips,
renderers=[help],
mode="mouse",
point_policy="snap_to_data",
)
)
return fig
def make_bls_figure_elements(result, bls_source, help_source):
"""Make a line plot of a BLS result.
Parameters
----------
result : BLS.model result
BLS model result to plot
bls_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for plotting BLS source
help_source : bokeh.plotting.ColumnDataSource
Bokeh style source object for rendering help button
Returns
-------
fig : bokeh.plotting.figure
Bokeh figure object
vertical_line : bokeh.models.Span
Vertical line to highlight current selected period
"""
# Build Figure
fig = figure(
title="BLS Periodogram",
height=340,
width=450,
tools="pan,box_zoom,wheel_zoom,tap,reset",
toolbar_location="below",
border_fill_color="#FFFFFF",
x_axis_type="log",
active_drag="box_zoom",
)
fig.title.offset = -10
fig.yaxis.axis_label = "Power"
fig.xaxis.axis_label = "Period [days]"
fig.y_range = Range1d(
start=result.power.min().value * 0.95, end=result.power.max().value * 1.05
)
fig.x_range = Range1d(
start=result.period.min().value, end=result.period.max().value
)
# Add invisible circles to enable selecting a new period with the Tap tool
fig.circle(
"period",
"power",
source=bls_source,
fill_alpha=0.0,
size=6,
line_color=None,
selection_color="white",
nonselection_fill_alpha=0.0,
nonselection_fill_color="white",
nonselection_line_color=None,
nonselection_line_alpha=0.0,
fill_color=None,
hover_fill_color="white",
hover_alpha=0.0,
hover_line_color="white",
)
# Add line for the BLS power
fig.line(
"period",
"power",
line_width=1,
color="#191919",
source=bls_source,
nonselection_line_color="#191919",
nonselection_line_alpha=1.0,
)
# Vertical line to indicate the current period
vertical_line = Span(
location=0,
dimension="height",
line_color="firebrick",
line_width=3,
line_alpha=0.5,
)
fig.add_layout(vertical_line)
# Help button
question_mark = Text(
x="period",
y="power",
text="helpme",
text_color="grey",
text_align="center",
text_baseline="middle",
text_font_size="12px",
text_font_style="bold",
text_alpha=0.6,
)
fig.add_glyph(help_source, question_mark)
help = fig.circle(
"period",
"power",
alpha=0.0,
size=15,
source=help_source,
line_width=2,
line_color="grey",
line_alpha=0.6,
)
tooltips = help_source.data["help"][0]
fig.add_tools(
HoverTool(
tooltips=tooltips,
renderers=[help],
mode="mouse",
point_policy="snap_to_data",
)
)
return fig, vertical_line
def _preprocess_lc_for_bls(lc):
clean = lc.remove_nans()
# convert to normalized, unscaled flux if needed,
# so that its scale matches the BLS model light curve (to be generated),
# making the two easier to visualize in the same plot.
if not clean.meta.get("NORMALIZED", False):
clean = clean.normalize()
elif clean.flux.unit != u.dimensionless_unscaled:
# case normalized, but in other units (percents, etc.)
clean.flux = clean.flux.to(u.dimensionless_unscaled)
clean.flux_err = clean.flux_err.to(u.dimensionless_unscaled)
return clean
def show_interact_widget(
lc,
notebook_url="localhost:8888",
minimum_period=None,
maximum_period=None,
resolution=2000,
):
"""Show the BLS interact widget.
Parameters
----------
notebook_url: str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
minimum_period : float or None
Minimum period to evaluate the BLS over. If None, a default value of 0.3 days
will be used.
maximum_period : float or None
Maximum period to evaluate the BLS over. If None, half the time coverage
of the light curve will be used.
resolution : int
Number of points to use in the BLS panel. Lower this value for a faster
but less accurate computation. You can also vary this value using the
Resolution Slider.
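Examples
--------
A minimal sketch (the target and period range below are only illustrative):
>>> import lightkurve as lk  # doctest: +SKIP
>>> lc = lk.search_lightcurve("Kepler-10", quarter=6).download()  # doctest: +SKIP
>>> lc.interact_bls(minimum_period=0.5, maximum_period=2)  # doctest: +SKIP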
"""
try:
import bokeh
if bokeh.__version__[0] == "0":
warnings.warn(
"interact_bls() requires Bokeh version 1.0 or later", LightkurveWarning
)
except ImportError:
log.error(
"The interact_bls() tool requires the `bokeh` package; "
"you can install bokeh using e.g. `conda install bokeh`."
)
return None
def _round_strip_unit(val, decimals):
return np.round(getattr(val, "value", val), decimals)
def _as_1d(time):
"""Convert a scalar Time to a 1-d array, suitable for creating a LightCurve"""
return time.reshape((1,))
def _to_lc(time, flux):
"""Shorthand to create a LightCurve from time and flux, used for building model light curves"""
return LightCurve(time=time, flux=flux)
def _create_interact_ui(
doc, minp=minimum_period, maxp=maximum_period, resolution=resolution
):
"""Create BLS interact user interface."""
if minp is None:
minp = 0.3
if maxp is None:
maxp = (lc.time[-1].value - lc.time[0].value) / 2
# TODO: consider accepting Time as minp / maxp and converting it to unitless days
time_format = ""
if lc.time.format == "bkjd":
time_format = " - 2454833 days"
if lc.time.format == "btjd":
time_format = " - 2457000 days"
# Some sliders
duration_slider = Slider(
start=0.01,
end=0.5,
value=0.05,
step=0.01,
title="Duration [Days]",
width=400,
)
npoints_slider = Slider(
start=500,
end=10000,
value=resolution,
step=100,
title="BLS Resolution",
width=400,
)
# Set up the period values, BLS model and best period
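# Periods are sampled uniformly in log space; periods shorter than the
# transit duration are excluded before running BLS.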
period_values = np.logspace(
np.log10(minp), np.log10(maxp), npoints_slider.value
)
period_values = period_values[
(period_values > duration_slider.value) & (period_values < maxp)
]
model = BoxLeastSquares(lc.time, lc.flux)
result = model.power(period_values, duration_slider.value)
loc = np.argmax(result.power)
best_period = result.period[loc]
best_t0 = result.transit_time[loc]
# Some Buttons
double_button = Button(label="Double Period", button_type="danger", width=100)
half_button = Button(label="Half Period", button_type="danger", width=100)
text_output = Paragraph(
text="Period: {} days, T0: {}{}".format(
_round_strip_unit(best_period, 7),
_round_strip_unit(best_t0, 7),
time_format,
),
width=350,
height=40,
)
# Set up BLS source
bls_source = prepare_bls_datasource(result, loc)
bls_source_units = dict(
transit_time_format=result["transit_time"].format,
transit_time_scale=result["transit_time"].scale,
period=result["period"].unit,
)
bls_help_source = prepare_bls_help_source(bls_source, npoints_slider.value)
# Set up the model LC
mf = model.model(lc.time, best_period, duration_slider.value, best_t0)
mf /= np.median(mf)
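# Keep only the cadences in (or immediately adjacent to) transit, i.e. where
# the model deviates from its median; the Box1DKernel slightly widens the
# mask so the plotted model steps look clean.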
mask = ~(convolve(np.asarray(mf == np.median(mf)), Box1DKernel(2)) > 0.9)
model_lc = _to_lc(lc.time[mask], mf[mask])
model_lc_source = _to_ColumnDataSource(
data=dict(time=model_lc.time, flux=model_lc.flux)
)
# Set up the LC
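# Downsample to at most ~5000 plotted points to keep Bokeh responsive.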
nb = int(np.ceil(len(lc.flux) / 5000))
lc_source = prepare_lightcurve_datasource(lc[::nb])
lc_help_source = prepare_lc_help_source(lc)
# Set up folded LC
nb = int(np.ceil(len(lc.flux) / 10000))
f = lc.fold(best_period, best_t0)
f_source = prepare_folded_datasource(f[::nb])
f_help_source = prepare_f_help_source(f)
f_model_lc = model_lc.fold(best_period, best_t0)
f_model_lc = _to_lc(_as_1d(f.time.min()), [1]).append(f_model_lc)
f_model_lc = f_model_lc.append(_to_lc(_as_1d(f.time.max()), [1]))
f_model_lc_source = _to_ColumnDataSource(
data=dict(phase=f_model_lc.time, flux=f_model_lc.flux)
)
def _update_light_curve_plot(event):
"""If we zoom in on LC plot, update the binning."""
mint, maxt = fig_lc.x_range.start, fig_lc.x_range.end
inwindow = (lc.time.value > mint) & (lc.time.value < maxt)
nb = int(np.ceil(inwindow.sum() / 5000))
temp_lc = lc[inwindow]
_update_source(
lc_source, {"time": temp_lc.time[::nb], "flux": temp_lc.flux[::nb]}
)
def _update_folded_plot(event):
loc = np.argmax(bls_source.data["power"])
best_period = bls_source.data["period"][loc]
best_t0 = bls_source.data["transit_time"][loc]
# Re-fold the light curve and rebin it over the currently visible phase window
minphase, maxphase = fig_folded.x_range.start, fig_folded.x_range.end
f = lc.fold(best_period, best_t0)
inwindow = (f.time > minphase) & (f.time < maxphase)
nb = int(np.ceil(inwindow.sum() / 10000))
_update_source(
f_source,
{"phase": f[inwindow].time[::nb], "flux": f[inwindow].flux[::nb]},
)
# Function to update the widget
def _update_params(all=False, best_period=None, best_t0=None):
if all:
# If we're updating everything, recalculate the BLS model
minp, maxp = fig_bls.x_range.start, fig_bls.x_range.end
period_values = np.logspace(
np.log10(minp), np.log10(maxp), npoints_slider.value
)
ok = (period_values > duration_slider.value) & (period_values < maxp)
if ok.sum() == 0:
return
period_values = period_values[ok]
result = model.power(period_values, duration_slider.value)
ok = (
_isfinite(result["power"])
& _isfinite(result["duration"])
& _isfinite(result["transit_time"])
& _isfinite(result["period"])
)
ok_result = dict(
period=result["period"][
ok
], # useful for accessing values with units needed later
power=result["power"][ok],
duration=result["duration"][ok],
transit_time=result["transit_time"][ok],
)
_update_source(bls_source, ok_result)
loc = np.nanargmax(ok_result["power"])
best_period = ok_result["period"][loc]
best_t0 = ok_result["transit_time"][loc]
minpow, maxpow = (
bls_source.data["power"].min() * 0.95,
bls_source.data["power"].max() * 1.05,
)
fig_bls.y_range.start = minpow
fig_bls.y_range.end = maxpow
# With the current best_period and best_t0 (recomputed above or passed in), update the folded data
minphase, maxphase = fig_folded.x_range.start, fig_folded.x_range.end
f = lc.fold(best_period, best_t0)
inwindow = (f.time > minphase) & (f.time < maxphase)
nb = int(np.ceil(inwindow.sum() / 10000))
_update_source(
f_source,
{"phase": f[inwindow].time[::nb], "flux": f[inwindow].flux[::nb]},
)
mf = model.model(lc.time, best_period, duration_slider.value, best_t0)
mf /= np.median(mf)
mask = ~(convolve(np.asarray(mf == np.median(mf)), Box1DKernel(2)) > 0.9)
model_lc = _to_lc(lc.time[mask], mf[mask])
_update_source(
model_lc_source, {"time": model_lc.time, "flux": model_lc.flux}
)
f_model_lc = model_lc.fold(best_period, best_t0)
f_model_lc = _to_lc(_as_1d(f.time.min()), [1]).append(f_model_lc)
f_model_lc = f_model_lc.append(_to_lc(_as_1d(f.time.max()), [1]))
_update_source(
f_model_lc_source, {"phase": f_model_lc.time, "flux": f_model_lc.flux}
)
vertical_line.update(location=best_period.value)
fig_folded.title.text = "Period: {} days \t T0: {}{}".format(
_round_strip_unit(best_period, 7),
_round_strip_unit(best_t0, 7),
time_format,
)
text_output.text = "Period: {} days, \t T0: {}{}".format(
_round_strip_unit(best_period, 7),
_round_strip_unit(best_t0, 7),
time_format,
)
# Callbacks
def _update_upon_period_selection(attr, old, new):
"""When we select a period we should just update a few things, but we should not recalculate the model"""
if len(new) > 0:
new = new[0]
best_period = (
bls_source.data["period"][new] * bls_source_units["period"]
)
best_t0 = Time(
bls_source.data["transit_time"][new],
format=bls_source_units["transit_time_format"],
scale=bls_source_units["transit_time_scale"],
)
_update_params(best_period=best_period, best_t0=best_t0)
def _update_model_slider(attr, old, new):
"""If the duration slider is updated, then update the whole model set."""
_update_params(all=True)
def _update_model_slider_EVENT(event):
"""If we update the duration slider, we should update the whole model set.
This is the same as _update_model_slider, but with a different call signature.
"""
_update_params(all=True)
def _double_period_event():
fig_bls.x_range.start *= 2
fig_bls.x_range.end *= 2
_update_params(all=True)
def _half_period_event():
fig_bls.x_range.start /= 2
fig_bls.x_range.end /= 2
_update_params(all=True)
# Help Hover Call Backs
def _update_folded_plot_help_reset(event):
f_help_source.data["phase"] = [_at_ratio(f.time, 0.95)]
f_help_source.data["flux"] = [_at_ratio(f.flux, 0.95)]
def _update_folded_plot_help(event):
f_help_source.data["phase"] = [_at_ratio(fig_folded.x_range, 0.95)]
f_help_source.data["flux"] = [_at_ratio(fig_folded.y_range, 0.95)]
def _update_lc_plot_help_reset(event):
lc_help_source.data["time"] = [_at_ratio(lc.time, 0.98)]
lc_help_source.data["flux"] = [_at_ratio(lc.flux, 0.95)]
def _update_lc_plot_help(event):
lc_help_source.data["time"] = [_at_ratio(fig_lc.x_range, 0.98)]
lc_help_source.data["flux"] = [_at_ratio(fig_lc.y_range, 0.95)]
def _update_bls_plot_help_event(event):
# cannot use _at_ratio helper for period, because period is log scaled.
bls_help_source.data["period"] = [
bls_source.data["period"][int(npoints_slider.value * 0.95)]
]
bls_help_source.data["power"] = [_at_ratio(bls_source.data["power"], 0.98)]
def _update_bls_plot_help(attr, old, new):
bls_help_source.data["period"] = [
bls_source.data["period"][int(npoints_slider.value * 0.95)]
]
bls_help_source.data["power"] = [_at_ratio(bls_source.data["power"], 0.98)]
# Create all the figures.
fig_folded = make_folded_figure_elements(
f, f_model_lc, f_source, f_model_lc_source, f_help_source
)
fig_folded.title.text = "Period: {} days \t T0: {}{}".format(
_round_strip_unit(best_period, 7),
_round_strip_unit(best_t0, 5),
time_format,
)
fig_bls, vertical_line = make_bls_figure_elements(
result, bls_source, bls_help_source
)
fig_lc = make_lightcurve_figure_elements(
lc, model_lc, lc_source, model_lc_source, lc_help_source
)
# Map changes
# If we click a new period, update
bls_source.selected.on_change("indices", _update_upon_period_selection)
# If we change the duration, update everything, including help button for BLS
duration_slider.on_change("value", _update_model_slider)
duration_slider.on_change("value", _update_bls_plot_help)
# If we increase resolution, update everything
npoints_slider.on_change("value", _update_model_slider)
# Make sure the vertical line always goes to the best period.
vertical_line.update(location=best_period.value)
# If we pan in the BLS panel, update everything
fig_bls.on_event(PanEnd, _update_model_slider_EVENT)
fig_bls.on_event(Reset, _update_model_slider_EVENT)
# If we pan in the LC panel, rebin the points
fig_lc.on_event(PanEnd, _update_light_curve_plot)
fig_lc.on_event(Reset, _update_light_curve_plot)
# If we pan in the Folded panel, rebin the points
fig_folded.on_event(PanEnd, _update_folded_plot)
fig_folded.on_event(Reset, _update_folded_plot)
# Deal with help button
fig_bls.on_event(PanEnd, _update_bls_plot_help_event)
fig_bls.on_event(Reset, _update_bls_plot_help_event)
fig_folded.on_event(PanEnd, _update_folded_plot_help)
fig_folded.on_event(Reset, _update_folded_plot_help_reset)
fig_lc.on_event(PanEnd, _update_lc_plot_help)
fig_lc.on_event(Reset, _update_lc_plot_help_reset)
# Buttons
double_button.on_click(_double_period_event)
half_button.on_click(_half_period_event)
# Layout the widget
doc.add_root(
layout(
[
[fig_bls, fig_folded],
fig_lc,
[
Spacer(width=70),
duration_slider,
Spacer(width=50),
npoints_slider,
],
[
Spacer(width=70),
double_button,
Spacer(width=70),
half_button,
Spacer(width=300),
text_output,
],
]
)
)
# Pre-process the light curve (remove NaNs, normalize) before building the UI
lc = _preprocess_lc_for_bls(lc)
output_notebook(verbose=False, hide_banner=True)
return show(_create_interact_ui, notebook_url=notebook_url)
| 37,534
| 36.348259
| 202
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/targetpixelfile.py
|
"""Defines TargetPixelFile, KeplerTargetPixelFile, and TessTargetPixelFile."""
from __future__ import division
import datetime
import os
import warnings
import logging
import collections
from astropy.io import fits
from astropy.io.fits import Undefined, BinTableHDU
from astropy.nddata import Cutout2D
from astropy.table import Table
from astropy.wcs import WCS
from astropy.utils.exceptions import AstropyWarning
from astropy.coordinates import SkyCoord
from astropy.stats.funcs import median_absolute_deviation as MAD
from astropy.utils.decorators import deprecated
from astropy.time import Time
from astropy.units import Quantity
import astropy.units as u
import matplotlib
from matplotlib import animation
from matplotlib import patches
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import numpy as np
from scipy.ndimage import label
from tqdm import tqdm
from copy import deepcopy
from . import PACKAGEDIR, MPLSTYLE
from .lightcurve import LightCurve, KeplerLightCurve, TessLightCurve
from .prf import KeplerPRF
from .utils import (
KeplerQualityFlags,
TessQualityFlags,
plot_image,
LightkurveWarning,
LightkurveDeprecationWarning,
validate_method,
centroid_quadratic,
_query_solar_system_objects,
)
from .io import detect_filetype
__all__ = ["KeplerTargetPixelFile", "TessTargetPixelFile"]
log = logging.getLogger(__name__)
# OPEN: consider moving this to utils and
# consolidating it with the time-label helper in lightcurve.py
_TIME_LABEL_DICT_BRIEF = {"": "Phase", "bkjd": "[BKJD days]", "btjd": "[BTJD days]"}
def _time_label_brief(time):
format = getattr(time, "format", "")
return _TIME_LABEL_DICT_BRIEF.get(format, format.upper())
class HduToMetaMapping(collections.abc.Mapping):
"""Provides a read-only view of HDU header in `astropy.timeseries.TimeSeries.meta` format"""
def __init__(self, hdu):
# use OrderedDict rather than simple dict for 2 reasons:
# 1. more friendly __repr__ and __str__
# 2. make the behavior of a TPF and a LC more consistent.
# (LightCurve.meta is an OrderedDict)
self._dict = collections.OrderedDict()
self._dict.update(hdu.header)
def __getitem__(self, key):
return self._dict[key]
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict)
def __repr__(self):
return self._dict.__repr__()
def __str__(self):
return self._dict.__str__()
class TargetPixelFile(object):
"""Abstract class representing FITS files which contain time series imaging data.
You should probably not be using this abstract class directly;
see `KeplerTargetPixelFile` and `TessTargetPixelFile` instead.
"""
def __init__(self, path, quality_bitmask="default", targetid=None, **kwargs):
self.path = path
if isinstance(path, fits.HDUList):
self.hdu = path
else:
with fits.open(self.path, **kwargs) as hdulist:
self.hdu = deepcopy(hdulist)
self.quality_bitmask = quality_bitmask
self.targetid = targetid
# For consistency with `LightCurve`, provide a `meta` dictionary
self.meta = HduToMetaMapping(self.hdu[0])
def __getitem__(self, key):
"""Implements indexing and slicing.
        Note: the implementation below cannot be simplified using
`copy[1].data = copy[1].data[self.quality_mask][key]`
due to the complicated behavior of AstroPy's `FITS_rec`.
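        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file)::
        >>> first_ten = tpf[:10]  # doctest: +SKIP
        >>> last_cadence = tpf[-1]  # doctest: +SKIP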
"""
# Step 1: determine the indexes of the data to return.
# We start by determining the indexes of the good-quality cadences.
quality_idx = np.where(self.quality_mask)[0]
# Then we apply the index or slice to the good-quality indexes.
if isinstance(key, int):
            # Ensure we always have a range; this is necessary so that
            # we always get a `FITS_rec` instead of a `FITS_record` below.
if key == -1:
selected_idx = quality_idx[key:]
else:
selected_idx = quality_idx[key : key + 1]
else:
selected_idx = quality_idx[key]
# Step 2: use the indexes to create a new copy of the data.
with warnings.catch_warnings():
# Ignore warnings about empty fields
warnings.simplefilter("ignore", UserWarning)
            # AstroPy added `HDUList.copy()` in v3.1; we use it to avoid copying
            # the HDUs manually, which used to bring along unexpected memory leaks.
copy = self.hdu.copy()
copy[1] = BinTableHDU(
data=self.hdu[1].data[selected_idx], header=self.hdu[1].header
)
return self.__class__(
copy, quality_bitmask=self.quality_bitmask, targetid=self.targetid
)
def __len__(self):
return len(self.time)
def __add__(self, other):
if isinstance(other, Quantity):
other = other.value
hdu = deepcopy(self.hdu)
hdu[1].data["FLUX"][self.quality_mask] += other
return type(self)(hdu, quality_bitmask=self.quality_bitmask)
def __mul__(self, other):
if isinstance(other, Quantity):
other = other.value
hdu = deepcopy(self.hdu)
hdu[1].data["FLUX"][self.quality_mask] *= other
hdu[1].data["FLUX_ERR"][self.quality_mask] *= other
return type(self)(hdu, quality_bitmask=self.quality_bitmask)
def __rtruediv__(self, other):
if isinstance(other, Quantity):
other = other.value
hdu = deepcopy(self.hdu)
hdu[1].data["FLUX"][self.quality_mask] /= other
hdu[1].data["FLUX_ERR"][self.quality_mask] /= other
return type(self)(hdu, quality_bitmask=self.quality_bitmask)
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return self.__add__(-1 * other)
def __rsub__(self, other):
return (-1 * self).__add__(other)
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.__mul__(1.0 / other)
def __div__(self, other):
return self.__truediv__(other)
def __rdiv__(self, other):
return self.__rtruediv__(other)
@property
@deprecated("2.0", alternative="time", warning_type=LightkurveDeprecationWarning)
def astropy_time(self):
"""Returns an AstroPy Time object for all good-quality cadences."""
return self.time
@property
def hdu(self):
return self._hdu
@hdu.setter
def hdu(self, value, keys=("FLUX", "QUALITY")):
"""Verify the file format when setting the value of `self.hdu`.
Raises a ValueError if `value` does not appear to be a Target Pixel File.
"""
for key in keys:
            if not np.any(
                [
                    value[1].header[ttype] == key
                    for ttype in value[1].header["TTYPE*"]
                ]
            ):
raise ValueError(
"File {} does not have a {} column, "
"is this a target pixel file?".format(self.path, key)
)
self._hdu = value
def get_keyword(self, keyword, hdu=0, default=None):
"""Returns a header keyword value.
If the keyword is Undefined or does not exist,
then return ``default`` instead.
"""
return self.hdu[hdu].header.get(keyword, default)
@property
@deprecated(
"2.0", alternative="get_header()", warning_type=LightkurveDeprecationWarning
)
def header(self):
"""DEPRECATED. Please use ``get_header()`` instead."""
return self.hdu[0].header
def get_header(self, ext=0):
"""Returns the metadata embedded in the file.
Target Pixel Files contain embedded metadata headers spread across three
different FITS extensions:
1. The "PRIMARY" extension (``ext=0``) provides a metadata header
providing details on the target and its CCD position.
2. The "PIXELS" extension (``ext=1``) provides details on the
data column and their coordinate system (WCS).
3. The "APERTURE" extension (``ext=2``) provides details on the
aperture pixel mask and the expected coordinate system (WCS).
Parameters
----------
ext : int or str
FITS extension name or number.
Returns
-------
header : `~astropy.io.fits.header.Header`
Header object containing metadata keywords.
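        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file; the keyword shown is typical for Kepler/TESS products)::
        >>> tpf.get_header(ext=1)["TTYPE5"]  # doctest: +SKIP
        'FLUX'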
"""
return self.hdu[ext].header
@property
def ra(self):
"""Right Ascension of target ('RA_OBJ' header keyword)."""
return self.get_keyword("RA_OBJ")
@property
def dec(self):
"""Declination of target ('DEC_OBJ' header keyword)."""
return self.get_keyword("DEC_OBJ")
@property
def column(self):
"""CCD pixel column number ('1CRV5P' header keyword)."""
return self.get_keyword("1CRV5P", hdu=1, default=0)
@property
def row(self):
"""CCD pixel row number ('2CRV5P' header keyword)."""
return self.get_keyword("2CRV5P", hdu=1, default=0)
@property
def pos_corr1(self):
"""Returns the column position correction."""
return self.hdu[1].data["POS_CORR1"][self.quality_mask]
@property
def pos_corr2(self):
"""Returns the row position correction."""
return self.hdu[1].data["POS_CORR2"][self.quality_mask]
@property
def pipeline_mask(self):
"""Returns the optimal aperture mask used by the pipeline.
If the aperture extension is missing from the file, a mask
composed of all `True` values will be returned.
"""
# Both Kepler and TESS flag the pixels in the optimal aperture using
# bit number 2 in the aperture mask extension, e.g. see Section 6 of
# the TESS Data Products documentation (EXP-TESS-ARC-ICD-TM-0014.pdf).
try:
return self.hdu[2].data & 2 > 0
except (IndexError, TypeError):
# `IndexError` may be raised if the aperture extension (#2) is missing
# `TypeError` may be raised because early versions of TESScut returned floats in HDU 2
return np.ones(self.hdu[1].data["FLUX"][0].shape, dtype=bool)
@property
def shape(self):
"""Return the cube dimension shape."""
return self.flux.shape
@property
def time(self) -> Time:
"""Returns the time for all good-quality cadences."""
time_values = self.hdu[1].data["TIME"][self.quality_mask]
# Some data products have missing time values;
# we need to set these to zero or `Time` cannot be instantiated.
time_values[~np.isfinite(time_values)] = 0
bjdrefi = self.hdu[1].header.get("BJDREFI")
if bjdrefi == 2454833:
time_format = "bkjd"
elif bjdrefi == 2457000:
time_format = "btjd"
else:
time_format = "jd"
return Time(
time_values,
scale=self.hdu[1].header.get("TIMESYS", "tdb").lower(),
format=time_format,
)
@property
def cadenceno(self):
"""Return the cadence number for all good-quality cadences."""
cadenceno = self.hdu[1].data["CADENCENO"][self.quality_mask]
# The TESScut service returns an array of zeros as CADENCENO.
# If this is the case, return frame numbers from 0 instead.
if cadenceno[0] == 0:
return np.arange(0, len(cadenceno), 1, dtype=int)
return cadenceno
@property
def nan_time_mask(self):
"""Returns a boolean mask flagging cadences whose time is `nan`."""
return self.time.value == 0
@property
def flux(self) -> Quantity:
"""Returns the flux for all good-quality cadences."""
unit = None
if self.get_header(1).get("TUNIT5") == "e-/s":
unit = "electron/s"
return Quantity(self.hdu[1].data["FLUX"][self.quality_mask], unit=unit)
@property
def flux_err(self) -> Quantity:
"""Returns the flux uncertainty for all good-quality cadences."""
unit = None
if self.get_header(1).get("TUNIT6") == "e-/s":
unit = "electron/s"
return Quantity(self.hdu[1].data["FLUX_ERR"][self.quality_mask], unit=unit)
@property
def flux_bkg(self) -> Quantity:
"""Returns the background flux for all good-quality cadences."""
return Quantity(
self.hdu[1].data["FLUX_BKG"][self.quality_mask], unit="electron/s"
)
@property
def flux_bkg_err(self) -> Quantity:
return Quantity(
self.hdu[1].data["FLUX_BKG_ERR"][self.quality_mask], unit="electron/s"
)
@property
def quality(self):
"""Returns the quality flag integer of every good cadence."""
return self.hdu[1].data["QUALITY"][self.quality_mask]
@property
def wcs(self) -> WCS:
"""Returns an `astropy.wcs.WCS` object with the World Coordinate System
solution for the target pixel file.
Returns
-------
w : `astropy.wcs.WCS` object
WCS solution
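        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file)::
        >>> sky = tpf.wcs.pixel_to_world(0, 0)  # doctest: +SKIP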
"""
if "MAST" in self.hdu[0].header["ORIGIN"]: # Is it a TessCut TPF?
# TPF's generated using the TESSCut service in early 2019 only appear
# to contain a valid WCS in the second extension (the aperture
# extension), so we treat such files as a special case.
return WCS(self.hdu[2])
else:
# For standard (Ames-pipeline-produced) TPF files, we use the WCS
# keywords provided in the first extension (the data table extension).
# Specifically, we use the WCS keywords for the 5th data column (FLUX).
wcs_keywords = {
"1CTYP5": "CTYPE1",
"2CTYP5": "CTYPE2",
"1CRPX5": "CRPIX1",
"2CRPX5": "CRPIX2",
"1CRVL5": "CRVAL1",
"2CRVL5": "CRVAL2",
"1CUNI5": "CUNIT1",
"2CUNI5": "CUNIT2",
"1CDLT5": "CDELT1",
"2CDLT5": "CDELT2",
"11PC5": "PC1_1",
"12PC5": "PC1_2",
"21PC5": "PC2_1",
"22PC5": "PC2_2",
"NAXIS1": "NAXIS1",
"NAXIS2": "NAXIS2",
}
mywcs = {}
for oldkey, newkey in wcs_keywords.items():
if self.hdu[1].header.get(oldkey, None) is not None:
mywcs[newkey] = self.hdu[1].header[oldkey]
return WCS(mywcs)
def get_coordinates(self, cadence="all"):
"""Returns two 3D arrays of RA and Dec values in decimal degrees.
        If a cadence number is given, returns 2D arrays for that cadence. If
        cadence is 'all', returns one RA, Dec value for each pixel in every cadence.
Uses the WCS solution and the POS_CORR data from TPF header.
Parameters
----------
cadence : 'all' or int
Which cadences to return the RA Dec coordinates for.
Returns
-------
ra : numpy array, same shape as tpf.flux[cadence]
Array containing RA values for every pixel, for every cadence.
dec : numpy array, same shape as tpf.flux[cadence]
Array containing Dec values for every pixel, for every cadence.
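        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file)::
        >>> ra, dec = tpf.get_coordinates()  # doctest: +SKIP
        >>> ra0, dec0 = tpf.get_coordinates(cadence=0)  # doctest: +SKIP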
"""
w = self.wcs
X, Y = np.meshgrid(np.arange(self.shape[2]), np.arange(self.shape[1]))
pos_corr1_pix = np.copy(self.hdu[1].data["POS_CORR1"])
pos_corr2_pix = np.copy(self.hdu[1].data["POS_CORR2"])
# We zero POS_CORR* when the values are NaN or make no sense (>50px)
with warnings.catch_warnings(): # Comparing NaNs to numbers is OK here
warnings.simplefilter("ignore", RuntimeWarning)
bad = np.any(
[
~np.isfinite(pos_corr1_pix),
~np.isfinite(pos_corr2_pix),
np.abs(pos_corr1_pix - np.nanmedian(pos_corr1_pix)) > 50,
np.abs(pos_corr2_pix - np.nanmedian(pos_corr2_pix)) > 50,
],
axis=0,
)
pos_corr1_pix[bad], pos_corr2_pix[bad] = 0, 0
# Add in POSCORRs
X = np.atleast_3d(X).transpose([2, 0, 1]) + np.atleast_3d(
pos_corr1_pix
).transpose([1, 2, 0])
Y = np.atleast_3d(Y).transpose([2, 0, 1]) + np.atleast_3d(
pos_corr2_pix
).transpose([1, 2, 0])
# Pass through WCS
ra, dec = w.wcs_pix2world(X.ravel(), Y.ravel(), 1)
ra = ra.reshape((pos_corr1_pix.shape[0], self.shape[1], self.shape[2]))
dec = dec.reshape((pos_corr2_pix.shape[0], self.shape[1], self.shape[2]))
ra, dec = ra[self.quality_mask], dec[self.quality_mask]
if cadence != "all":
return ra[cadence], dec[cadence]
return ra, dec
def show_properties(self):
"""Prints a description of all non-callable attributes.
Prints in order of type (ints, strings, lists, arrays, others).
"""
attrs = {}
for attr in dir(self):
if not attr.startswith("_") and attr != "header" and attr != "astropy_time":
res = getattr(self, attr)
if callable(res):
continue
if attr == "hdu":
attrs[attr] = {"res": res, "type": "list"}
for idx, r in enumerate(res):
if idx == 0:
attrs[attr]["print"] = "{}".format(r.header["EXTNAME"])
else:
attrs[attr]["print"] = "{}, {}".format(
attrs[attr]["print"], "{}".format(r.header["EXTNAME"])
)
continue
else:
attrs[attr] = {"res": res}
if isinstance(res, int):
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "int"
elif isinstance(res, np.ndarray):
attrs[attr]["print"] = "array {}".format(res.shape)
attrs[attr]["type"] = "array"
elif isinstance(res, list):
attrs[attr]["print"] = "list length {}".format(len(res))
attrs[attr]["type"] = "list"
elif isinstance(res, str):
if res == "":
attrs[attr]["print"] = "{}".format("None")
else:
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "str"
elif attr == "wcs":
attrs[attr]["print"] = "astropy.wcs.wcs.WCS"
attrs[attr]["type"] = "other"
else:
attrs[attr]["print"] = "{}".format(type(res))
attrs[attr]["type"] = "other"
output = Table(names=["Attribute", "Description"], dtype=[object, object])
idx = 0
types = ["int", "str", "list", "array", "other"]
for typ in types:
for attr, dic in attrs.items():
if dic["type"] == typ:
output.add_row([attr, dic["print"]])
idx += 1
output.pprint(max_lines=-1, max_width=-1)
def to_lightcurve(self, method="sap", corrector=None, **kwargs):
"""Performs photometry on the pixel data and returns a LightCurve object.
        The valid keyword arguments depend on the method chosen:
- 'sap' or 'aperture': see the docstring of `extract_aperture_photometry()`
- 'prf': see the docstring of `extract_prf_photometry()`
- 'pld': see the docstring of `to_corrector()`
        The methods 'sff' and 'cbv' are syntactic shortcuts for:
        - creating a lightcurve using the 'sap' method,
        - correcting the resulting lightcurve using `LightCurve.to_corrector()`
        with the respective method.
Parameters
----------
method : 'aperture', 'prf', 'sap', 'sff', 'cbv', 'pld'.
Photometry method to use. 'aperture' is an alias of 'sap'.
**kwargs : dict
Extra arguments to be passed to the `extract_aperture_photometry()`, the
`extract_prf_photometry()`, or the `to_corrector()` method of this class.
Returns
-------
lc : LightCurve object
Object containing the resulting lightcurve.
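        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file)::
        >>> lc = tpf.to_lightcurve(method="sap", aperture_mask="default")  # doctest: +SKIP
        >>> lc = tpf.to_lightcurve(method="pld")  # doctest: +SKIP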
"""
method = validate_method(method, supported_methods=["aperture", "prf", "sap", "sff", "cbv", "pld"])
if method in ["aperture", "sap"]:
return self.extract_aperture_photometry(**kwargs)
elif method == "prf":
return self.prf_lightcurve(**kwargs)
elif method in ["sff", "cbv"]:
lc = self.extract_aperture_photometry(**kwargs)
return lc.to_corrector(method).correct()
elif method == "pld":
return self.to_corrector("pld", **kwargs).correct()
def _resolve_default_aperture_mask(self, aperture_mask):
if isinstance(aperture_mask, str) and (aperture_mask == "default"):
            # Returns 'pipeline' unless it is missing; falls back to 'threshold'
return "pipeline" if np.any(self.pipeline_mask) else "threshold"
else:
return aperture_mask
def _parse_aperture_mask(self, aperture_mask):
"""Parse the `aperture_mask` parameter as given by a user.
The `aperture_mask` parameter is accepted by a number of methods.
This method ensures that the parameter is always parsed in the same way.
Parameters
----------
        aperture_mask : array-like, 'pipeline', 'all', 'threshold', 'default',
            'background', 'empty', or None
A boolean array describing the aperture such that `True` means
that the pixel will be used.
If None or 'all' are passed, all pixels will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
If 'default' is passed, 'pipeline' mask will be used when available,
with 'threshold' as the fallback.
If 'background' is passed, all pixels fainter than the median flux
will be used.
If 'empty' is passed, no pixels will be used.
Returns
-------
aperture_mask : ndarray
2D boolean numpy array containing `True` for selected pixels.
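        Examples
        --------
        A minimal sketch of the accepted inputs (``tpf`` is assumed to be an
        already-loaded target pixel file)::
        >>> import numpy as np
        >>> mask = tpf._parse_aperture_mask("threshold")  # doctest: +SKIP
        >>> mask = tpf._parse_aperture_mask(np.ones(tpf.flux[0].shape, dtype=bool))  # doctest: +SKIP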
"""
aperture_mask = self._resolve_default_aperture_mask(aperture_mask)
        # If the 'pipeline' mask is requested explicitly but missing or empty,
        # raise an error ('default' is the option that falls back to 'threshold')
if (
isinstance(aperture_mask, str)
and (aperture_mask == "pipeline")
            and not np.any(self.pipeline_mask)
):
raise ValueError(
"_parse_aperture_mask: 'pipeline' is requested, but it is missing or empty."
)
# Input validation
if hasattr(aperture_mask, "shape") and (
aperture_mask.shape != self.flux[0].shape
):
raise ValueError(
"`aperture_mask` has shape {}, "
"but the flux data has shape {}"
"".format(aperture_mask.shape, self.flux[0].shape)
)
with warnings.catch_warnings():
# `aperture_mask` supports both arrays and string values; these yield
# uninteresting FutureWarnings when compared, so let's ignore that.
warnings.simplefilter(action="ignore", category=FutureWarning)
if aperture_mask is None or aperture_mask == "all":
aperture_mask = np.ones((self.shape[1], self.shape[2]), dtype=bool)
elif aperture_mask == "pipeline":
aperture_mask = self.pipeline_mask
elif aperture_mask == "threshold":
aperture_mask = self.create_threshold_mask()
elif aperture_mask == "background":
aperture_mask = ~self.create_threshold_mask(
threshold=0, reference_pixel=None
)
elif aperture_mask == "empty":
aperture_mask = np.zeros((self.shape[1], self.shape[2]), dtype=bool)
elif (
np.issubdtype(aperture_mask.dtype, np.int_)
and ((aperture_mask & 2) == 2).any()
):
# Kepler and TESS pipeline style integer flags
aperture_mask = (aperture_mask & 2) == 2
            elif isinstance(aperture_mask.flat[0], (np.integer, np.floating)):
aperture_mask = aperture_mask.astype(bool)
self._last_aperture_mask = aperture_mask
return aperture_mask
def create_threshold_mask(self, threshold=3, reference_pixel="center"):
"""Returns an aperture mask creating using the thresholding method.
This method will identify the pixels in the TargetPixelFile which show
a median flux that is brighter than `threshold` times the standard
deviation above the overall median. The standard deviation is estimated
in a robust way by multiplying the Median Absolute Deviation (MAD)
with 1.4826.
If the thresholding method yields multiple contiguous regions, then
only the region closest to the (col, row) coordinate specified by
        `reference_pixel` is returned. For example, `reference_pixel=(0, 0)`
will pick the region closest to the bottom left corner.
By default, the region closest to the center of the mask will be
returned. If `reference_pixel=None` then all regions will be returned.
Parameters
----------
threshold : float
A value for the number of sigma by which a pixel needs to be
brighter than the median flux to be included in the aperture mask.
reference_pixel: (int, int) tuple, 'center', or None
(col, row) pixel coordinate closest to the desired region.
For example, use `reference_pixel=(0,0)` to select the region
closest to the bottom left corner of the target pixel file.
If 'center' (default) then the region closest to the center pixel
will be selected. If `None` then all regions will be selected.
Returns
-------
aperture_mask : ndarray
2D boolean numpy array containing `True` for pixels above the
threshold.
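        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file; the threshold values are illustrative)::
        >>> mask = tpf.create_threshold_mask(threshold=5)  # doctest: +SKIP
        >>> mask = tpf.create_threshold_mask(threshold=3, reference_pixel=(0, 0))  # doctest: +SKIP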
"""
if reference_pixel == "center":
reference_pixel = (self.shape[2] / 2, self.shape[1] / 2)
# Calculate the median image
with warnings.catch_warnings():
warnings.simplefilter("ignore")
median_image = np.nanmedian(self.flux, axis=0)
vals = median_image[np.isfinite(median_image)].flatten()
        # Calculate the threshold value in flux units
mad_cut = (1.4826 * MAD(vals) * threshold) + np.nanmedian(median_image)
# Create a mask containing the pixels above the threshold flux
threshold_mask = np.nan_to_num(median_image) >= mad_cut
if (reference_pixel is None) or (not threshold_mask.any()):
# return all regions above threshold
return threshold_mask
else:
# Return only the contiguous region closest to `region`.
# First, label all the regions:
labels = label(threshold_mask)[0]
# For all pixels above threshold, compute distance to reference pixel:
label_args = np.argwhere(labels > 0)
distances = [
np.hypot(crd[0], crd[1])
for crd in label_args
- np.array([reference_pixel[1], reference_pixel[0]])
]
# Which label corresponds to the closest pixel?
closest_arg = label_args[np.argmin(distances)]
closest_label = labels[closest_arg[0], closest_arg[1]]
return labels == closest_label
def estimate_background(self, aperture_mask="background"):
"""Returns an estimate of the median background level in the FLUX column.
In the case of official Kepler and TESS Target Pixel Files, the
background estimates should be close to zero because these products
have already been background-subtracted by the pipeline (i.e. the values
in the `FLUX_BKG` column have been subtracted from the values in `FLUX`).
Background subtraction is often imperfect however, and this method aims
to allow users to estimate residual background signals using different
methods.
Target Pixel Files created by the MAST TESSCut service have
not been background-subtracted. For such products, or other community-
generated pixel files, this method provides a first-order estimate of
the background levels.
This method estimates the per-pixel background flux over time by
computing the median pixel value across the `aperture mask`.
Parameters
----------
aperture_mask : 'background', 'all', or array-like
Which pixels should be used to estimate the background?
If None or 'all' are passed, all pixels in the pixel file will be
used. If 'background' is passed, all pixels fainter than the
median flux will be used. Alternatively, users can pass a boolean
array describing the aperture mask such that `True` means that the
pixel will be used.
Returns
-------
lc : `LightCurve` object
Median background flux in units electron/second/pixel.
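        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file)::
        >>> bkg_lc = tpf.estimate_background()  # doctest: +SKIP
        >>> bkg_lc.plot()  # doctest: +SKIP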
"""
mask = self._parse_aperture_mask(aperture_mask)
# For each cadence, compute the median pixel flux across the background
simple_bkg = np.nanmedian(self.flux[:, mask], axis=1) / u.pixel
return LightCurve(time=self.time, flux=simple_bkg)
def estimate_centroids(self, aperture_mask="default", method="moments"):
"""Returns the flux center of an object inside ``aperture_mask``.
Telescopes tend to smear out the light from a point-like star over
multiple pixels. For this reason, it is common to estimate the position
of a star by computing the *geometric center* of its image.
Astronomers refer to this position as the *centroid* of the object,
i.e. the term *centroid* is often used as a generic synonym to refer
to the measured position of an object in a telescope exposure.
This function provides two methods to estimate the position of a star:
* `method='moments'` will compute the "center of mass" of the light
based on the 2D image moments of the pixels inside ``aperture_mask``.
* `method='quadratic'` will fit a two-dimensional, second-order
polynomial to the 3x3 patch of pixels centered on the brightest pixel
inside the ``aperture_mask``, and return the peak of that polynomial.
Following Vakili & Hogg 2016 (ArXiv:1610.05873, Section 3.2).
Parameters
----------
aperture_mask : 'pipeline', 'threshold', 'all', 'default', or array-like
Which pixels contain the object to be measured, i.e. which pixels
should be used in the estimation? If None or 'all' are passed,
all pixels in the pixel file will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
If 'default' is passed, 'pipeline' mask will be used when available,
with 'threshold' as the fallback.
Alternatively, users can pass a boolean array describing the
aperture mask such that `True` means that the pixel will be used.
method : 'moments' or 'quadratic'
Defines which method to use to estimate the centroids. 'moments'
computes the centroid based on the sample moments of the data.
'quadratic' fits a 2D polynomial to the data and returns the
coordinate of the peak of that polynomial.
Returns
-------
columns, rows : `~astropy.units.Quantity`, `~astropy.units.Quantity`
Arrays containing the column and row positions for the centroid
for each cadence, or NaN for cadences where the estimation failed.
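        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file)::
        >>> col, row = tpf.estimate_centroids(method="moments")  # doctest: +SKIP
        >>> col, row = tpf.estimate_centroids(aperture_mask="threshold", method="quadratic")  # doctest: +SKIP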
"""
method = validate_method(method, ["moments", "quadratic"])
if method == "moments":
return self._estimate_centroids_via_moments(aperture_mask=aperture_mask)
elif method == "quadratic":
return self._estimate_centroids_via_quadratic(aperture_mask=aperture_mask)
def _estimate_centroids_via_moments(self, aperture_mask):
"""Compute the "center of mass" of the light based on the 2D moments;
this is a helper method for `estimate_centroids()`."""
aperture_mask = self._parse_aperture_mask(aperture_mask)
yy, xx = np.indices(self.shape[1:])
yy = self.row + yy
xx = self.column + xx
total_flux = np.nansum(self.flux[:, aperture_mask], axis=1)
with warnings.catch_warnings():
# RuntimeWarnings may occur below if total_flux contains zeros
warnings.simplefilter("ignore", RuntimeWarning)
col_centr = (
np.nansum(xx * aperture_mask * self.flux, axis=(1, 2)) / total_flux
)
row_centr = (
np.nansum(yy * aperture_mask * self.flux, axis=(1, 2)) / total_flux
)
return col_centr * u.pixel, row_centr * u.pixel
def _estimate_centroids_via_quadratic(self, aperture_mask):
"""Estimate centroids by fitting a 2D quadratic to the brightest pixels;
this is a helper method for `estimate_centroids()`."""
aperture_mask = self._parse_aperture_mask(aperture_mask)
col_centr, row_centr = [], []
for idx in range(len(self.time)):
col, row = centroid_quadratic(self.flux[idx], mask=aperture_mask)
col_centr.append(col)
row_centr.append(row)
        col_centr = np.asarray(col_centr, dtype=float) + self.column
        row_centr = np.asarray(row_centr, dtype=float) + self.row
col_centr = Quantity(col_centr, unit="pixel")
row_centr = Quantity(row_centr, unit="pixel")
return col_centr, row_centr
def _aperture_photometry(
self, aperture_mask, flux_method="sum", centroid_method="moments"
):
"""Helper method for ``extract_aperture photometry``.
Returns
-------
flux, flux_err, centroid_col, centroid_row
"""
# Validate the aperture mask
apmask = self._parse_aperture_mask(aperture_mask)
if apmask.sum() == 0:
log.warning("Warning: aperture mask contains zero pixels.")
# Estimate centroids
centroid_col, centroid_row = self.estimate_centroids(
apmask, method=centroid_method
)
# Estimate flux
if flux_method == "sum":
flux = np.nansum(self.flux[:, apmask], axis=1)
elif flux_method == "median":
flux = np.nanmedian(self.flux[:, apmask], axis=1)
elif flux_method == "mean":
flux = np.nanmean(self.flux[:, apmask], axis=1)
else:
raise ValueError("`flux_method` must be one of 'sum', 'median', or 'mean'.")
# In the future we may wish to add a user specified function
# We use ``np.nansum`` above to be robust against a subset of pixels
# being NaN, however if *all* pixels are NaN, we propagate a NaN.
is_allnan = ~np.any(np.isfinite(self.flux[:, apmask]), axis=1)
flux[is_allnan] = np.nan
# Similarly, if *all* pixel values across the TPF are exactly zero,
# we propagate NaN (cf. #873 for an example of this happening)
is_allzero = np.all(self.flux == 0, axis=(1, 2))
flux[is_allzero] = np.nan
# Estimate flux_err
with warnings.catch_warnings():
# Ignore warnings due to negative errors
warnings.simplefilter("ignore", RuntimeWarning)
if flux_method == "sum":
flux_err = np.nansum(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
elif flux_method == "median":
flux_err = np.nanmedian(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
elif flux_method == "mean":
flux_err = np.nanmean(self.flux_err[:, apmask] ** 2, axis=1) ** 0.5
is_allnan = ~np.any(np.isfinite(self.flux_err[:, apmask]), axis=1)
flux_err[is_allnan] = np.nan
if self.get_header(1).get("TUNIT5") == "e-/s":
flux = Quantity(flux, unit="electron/s")
if self.get_header(1).get("TUNIT6") == "e-/s":
flux_err = Quantity(flux_err, unit="electron/s")
return flux, flux_err, centroid_col, centroid_row
def query_solar_system_objects(
self,
cadence_mask="outliers",
radius=None,
sigma=3,
cache=True,
return_mask=False,
show_progress=True
):
"""Returns a list of asteroids or comets which affected the target pixel files.
Light curves of stars or galaxies are frequently affected by solar
system bodies (e.g. asteroids, comets, planets). These objects can move
across a target's photometric aperture mask on time scales of hours to
days. When they pass through a mask, they tend to cause a brief spike
in the brightness of the target. They can also cause dips by moving
through a local background aperture mask (if any is used).
        The artificial spikes and dips introduced by asteroids are frequently
        confused with stellar flares, planet transits, etc. This method helps
        to identify false signals injected by asteroids by providing a list of
the solar system objects (name, brightness, time) that passed in the
vicinity of the target during the span of the light curve.
This method queries the `SkyBot API <http://vo.imcce.fr/webservices/skybot/>`_,
which returns a list of asteroids/comets/planets given a location, time,
and search cone.
Notes
-----
        * This method will use the `ra` and `dec` properties of this target
          pixel file to determine the position of the search cone.
        * The radius of the search cone defaults to the TPF dimension plus
          a margin of 5 spacecraft pixels. You can change this by passing the
          `radius` parameter (unit: degrees).
* By default, this method will only search points in time during which the light
curve showed 3-sigma outliers in flux. You can override this behavior
and search for specific times by passing `cadence_mask`. See examples for details.
Parameters
----------
cadence_mask : str, or boolean array with length of self.time
            Mask in time to select which frames or points should be searched for SSOs.
Default "outliers" will search for SSOs at points that are `sigma` from the mean.
"all" will search all cadences. Alternatively, pass a boolean array with values of "True"
for times to search for SSOs.
radius : optional, float
Radius to search for bodies. If None, will search for SSOs within 5 pixels of
all pixels in the TPF.
sigma : optional, float
            If `cadence_mask` is set to `"outliers"`, `sigma` will be used to identify
outliers.
cache : optional, bool
If True will cache the search result in the astropy cache. Set to False
to request the search again.
return_mask: optional, bool
If True will return a boolean mask in time alongside the result
show_progress: optional, bool
If True will display a progress bar during the download
Returns
-------
result : pandas.DataFrame
DataFrame containing the list objects in frames that were identified to contain
SSOs.
Examples
--------
Find if there are SSOs affecting the target pixel file for the given time frame:
>>> df_sso = tpf.query_solar_system_objects(cadence_mask=(tpf.time.value >= 2014.1) & (tpf.time.value <= 2014.9)) # doctest: +SKIP
Find if there are SSOs affecting the target pixel file for all times, but it will be much slower:
>>> df_sso = tpf.query_solar_system_objects(cadence_mask='all') # doctest: +SKIP
"""
for attr in ["mission", "ra", "dec"]:
if not hasattr(self, "{}".format(attr)):
raise ValueError("Input does not have a `{}` attribute.".format(attr))
location = self.mission.lower()
if isinstance(cadence_mask, str):
if cadence_mask == "outliers":
aper = self.pipeline_mask
if aper.sum() == 0:
aper = "all"
lc = self.to_lightcurve(aperture_mask=aper)
cadence_mask = lc.remove_outliers(sigma=sigma, return_mask=True)[1]
# Avoid searching times with NaN flux; this is necessary because e.g.
# `remove_outliers` includes NaNs in its mask.
cadence_mask &= ~np.isnan(lc.flux)
elif cadence_mask == "all":
cadence_mask = np.ones(len(self.time)).astype(bool)
else:
raise ValueError("invalid `cadence_mask` string argument")
elif isinstance(cadence_mask, collections.abc.Sequence):
cadence_mask = np.array(cadence_mask)
        elif isinstance(cadence_mask, bool):
            # Boundary case: a parenthesized single value, e.g. (True),
            # evaluates to a plain bool rather than a tuple
cadence_mask = np.array([cadence_mask])
elif not isinstance(cadence_mask, np.ndarray):
raise ValueError("Pass a cadence_mask method or a cadence_mask")
        if location in ("kepler", "k2"):
pixel_scale = 4
if location == "tess":
pixel_scale = 27
        if radius is None:
radius = (
2 ** 0.5 * (pixel_scale * (np.max(self.shape[1:]) + 5))
) * u.arcsecond.to(u.deg)
res = _query_solar_system_objects(
ra=self.ra,
dec=self.dec,
times=self.time.jd[cadence_mask],
location=location,
radius=radius,
cache=cache,
show_progress=show_progress,
)
if return_mask:
return res, np.in1d(self.time.jd, res.epoch)
return res
def plot(
self,
ax=None,
frame=0,
cadenceno=None,
bkg=False,
column="FLUX",
aperture_mask=None,
show_colorbar=True,
mask_color="red",
title=None,
style="lightkurve",
**kwargs,
):
"""Plot the pixel data for a single frame (i.e. at a single time).
The time can be specified by frame index number (`frame=0` will show the
first frame) or absolute cadence number (`cadenceno`).
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
frame : int
Frame number. The default is 0, i.e. the first frame.
cadenceno : int, optional
Alternatively, a cadence number can be provided.
This argument has priority over frame number.
bkg : bool
If True and `column="FLUX"`, background will be added to the pixel values.
column : str
Choose the FITS data column to be plotted. May be one of ('FLUX',
'FLUX_ERR','FLUX_BKG','FLUX_BKG_ERR','COSMIC_RAYS','RAW_CNTS').
aperture_mask : ndarray or str
Highlight pixels selected by aperture_mask.
show_colorbar : bool
Whether or not to show the colorbar
mask_color : str
Color to show the aperture mask
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
kwargs : dict
Keywords arguments passed to `lightkurve.utils.plot_image`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
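        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file; the cadence number is illustrative)::
        >>> tpf.plot(frame=0, aperture_mask="pipeline")  # doctest: +SKIP
        >>> tpf.plot(cadenceno=100000, column="FLUX_BKG")  # doctest: +SKIP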
"""
if style == "lightkurve" or style is None:
style = MPLSTYLE
if cadenceno is not None:
try:
frame = np.argwhere(cadenceno == self.cadenceno)[0][0]
except IndexError:
raise ValueError(
"cadenceno {} is out of bounds, "
"must be in the range {}-{}.".format(
cadenceno, self.cadenceno[0], self.cadenceno[-1]
)
)
try:
if column == "FLUX":
if bkg and np.any(np.isfinite(self.flux_bkg[frame])):
data_to_plot = self.flux[frame] + self.flux_bkg[frame]
else:
data_to_plot = self.flux[frame]
else:
data_to_plot = self.hdu[1].data[column][self.quality_mask][frame]
except KeyError:
raise ValueError(
"column must be one of the following: ('FLUX','FLUX_ERR',"
"'FLUX_BKG','FLUX_BKG_ERR','COSMIC_RAYS','RAW_CNTS')"
)
except IndexError:
raise ValueError(
"frame {} is out of bounds, must be in the range "
"0-{}.".format(frame, self.shape[0])
)
# Make list of preset colour labels
clabels = {
"FLUX": "Flux ($e^{-}s^{-1}$)",
"FLUX_ERR": "Flux Err. ($e^{-}s^{-1}$)",
"FLUX_BKG": "Background Flux ($e^{-}s^{-1}$)",
"FLUX_BKG_ERR": "Background Flux Err. ($e^{-}s^{-1}$)",
"COSMIC_RAYS": "Cosmic Ray Flux ($e^{-}s^{-1}$)",
"RAW_CNTS": "Raw Counts",
}
with plt.style.context(style):
if title is None:
title = "Target ID: {}, Cadence: {}".format(
self.targetid, self.cadenceno[frame]
)
# We subtract -0.5 because pixel coordinates refer to the middle of
# a pixel, e.g. (col, row) = (10.0, 20.0) is a pixel center.
img_extent = (
self.column - 0.5,
self.column + self.shape[2] - 0.5,
self.row - 0.5,
self.row + self.shape[1] - 0.5,
)
# If an axes is passed that used WCS projection, don't use img_extent
# This addresses lk issue #1095, where the tpf coordinates were incorrectly plotted
# By default ax=None
            if ax is not None and hasattr(ax, "wcs"):
                img_extent = None
ax = plot_image(
data_to_plot,
ax=ax,
title=title,
extent=img_extent,
show_colorbar=show_colorbar,
clabel=clabels.get(column, column),
**kwargs,
)
ax.grid(False)
# Overlay the aperture mask if given
if aperture_mask is not None:
aperture_mask = self._parse_aperture_mask(aperture_mask)
for i in range(self.shape[1]):
for j in range(self.shape[2]):
if aperture_mask[i, j]:
if hasattr(ax, "wcs"):
# When using WCS coordinates, do not add col/row to mask coords
xy = (j - 0.5, i - 0.5)
else:
xy = (j + self.column - 0.5, i + self.row - 0.5)
rect = patches.Rectangle(
xy=xy,
width=1,
height=1,
color=mask_color,
fill=False,
hatch="//",
)
ax.add_patch(rect)
return ax
def _to_matplotlib_animation(
self, step: int = None, interval: int = 200, **plot_args
) -> "matplotlib.animation.FuncAnimation":
"""Returns a `matplotlib.animation.FuncAnimation` object.
The animation shows the flux values over time by calling `tpf.plot()` for multiple frames.
Parameters
----------
step : int
Spacing between frames. By default, the spacing will be determined such that
50 frames are shown, i.e. `step = len(tpf) // 50`. Showing more than 50 frames
will be slow on many systems.
interval : int
Delay between frames in milliseconds.
**plot_args : dict
Optional parameters passed to tpf.plot().
"""
if step is None:
step = len(self) // 50
if step < 1:
step = 1
column = plot_args.get("column", "FLUX")
ax = self.plot(**plot_args)
def init():
return ax.images
def animate(i):
frame = i * step
ax.images[0].set_data(self.hdu[1].data[column][self.quality_mask][frame])
ax.set_title(f"Frame {frame}")
return ax.images
plt.close(ax.figure) # prevent figure from showing up in interactive mode
# `blit=True` means only re-draw the parts that have changed.
frames = len(self) // step
anim = matplotlib.animation.FuncAnimation(
ax.figure,
animate,
init_func=init,
frames=frames,
interval=interval,
blit=True,
)
return anim
def animate(self, step: int = None, interval: int = 200, **plot_args):
"""Displays an interactive HTML matplotlib animation.
This feature requires a Jupyter notebook environment to display correctly.
Parameters
----------
step : int
Spacing between frames. By default, the spacing will be determined such that
50 frames are shown, i.e. `step = len(tpf) // 50`. Showing more than 50 frames
will be slow on many systems.
interval : int
Delay between frames in milliseconds.
**plot_args : dict
Optional parameters passed to tpf.plot().
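        Examples
        --------
        A minimal sketch (requires a Jupyter notebook; ``tpf`` is assumed to
        be an already-loaded target pixel file)::
        >>> tpf.animate(step=10, interval=100)  # doctest: +SKIP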
"""
try:
            # ipython is an optional dependency to keep Lightkurve easy to
            # install; we can assume it is present when notebook-specific
            # features like this one are called
from IPython.display import HTML
return HTML(self._to_matplotlib_animation(step=step, interval=interval, **plot_args).to_jshtml())
except ModuleNotFoundError:
log.error("ipython needs to be installed for animate() to work (e.g., `pip install ipython`)")
def to_fits(self, output_fn=None, overwrite=False):
"""Writes the TPF to a FITS file on disk."""
if output_fn is None:
output_fn = "{}-targ.fits".format(self.targetid)
self.hdu.writeto(output_fn, overwrite=overwrite, checksum=True)
def interact(
self,
notebook_url="localhost:8888",
max_cadences=200000,
aperture_mask="default",
exported_filename=None,
transform_func=None,
ylim_func=None,
**kwargs,
):
"""Display an interactive Jupyter Notebook widget to inspect the pixel data.
The widget will show both the lightcurve and pixel data. By default,
the lightcurve shown is obtained by calling the `to_lightcurve()` method,
unless the user supplies a custom `LightCurve` object.
This feature requires an optional dependency, bokeh (v0.12.15 or later).
This dependency can be installed using e.g. `conda install bokeh`.
At this time, this feature only works inside an active Jupyter
Notebook, and tends to be too slow when more than ~30,000 cadences
are contained in the TPF (e.g. short cadence data).
Parameters
----------
notebook_url : str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
max_cadences : int
Print an error message if the number of cadences shown is larger than
this value. This limit helps keep browsers from becoming unresponsive.
aperture_mask : array-like, 'pipeline', 'threshold', 'default', or 'all'
A boolean array describing the aperture such that `True` means
that the pixel will be used.
If None or 'all' are passed, all pixels will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
If 'default' is passed, 'pipeline' mask will be used when available,
with 'threshold' as the fallback.
exported_filename: str
An optional filename to assign to exported fits files containing
the custom aperture mask generated by clicking on pixels in interact.
The default adds a suffix '-custom-aperture-mask.fits' to the
TargetPixelFile basename.
transform_func: function
A function that transforms the lightcurve. The function takes in a
LightCurve object as input and returns a LightCurve object as output.
The function can be complex, such as detrending the lightcurve. In this
way, the interactive selection of aperture mask can be evaluated after
inspection of the transformed lightcurve. The transform_func is applied
before saving a fits file. Default: None (no transform is applied).
ylim_func: function
A function that returns ylimits (low, high) given a LightCurve object.
The default is to return an expanded window around the 10-90th
percentile of lightcurve flux values.
Examples
--------
To select an aperture mask for V827 Tau::
>>> import lightkurve as lk
>>> tpf = lk.search_targetpixelfile("V827 Tau", mission="K2").download() # doctest: +SKIP
>>> tpf.interact() # doctest: +SKIP
To see the full y-axis dynamic range of your lightcurve and normalize
the lightcurve after each pixel selection::
>>> ylim_func = lambda lc: (0.0, lc.flux.max()) # doctest: +SKIP
>>> transform_func = lambda lc: lc.normalize() # doctest: +SKIP
>>> tpf.interact(ylim_func=ylim_func, transform_func=transform_func) # doctest: +SKIP
"""
from .interact import show_interact_widget
return show_interact_widget(
self,
notebook_url=notebook_url,
max_cadences=max_cadences,
aperture_mask=aperture_mask,
exported_filename=exported_filename,
transform_func=transform_func,
ylim_func=ylim_func,
**kwargs,
)
def interact_sky(self, notebook_url="localhost:8888", aperture_mask="empty", magnitude_limit=18):
"""Display a Jupyter Notebook widget showing Gaia DR2 positions on top of the pixels.
Parameters
----------
notebook_url : str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
aperture_mask : array-like, 'pipeline', 'threshold', 'default', 'background', or 'empty'
Highlight pixels selected by aperture_mask.
Default is 'empty': no pixel is highlighted.
magnitude_limit : float
            Faintest Gaia G magnitude to include in the results. Default: 18.
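        Examples
        --------
        A minimal sketch (requires bokeh and a Jupyter notebook; ``tpf`` is
        assumed to be an already-loaded target pixel file)::
        >>> tpf.interact_sky(magnitude_limit=16)  # doctest: +SKIP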
"""
from .interact import show_skyview_widget
return show_skyview_widget(
self, notebook_url=notebook_url, aperture_mask=aperture_mask, magnitude_limit=magnitude_limit
)
def to_corrector(self, method="pld", **kwargs):
"""Returns a `~correctors.corrector.Corrector` instance to remove systematics.
Parameters
----------
        method : str
Currently, only "pld" is supported. This will return a
`~correctors.PLDCorrector` class instance.
**kwargs : dict
Extra keyword arguments to be passed on to the corrector class.
Returns
-------
        corrector : `~correctors.corrector.Corrector`
Instance of a Corrector class, which typically provides `~correctors.PLDCorrector.correct()`
and `~correctors.PLDCorrector.diagnose()` methods.
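        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file)::
        >>> corrected_lc = tpf.to_corrector("pld").correct()  # doctest: +SKIP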
"""
allowed_methods = ["pld"]
if method == "sff":
raise ValueError(
"The 'sff' method requires a `LightCurve` instead "
"of a `TargetPixelFile` object. Use `to_lightcurve()` "
"to obtain a `LightCurve` first."
)
if method not in allowed_methods:
raise ValueError(
("Unrecognized method '{0}'\n" "allowed methods are: {1}").format(
method, allowed_methods
)
)
if method == "pld":
from .correctors import PLDCorrector
return PLDCorrector(self, **kwargs)
def cutout(self, center=None, size=5):
"""Cut a rectangle out of the Target Pixel File.
        This method returns a new `TargetPixelFile` object containing a
rectangle of a given ``size`` cut out around a given ``center``.
Parameters
----------
center : (int, int) tuple or `astropy.SkyCoord`
Center of the cutout. If an (int, int) tuple is passed, it will be
interpreted as the (column, row) coordinates relative to
the bottom-left corner of the TPF. If an `astropy.SkyCoord` is
passed then the sky coordinate will be used instead.
If `None` (default) then the center of the TPF will be used.
size : int or (int, int) tuple
Number of pixels to cut out. If a single integer is passed then
a square of that size will be cut. If a tuple is passed then a
rectangle with dimensions (column_size, row_size) will be cut.
Returns
-------
tpf : `lightkurve.TargetPixelFile` object
New and smaller Target Pixel File object containing only the data
cut out.
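        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an already-loaded target
        pixel file; the coordinates are illustrative)::
        >>> small_tpf = tpf.cutout(size=3)  # doctest: +SKIP
        >>> from astropy.coordinates import SkyCoord
        >>> small_tpf = tpf.cutout(center=SkyCoord(285.0, 44.5, unit="deg"), size=(5, 7))  # doctest: +SKIP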
"""
imshape = self.flux.shape[1:]
# Parse the user input (``center``) into an (x, y) coordinate
if center is None:
x, y = imshape[0] // 2, imshape[1] // 2
elif isinstance(center, SkyCoord):
x, y = self.wcs.world_to_pixel(center)
elif isinstance(center, (tuple, list, np.ndarray)):
x, y = center
col = int(x)
row = int(y)
# Parse the user input (``size``)
if isinstance(size, int):
s = (size / 2, size / 2)
elif isinstance(size, (tuple, list, np.ndarray)):
s = (size[0] / 2, size[1] / 2)
# Find the TPF edges
col_edges = np.asarray(
[np.max([0, col - s[0]]), np.min([col + s[0], imshape[1]])], dtype=int
)
row_edges = np.asarray(
[np.max([0, row - s[1]]), np.min([row + s[1], imshape[0]])], dtype=int
)
# Make a copy of the data extension
hdu = self.hdu[0].copy()
# Find the new object coordinates
r, d = self.get_coordinates(cadence=len(self.flux) // 2)
hdu.header["RA_OBJ"] = np.nanmean(
r[row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]]
)
hdu.header["DEC_OBJ"] = np.nanmean(
d[row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]]
)
# Remove any KIC labels
labels = [
"*MAG",
"PM*",
"GL*",
"PARALLAX",
"*COLOR",
"TEFF",
"LOGG",
"FEH",
"EBMINUSV",
"AV",
"RADIUS",
"TMINDEX",
]
for label in labels:
if label in hdu.header:
hdu.header[label] = fits.card.Undefined()
# HDUList
hdus = [hdu]
# Copy the header
hdr = deepcopy(self.hdu[1].header)
# Trim any columns that have the shape of the image, to be the new shape
data_columns = []
with warnings.catch_warnings():
warnings.simplefilter("ignore")
for idx, datacol in enumerate(self.hdu[1].columns):
# We exclude Kepler's obscure "RB_LEVEL" column from cutouts
# for now because it has an awkward shape
if datacol.name == "RB_LEVEL":
continue
# If the column is 3D
if len(self.hdu[1].data[datacol.name].shape) == 3:
# Make a copy, trim it and change the format
datacol = deepcopy(datacol)
datacol.array = datacol.array[
:, row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]
]
datacol._dim = "{}".format(datacol.array.shape[1:]).replace(" ", "")
datacol._dims = datacol.array.shape[1:]
datacol._format = fits.column._ColumnFormat(
"{}{}".format(
                            np.prod(datacol.array.shape[1:]), datacol._format[-1]
)
)
data_columns.append(datacol)
hdr["TDIM{}".format(idx)] = "{}".format(
datacol.array.shape[1:]
).replace(" ", "")
hdr["TDIM9"] = "{}".format(datacol.array.shape[1:]).replace(" ", "")
else:
data_columns.append(datacol)
# Get those coordinates sorted for the corner of the TPF and the WCS
hdr["1CRV*P"] = hdr["1CRV4P"] + col_edges[0]
hdr["2CRV*P"] = hdr["2CRV4P"] + row_edges[0]
hdr["1CRPX*"] = hdr["1CRPX4"] - col_edges[0]
hdr["2CRPX*"] = hdr["2CRPX4"] - row_edges[0]
# Make a table for the data
data_columns[-1]._dim = "{}".format(
(0, int(data_columns[5]._dim.split(",")[1][:-1]))
).replace(" ", "")
with warnings.catch_warnings():
warnings.simplefilter("ignore")
btbl = fits.BinTableHDU.from_columns(data_columns, header=hdr)
# Append it to the hdulist
hdus.append(btbl)
# Correct the aperture mask
hdu = self.hdu[2].copy()
ar = hdu.data
ar = ar[row_edges[0] : row_edges[1], col_edges[0] : col_edges[1]]
hdu.header["NAXIS1"] = ar.shape[0]
hdu.header["NAXIS2"] = ar.shape[1]
hdu.data = ar
hdus.append(hdu)
# Make a new tpf
with warnings.catch_warnings():
warnings.simplefilter("ignore")
newfits = fits.HDUList(hdus)
return self.__class__(newfits, quality_bitmask=self.quality_bitmask)
@staticmethod
def from_fits_images(
images_flux,
position,
images_raw_cnts=None,
images_flux_err=None,
images_flux_bkg=None,
images_flux_bkg_err=None,
images_cosmic_rays=None,
size=(11, 11),
extension=1,
target_id="unnamed-target",
hdu0_keywords=None,
**kwargs,
):
"""Creates a new Target Pixel File from a set of images.
This method is intended to make it easy to cut out targets from
Kepler/K2 "superstamp" regions or TESS FFI images.
Parameters
----------
images_flux : list of str, or list of fits.ImageHDU objects
Sorted list of FITS filename paths or ImageHDU objects to get
the flux data from.
position : astropy.SkyCoord
Position around which to cut out pixels.
images_raw_cnts : list of str, or list of fits.ImageHDU objects
Sorted list of FITS filename paths or ImageHDU objects to get
the raw counts data from.
images_flux_err : list of str, or list of fits.ImageHDU objects
Sorted list of FITS filename paths or ImageHDU objects to get
the flux error data from.
images_flux_bkg : list of str, or list of fits.ImageHDU objects
Sorted list of FITS filename paths or ImageHDU objects to get
the background data from.
images_flux_bkg_err : list of str, or list of fits.ImageHDU objects
Sorted list of FITS filename paths or ImageHDU objects to get
the background error data from.
images_cosmic_rays : list of str, or list of fits.ImageHDU objects
Sorted list of FITS filename paths or ImageHDU objects to get
the cosmic rays data from.
size : (int, int)
Dimensions (cols, rows) to cut out around `position`.
extension : int or str
If `images` is a list of filenames, provide the extension number
or name to use. This should be the same for all flux inputs
provided. Default: 1.
target_id : int or str
Unique identifier of the target to be recorded in the TPF.
hdu0_keywords : dict
Additional keywords to add to the first header file.
**kwargs : dict
Extra arguments to be passed to the `TargetPixelFile` constructor.
Returns
-------
tpf : TargetPixelFile
A new Target Pixel File assembled from the images.
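        Examples
        --------
        A minimal sketch (the file pattern and coordinates are illustrative)::
        >>> from glob import glob
        >>> from astropy.coordinates import SkyCoord
        >>> images = sorted(glob("sector01/*ffic.fits"))  # doctest: +SKIP
        >>> position = SkyCoord(285.0, 44.5, unit="deg")  # doctest: +SKIP
        >>> tpf = TessTargetPixelFile.from_fits_images(images, position, size=(11, 11))  # doctest: +SKIP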
"""
len_images = len(images_flux)
if len_images == 0:
raise ValueError("One or more images must be passed.")
if not isinstance(position, SkyCoord):
raise ValueError("Position must be an astropy.coordinates.SkyCoord.")
if hdu0_keywords is None:
hdu0_keywords = {}
basic_keywords = [
"MISSION",
"TELESCOP",
"INSTRUME",
"QUARTER",
"CAMPAIGN",
"CHANNEL",
"MODULE",
"OUTPUT",
"CAMERA",
"CCD",
"SECTOR",
]
carry_keywords = {}
# Define a helper function to accept images in a flexible way
def _open_image(img, extension):
if isinstance(img, fits.ImageHDU):
hdu = img
elif isinstance(img, fits.HDUList):
hdu = img[extension]
else:
with fits.open(img) as hdulist:
hdu = hdulist[extension].copy()
return hdu
# Define a helper function to cutout images if not None
def _cutout_image(hdu, position, wcs_ref, size):
if hdu is None:
cutout_data = None
cutout_wcs = None
elif position is None:
cutout_data = hdu.data
cutout_wcs = hdu.wcs
else:
cutout = Cutout2D(
hdu.data, position, wcs=wcs_ref, size=size, mode="partial"
)
cutout_data = cutout.data
cutout_wcs = cutout.wcs
return cutout_data, cutout_wcs
# Set the default extension if unspecified
if extension is None:
extension = 0
if isinstance(images_flux[0], str) and images_flux[0].endswith("ffic.fits"):
extension = 1 # TESS FFIs have the image data in extension #1
# If no position is given, ensure the cut-out size matches the image size
if position is None:
size = _open_image(images_flux[0], extension).data.shape
# Find middle image to use as a WCS reference
try:
mid_hdu = _open_image(images_flux[int(len_images / 2) - 1], extension)
wcs_ref = WCS(mid_hdu)
column, row = wcs_ref.all_world2pix(
np.asarray([[position.ra.deg], [position.dec.deg]]).T, 0
)[0]
except Exception as e:
raise e
# Create a factory and set default keyword values based on the middle image
factory = TargetPixelFileFactory(
n_cadences=len_images, n_rows=size[0], n_cols=size[1], target_id=target_id
)
# Get some basic keywords
for kw in basic_keywords:
if kw in mid_hdu.header:
if not isinstance(mid_hdu.header[kw], Undefined):
carry_keywords[kw] = mid_hdu.header[kw]
if ("MISSION" not in carry_keywords) and ("TELESCOP" in carry_keywords):
carry_keywords["MISSION"] = carry_keywords["TELESCOP"]
allkeys = hdu0_keywords.copy()
allkeys.update(carry_keywords)
img_list = [
images_raw_cnts,
images_flux,
images_flux_err,
images_flux_bkg,
images_flux_bkg_err,
images_cosmic_rays,
]
for idx, img in tqdm(enumerate(images_flux), total=len_images):
# Open images if provided and get HDUs
hdu_list = [
_open_image(i[idx], extension) if i is not None else None
for i in img_list
]
# Use the header in the flux image for each frame
hdu_idx = hdu_list[1].header
if idx == 0: # Get default keyword values from the first flux image
factory.keywords = hdu_idx
# Get positional shift of the image compared to the reference WCS
wcs_current = WCS(hdu_idx)
column_current, row_current = wcs_current.all_world2pix(
np.asarray([[position.ra.deg], [position.dec.deg]]).T, 0
)[0]
column_ref, row_ref = wcs_ref.all_world2pix(
np.asarray([[position.ra.deg], [position.dec.deg]]).T, 0
)[0]
with warnings.catch_warnings():
# Using `POS_CORR1` as a header keyword violates the FITS
# standard for being too long, but we use it for consistency
# with the TPF column name. Hence we ignore the warning.
warnings.simplefilter("ignore", AstropyWarning)
hdu_idx["POS_CORR1"] = column_current - column_ref
hdu_idx["POS_CORR2"] = row_current - row_ref
            # Cut out (if necessary) and get data
cutout_list = [
_cutout_image(hdu, position, wcs_ref, size) for hdu in hdu_list
]
# Flatten output list
cutout_list = [item for sublist in cutout_list for item in sublist]
(
raw_cnts,
_,
flux,
wcs,
flux_err,
_,
flux_bkg,
_,
flux_bkg_err,
_,
cosmic_rays,
_,
) = cutout_list
factory.add_cadence(
frameno=idx,
raw_cnts=raw_cnts,
flux=flux,
flux_err=flux_err,
flux_bkg=flux_bkg,
flux_bkg_err=flux_bkg_err,
cosmic_rays=cosmic_rays,
header=hdu_idx,
)
ext_info = {}
ext_info["TFORM4"] = "{}J".format(size[0] * size[1])
ext_info["TDIM4"] = "({},{})".format(size[0], size[1])
ext_info.update(wcs.to_header(relax=True))
# TPF contains multiple data columns that require WCS
for m in [4, 5, 6, 7, 8, 9]:
if m > 4:
ext_info["TFORM{}".format(m)] = "{}E".format(size[0] * size[1])
ext_info["TDIM{}".format(m)] = "({},{})".format(size[0], size[1])
            # Compute the distance from the star to the TPF lower-left corner.
            # This is approximately half the TPF size, with an adjustment factor
            # depending on whether the star's pixel position gets rounded up.
            # The first int() ensures that even sizes always round to one less
            # than half of their value.
half_tpfsize_col = int((size[0] - 1) / 2.0) + (
int(round(column)) - int(column)
) * ((size[0] + 1) % 2)
half_tpfsize_row = int((size[1] - 1) / 2.0) + (
int(round(row)) - int(row)
) * ((size[1] + 1) % 2)
ext_info["1CRV{}P".format(m)] = (
int(round(column)) - half_tpfsize_col + factory.keywords["CRVAL1P"] - 1
)
ext_info["2CRV{}P".format(m)] = (
int(round(row)) - half_tpfsize_row + factory.keywords["CRVAL2P"] - 1
)
return factory.get_tpf(hdu0_keywords=allkeys, ext_info=ext_info, **kwargs)
def plot_pixels(
self,
ax=None,
periodogram=False,
aperture_mask=None,
show_flux=False,
corrector_func=None,
style="lightkurve",
title=None,
markersize=0.5,
**kwargs,
):
"""Show the light curve of each pixel in a single plot.
Note that all values are autoscaled and axis labels are not provided.
This utility is designed for by-eye inspection of signal morphology.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
periodogram : bool
            Default: False; if True, periodograms of the normalized per-pixel
            light curves will be plotted instead of the light curves themselves.
aperture_mask : ndarray or str
Highlight pixels selected by aperture_mask.
Only `pipeline`, `threshold`, or custom masks will be plotted.
`all` and None masks will be ignored.
show_flux : bool
Default: False; if True, shade pixels with frame 0 flux colour
Inspired by https://github.com/noraeisner/LATTE
corrector_func : function
Function that accepts and returns a `~lightkurve.lightcurve.LightCurve`.
This function is applied to each light curve in the collection
prior to stitching. The default is to normalize each light curve.
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
        title : str
            Plot title; by default, a title based on the target ID and the
            time range is generated.
        markersize : float
Size of the markers in the lightcurve plot. For periodogram plot, it is used as the line width.
Default: 0.5
kwargs : dict
e.g. extra parameters to be passed to `lc.to_periodogram`.
Examples
--------
Inspect the lightcurve around a possible transit at per-pixel level::
>>> import lightkurve as lk
>>> # A possible transit around time BTJD 2277.0. Inspect the lightcurve around that time
>>> tpf = tpf[(tpf.time.value >= 2276.5) & (tpf.time.value <= 2277.5)] # doctest: +SKIP
>>> tpf.plot_pixels(aperture_mask='pipeline') # doctest: +SKIP
>>>
>>> # Variation: shade the pixel based on the flux at frame 0
>>> # increase markersize so that it is more legible for pixels with yellow background (the brightest pixels)
>>> tpf.plot_pixels(aperture_mask='pipeline', show_flux=True, markersize=1.5) # doctest: +SKIP
>>>
>>> # Variation: Customize the plot's size so that each pixel is about 1 inch by 1 inch
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(tpf.flux[0].shape[0] * 1.0, tpf.flux[0].shape[1] * 1.0)) # doctest: +SKIP
>>> tpf.plot_pixels(ax=fig.gca(), aperture_mask='pipeline') # doctest: +SKIP
"""
if style == "lightkurve" or style is None:
style = MPLSTYLE
if title is None:
title = "Target ID: {0}, {1:.2f} - {2:.2f} {3}".format(
self.targetid,
self.time[0].value,
self.time[-1].value,
_time_label_brief(self.time),
)
if corrector_func is None:
corrector_func = lambda x: x.remove_outliers()
if show_flux:
cmap = plt.get_cmap()
norm = plt.Normalize(
vmin=np.nanmin(self.flux[0].value), vmax=np.nanmax(self.flux[0].value)
)
mask = self._parse_aperture_mask(aperture_mask)
with warnings.catch_warnings():
warnings.simplefilter(
"ignore", category=(RuntimeWarning, LightkurveWarning)
)
# get an aperture mask for each pixel
masks = np.zeros(
(self.shape[1] * self.shape[2], self.shape[1], self.shape[2]),
dtype="bool",
)
for i in range(self.shape[1] * self.shape[2]):
masks[i][np.unravel_index(i, (self.shape[1], self.shape[2]))] = True
pixel_list = []
for j in range(self.shape[1] * self.shape[2]):
lc = self.to_lightcurve(aperture_mask=masks[j])
lc = corrector_func(lc)
if periodogram:
try:
pixel_list.append(lc.to_periodogram(**kwargs))
except IndexError:
pixel_list.append(None)
else:
if len(lc.remove_nans().flux) == 0:
pixel_list.append(None)
else:
pixel_list.append(lc)
with plt.style.context(style):
if ax is None:
fig = plt.figure()
ax = plt.gca()
set_size = True
else:
fig = ax.get_figure()
set_size = False
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
if periodogram:
ax.set(
title=title,
xlabel="Frequency / Column (pixel)",
ylabel="Power / Row (pixel)",
)
else:
ax.set(
title=title,
xlabel="Time / Column (pixel)",
ylabel="Flux / Row (pixel)",
)
gs = gridspec.GridSpec(
self.shape[1], self.shape[2], wspace=0.01, hspace=0.01
)
for k in range(self.shape[1] * self.shape[2]):
if pixel_list[k]:
x, y = np.unravel_index(k, (self.shape[1], self.shape[2]))
# Highlight aperture mask in red
if aperture_mask is not None and mask[x, y]:
rc = {"axes.linewidth": 2, "axes.edgecolor": "red"}
else:
rc = {"axes.linewidth": 1}
with plt.rc_context(rc=rc):
gax = fig.add_subplot(gs[self.shape[1] - x - 1, y])
# Determine background and foreground color
if show_flux:
gax.set_facecolor(cmap(norm(self.flux.value[0, x, y])))
markercolor = "white"
else:
markercolor = "black"
# Plot flux or periodogram
if periodogram:
gax.plot(
pixel_list[k].frequency.value,
pixel_list[k].power.value,
marker="None",
color=markercolor,
lw=markersize,
)
else:
gax.plot(
pixel_list[k].time.value,
pixel_list[k].flux.value,
marker=".",
color=markercolor,
ms=markersize,
lw=0,
)
gax.margins(y=0.1, x=0)
gax.set_xticklabels("")
gax.set_yticklabels("")
gax.set_xticks([])
gax.set_yticks([])
# add row/column numbers to start / end
if x == 0 and y == 0:
gax.set_xlabel(f"{self.column}")
gax.set_ylabel(f"{self.row}")
if x == 0 and y == self.shape[2] - 1: # lower right
gax.set_xlabel(f"{self.column + self.shape[2] - 1}")
if x == self.shape[1] - 1 and y == 0: # upper left
gax.set_ylabel(f"{self.row + self.shape[1] - 1}")
if set_size: # use default size when caller does not supply ax
fig.set_size_inches((y * 1.5, x * 1.5))
return ax
class KeplerTargetPixelFile(TargetPixelFile):
"""Class to read and interact with the pixel data products
("Target Pixel Files") created by NASA's Kepler pipeline.
This class offers a user-friendly way to open a Kepler Target Pixel File
(TPF), access its meta data, visualize its contents, extract light curves
with custom aperture masks, estimate centroid positions, and more.
Please consult the `TargetPixelFile tutorial
<https://docs.lightkurve.org/tutorials/01-target-pixel-files.html>`_
in the online documentation for examples on using this class.
Parameters
----------
path : str or `~astropy.io.fits.HDUList`
Path to a Kepler Target Pixel file. Alternatively, you can pass a
`.HDUList` object, which is the AstroPy object returned by
the `astropy.io.fits.open` function.
quality_bitmask : "none", "default", "hard", "hardest", or int
Bitmask that should be used to ignore bad-quality cadences.
If a string is passed, it has the following meaning:
* "none": no cadences will be ignored (equivalent to
``quality_bitmask=0``).
* "default": cadences with severe quality issues will be ignored
(equivalent to ``quality_bitmask=1130799``).
* "hard": more conservative choice of flags to ignore
(equivalent to ``quality_bitmask=1664431``).
This is known to remove good data.
* "hardest": remove all cadences that have one or more flags raised
(equivalent to ``quality_bitmask=2096639``). This mask is not
recommended because some quality flags can safely be ignored.
If an integer is passed, it will be used as a bitmask, i.e. it will
have the effect of removing cadences where
``(tpf.hdu[1].data['QUALITY'] & quality_bitmask) > 0``.
See the :class:`KeplerQualityFlags` class for details on the bitmasks.
**kwargs : dict
Optional keyword arguments passed on to `astropy.io.fits.open`.
References
----------
.. [1] Kepler: A Search for Terrestrial Planets. Kepler Archive Manual.
http://archive.stsci.edu/kepler/manuals/archive_manual.pdf
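    Examples
    --------
    A minimal sketch of typical usage (the file name is hypothetical; any
    Kepler TPF will do)::
        >>> import lightkurve as lk
        >>> tpf = lk.KeplerTargetPixelFile("kplr-tpf.fits")  # doctest: +SKIP
        >>> lc = tpf.to_lightcurve(aperture_mask="pipeline")  # doctest: +SKIP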
"""
def __init__(self, path, quality_bitmask="default", **kwargs):
super(KeplerTargetPixelFile, self).__init__(
path, quality_bitmask=quality_bitmask, **kwargs
)
self.quality_mask = KeplerQualityFlags.create_quality_mask(
quality_array=self.hdu[1].data["QUALITY"], bitmask=quality_bitmask
)
# check to make sure the correct filetype has been provided
filetype = detect_filetype(self.hdu)
if filetype == "TessTargetPixelFile":
warnings.warn(
"A TESS data product is being opened using the "
"`KeplerTargetPixelFile` class. "
"Please use `TessTargetPixelFile` instead.",
LightkurveWarning,
)
elif filetype is None:
warnings.warn(
"File header not recognized as Kepler or TESS " "observation.",
LightkurveWarning,
)
# Use the KEPLERID keyword as the default targetid
if self.targetid is None:
self.targetid = self.get_header().get("KEPLERID")
def __repr__(self):
return "KeplerTargetPixelFile Object (ID: {})".format(self.targetid)
def get_prf_model(self):
"""Returns an object of KeplerPRF initialized using the
necessary metadata in the tpf object.
Returns
-------
prf : instance of SimpleKeplerPRF
"""
return KeplerPRF(
channel=self.channel, shape=self.shape[1:], column=self.column, row=self.row
)
@property
def obsmode(self):
"""'short cadence' or 'long cadence'. ('OBSMODE' header keyword)"""
return self.get_keyword("OBSMODE")
@property
def module(self):
"""Kepler CCD module number. ('MODULE' header keyword)"""
return self.get_keyword("MODULE")
@property
def output(self):
"""Kepler CCD module output number. ('OUTPUT' header keyword)"""
return self.get_keyword("OUTPUT")
@property
def channel(self):
"""Kepler CCD channel number. ('CHANNEL' header keyword)"""
return self.get_keyword("CHANNEL")
@property
def quarter(self):
"""Kepler quarter number. ('QUARTER' header keyword)"""
return self.get_keyword("QUARTER")
@property
def campaign(self):
"""K2 Campaign number. ('CAMPAIGN' header keyword)"""
return self.get_keyword("CAMPAIGN")
@property
def mission(self):
"""'Kepler' or 'K2'. ('MISSION' header keyword)"""
return self.get_keyword("MISSION")
def extract_aperture_photometry(
self, aperture_mask="default", flux_method="sum", centroid_method="moments"
):
"""Returns a LightCurve obtained using aperture photometry.
Parameters
----------
aperture_mask : array-like, 'pipeline', 'threshold', 'default', or 'all'
A boolean array describing the aperture such that `True` means
that the pixel will be used.
If None or 'all' are passed, all pixels will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
If 'default' is passed, 'pipeline' mask will be used when available,
with 'threshold' as the fallback.
flux_method: 'sum', 'median', or 'mean'
Determines how the pixel values within the aperture mask are combined
at each cadence. Defaults to 'sum'.
centroid_method : str, 'moments' or 'quadratic'
            For details on this argument, please refer to the documentation
for `estimate_centroids()`.
Returns
-------
lc : KeplerLightCurve object
Array containing the summed flux within the aperture for each
cadence.
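        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an existing
        `KeplerTargetPixelFile`)::
            >>> lc = tpf.extract_aperture_photometry(aperture_mask="threshold")  # doctest: +SKIP
            >>> lc.plot()  # doctest: +SKIP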
"""
# explicitly resolve default, so that the aperture_mask set in meta
# later will be the resolved one
aperture_mask = self._resolve_default_aperture_mask(aperture_mask)
flux, flux_err, centroid_col, centroid_row = self._aperture_photometry(
aperture_mask=aperture_mask,
flux_method=flux_method,
centroid_method=centroid_method,
)
keys = {
"centroid_col": centroid_col,
"centroid_row": centroid_row,
"quality": self.quality,
"channel": self.channel,
"campaign": self.campaign,
"quarter": self.quarter,
"mission": self.mission,
"cadenceno": self.cadenceno,
"ra": self.ra,
"dec": self.dec,
"label": self.get_keyword("OBJECT", default=self.targetid),
"targetid": self.targetid,
}
meta = {"APERTURE_MASK": aperture_mask}
return KeplerLightCurve(
time=self.time, flux=flux, flux_err=flux_err, **keys, meta=meta
)
def get_bkg_lightcurve(self, aperture_mask=None):
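        """Returns the background flux light curve within ``aperture_mask``.
        The per-cadence background flux is summed over the mask pixels;
        the corresponding errors are added in quadrature.
        """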
aperture_mask = self._parse_aperture_mask(aperture_mask)
# Ignore warnings related to zero or negative errors
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
flux_bkg_err = (
np.nansum(self.flux_bkg_err[:, aperture_mask] ** 2, axis=1) ** 0.5
)
keys = {
"quality": self.quality,
"channel": self.channel,
"campaign": self.campaign,
"quarter": self.quarter,
"mission": self.mission,
"cadenceno": self.cadenceno,
"ra": self.ra,
"dec": self.dec,
"label": self.get_keyword("OBJECT", default=self.targetid),
"targetid": self.targetid,
}
return KeplerLightCurve(
time=self.time,
flux=np.nansum(self.flux_bkg[:, aperture_mask], axis=1),
flux_err=flux_bkg_err,
**keys,
)
def get_model(self, star_priors=None, **kwargs):
"""Returns a default `TPFModel` object for PRF fitting.
The default model only includes one star and only allows its flux
and position to change. A different set of stars can be added using
the `star_priors` parameter.
Parameters
----------
**kwargs : dict
Arguments to be passed to the `TPFModel` constructor, e.g.
`star_priors`.
Returns
-------
model : TPFModel object
Model with appropriate defaults for this Target Pixel File.
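        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an existing
        `KeplerTargetPixelFile`)::
            >>> model = tpf.get_model()  # doctest: +SKIP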
"""
from .prf import TPFModel, StarPrior, BackgroundPrior
from .prf import UniformPrior, GaussianPrior
# Set up the model
if "star_priors" not in kwargs:
centr_col, centr_row = self.estimate_centroids()
star_priors = [
StarPrior(
col=GaussianPrior(
mean=np.nanmedian(centr_col.value),
var=np.nanstd(centr_col.value) ** 2,
),
row=GaussianPrior(
mean=np.nanmedian(centr_row.value),
var=np.nanstd(centr_row.value) ** 2,
),
flux=UniformPrior(
lb=0.5 * np.nanmax(self.flux[0].value),
ub=2 * np.nansum(self.flux[0].value) + 1e-10,
),
targetid=self.targetid,
)
]
kwargs["star_priors"] = star_priors
if "prfmodel" not in kwargs:
kwargs["prfmodel"] = self.get_prf_model()
if "background_prior" not in kwargs:
if np.all(
np.isnan(self.flux_bkg)
): # If TargetPixelFile has no background flux data
# Use the median of the lower half of flux as an estimate for flux_bkg
clipped_flux = np.ma.masked_where(
self.flux.value > np.percentile(self.flux.value, 50),
self.flux.value,
)
flux_prior = GaussianPrior(
mean=np.ma.median(clipped_flux), var=np.ma.std(clipped_flux) ** 2
)
else:
flux_prior = GaussianPrior(
mean=np.nanmedian(self.flux_bkg.value),
var=np.nanstd(self.flux_bkg.value) ** 2,
)
kwargs["background_prior"] = BackgroundPrior(flux=flux_prior)
return TPFModel(**kwargs)
def extract_prf_photometry(self, cadences=None, parallel=True, **kwargs):
"""Returns the results of PRF photometry applied to the pixel file.
Parameters
----------
cadences : list of int
Cadences to fit. If `None` (default) then all cadences will be fit.
parallel : bool
If `True`, fitting cadences will be distributed across multiple
cores using Python's `multiprocessing` module.
**kwargs : dict
Keywords to be passed to `get_model()` to create the
`~prf.TPFModel` object that will be fit.
Returns
-------
results : PRFPhotometry object
Object that provides access to PRF-fitting photometry results and
various diagnostics.
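        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an existing
        `KeplerTargetPixelFile`; this feature is experimental)::
            >>> prfphot = tpf.extract_prf_photometry()  # doctest: +SKIP
            >>> lc = prfphot.lightcurves[0]  # doctest: +SKIP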
"""
from .prf import PRFPhotometry
log.warning(
"Warning: PRF-fitting photometry is experimental "
"in this version of lightkurve."
)
prfphot = PRFPhotometry(model=self.get_model(**kwargs))
prfphot.run(
self.flux + self.flux_bkg,
cadences=cadences,
parallel=parallel,
pos_corr1=self.pos_corr1,
pos_corr2=self.pos_corr2,
)
return prfphot
def prf_lightcurve(self, **kwargs):
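        """Returns a `KeplerLightCurve` based on the first light curve
        obtained from `extract_prf_photometry()`.
        """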
lc = self.extract_prf_photometry(**kwargs).lightcurves[0]
keys = {
"quality": self.quality,
"channel": self.channel,
"campaign": self.campaign,
"quarter": self.quarter,
"mission": self.mission,
"cadenceno": self.cadenceno,
"ra": self.ra,
"dec": self.dec,
"targetid": self.targetid,
}
return KeplerLightCurve(time=self.time, flux=lc.flux, **keys)
class FactoryError(Exception):
"""Raised if there is a problem creating a TPF."""
pass
class TargetPixelFileFactory(object):
"""Class to create a TargetPixelFile."""
def __init__(
self, n_cadences, n_rows, n_cols, target_id="unnamed-target", keywords=None
):
self.n_cadences = n_cadences
self.n_rows = n_rows
self.n_cols = n_cols
self.target_id = target_id
if keywords is None:
self.keywords = {}
else:
self.keywords = keywords
# Initialize the 3D data structures
self.raw_cnts = np.empty((n_cadences, n_rows, n_cols), dtype="int")
self.flux = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
self.flux_err = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
self.flux_bkg = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
self.flux_bkg_err = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
self.cosmic_rays = np.empty((n_cadences, n_rows, n_cols), dtype="float32")
# Set 3D data defaults
self.raw_cnts[:, :, :] = -1
self.flux[:, :, :] = np.nan
self.flux_err[:, :, :] = np.nan
self.flux_bkg[:, :, :] = np.nan
self.flux_bkg_err[:, :, :] = np.nan
self.cosmic_rays[:, :, :] = np.nan
# Initialize the 1D data structures
self.mjd = np.zeros(n_cadences, dtype="float64")
self.time = np.zeros(n_cadences, dtype="float64")
self.timecorr = np.zeros(n_cadences, dtype="float32")
self.cadenceno = np.zeros(n_cadences, dtype="int")
self.quality = np.zeros(n_cadences, dtype="int")
self.pos_corr1 = np.zeros(n_cadences, dtype="float32")
self.pos_corr2 = np.zeros(n_cadences, dtype="float32")
def add_cadence(
self,
frameno,
raw_cnts=None,
flux=None,
flux_err=None,
flux_bkg=None,
flux_bkg_err=None,
cosmic_rays=None,
header=None,
):
"""Populate the data for a single cadence."""
if frameno >= self.n_cadences:
raise FactoryError(
"Can not add cadence {}, n_cadences set to {}".format(
frameno, self.n_cadences
)
)
if header is None:
header = {}
# 2D-data
for col in [
"raw_cnts",
"flux",
"flux_err",
"flux_bkg",
"flux_bkg_err",
"cosmic_rays",
]:
if locals()[col] is not None:
if locals()[col].shape != (self.n_rows, self.n_cols):
raise FactoryError(
"Can not add cadence with a different shape ({} x {})".format(
self.n_rows, self.n_cols
)
)
vars(self)[col][frameno] = locals()[col]
# 1D-data
if "TSTART" in header and "TSTOP" in header:
self.time[frameno] = (header["TSTART"] + header["TSTOP"]) / 2.0
if "TIMECORR" in header:
self.timecorr[frameno] = header["TIMECORR"]
if "CADENCEN" in header:
self.cadenceno[frameno] = header["CADENCEN"]
if "QUALITY" in header:
self.quality[frameno] = header["QUALITY"]
if "POS_CORR1" in header:
self.pos_corr1[frameno] = header["POS_CORR1"]
if "POS_CORR2" in header:
self.pos_corr2[frameno] = header["POS_CORR2"]
def _check_data(self):
"""Check the data before writing to a TPF for any obvious errors."""
if len(self.time) != len(np.unique(self.time)):
warnings.warn(
"The factory-created TPF contains cadences with "
"identical TIME values.",
LightkurveWarning,
)
if ~np.all(self.time == np.sort(self.time)):
warnings.warn(
"Cadences in the factory-created TPF do not appear "
"to be sorted in chronological order.",
LightkurveWarning,
)
if np.nansum(self.flux) == 0:
warnings.warn(
"The factory-created TPF does not appear to contain "
"non-zero flux values.",
LightkurveWarning,
)
def get_tpf(self, hdu0_keywords=None, ext_info=None, **kwargs):
"""Returns a TargetPixelFile object."""
if hdu0_keywords is None:
hdu0_keywords = {}
if ext_info is None:
ext_info = {}
self._check_data()
# Detect filetype
hdulist = self._hdulist(hdu0_keywords=hdu0_keywords, ext_info=ext_info)
filetype = detect_filetype(hdulist)
if filetype == "TessTargetPixelFile":
tpf = TessTargetPixelFile(hdulist, **kwargs)
elif filetype == "KeplerTargetPixelFile":
tpf = KeplerTargetPixelFile(hdulist, **kwargs)
else:
warnings.warn(
"Could not detect filetype as TESSTargetPixelFile or KeplerTargetPixelFile, "
"returning generic TargetPixelFile instead.",
LightkurveWarning,
)
tpf = TargetPixelFile(hdulist, **kwargs)
return tpf
def _hdulist(self, hdu0_keywords, ext_info):
"""Returns an astropy.io.fits.HDUList object."""
return fits.HDUList(
[
self._make_primary_hdu(hdu0_keywords=hdu0_keywords),
self._make_target_extension(ext_info=ext_info),
self._make_aperture_extension(),
]
)
def _header_template(self, extension):
"""Returns a template `fits.Header` object for a given extension."""
template_fn = os.path.join(
PACKAGEDIR, "data", "tpf-ext{}-header.txt".format(extension)
)
return fits.Header.fromtextfile(template_fn)
def _make_primary_hdu(self, hdu0_keywords):
"""Returns the primary extension (#0)."""
hdu = fits.PrimaryHDU()
# Copy the default keywords from a template file from the MAST archive
tmpl = self._header_template(0)
for kw in tmpl:
hdu.header[kw] = (tmpl[kw], tmpl.comments[kw])
# Override the defaults where necessary
hdu.header["ORIGIN"] = "Unofficial data product"
hdu.header["DATE"] = datetime.datetime.now().strftime("%Y-%m-%d")
hdu.header["CREATOR"] = "lightkurve.TargetPixelFileFactory"
hdu.header["OBJECT"] = self.target_id
if hdu.header["TELESCOP"] is not None and hdu.header["TELESCOP"] == "Kepler":
hdu.header["KEPLERID"] = self.target_id
# Empty a bunch of keywords rather than having incorrect info
for kw in [
"PROCVER",
"FILEVER",
"CHANNEL",
"MODULE",
"OUTPUT",
"TIMVERSN",
"CAMPAIGN",
"DATA_REL",
"TTABLEID",
"RA_OBJ",
"DEC_OBJ",
]:
hdu.header[kw] = ""
# Some keywords just shouldn't be passed to the new header.
bad_keys = [
"ORIGIN",
"DATE",
"OBJECT",
"SIMPLE",
"BITPIX",
"NAXIS",
"EXTEND",
"NEXTEND",
"EXTNAME",
"NAXIS1",
"NAXIS2",
"QUALITY",
]
for kw, val in hdu0_keywords.items():
if kw in bad_keys:
continue
if kw in hdu.header:
hdu.header[kw] = val
else:
hdu.header.append((kw, val))
return hdu
def _make_target_extension(self, ext_info):
"""Create the 'TARGETTABLES' extension (i.e. extension #1)."""
# Turn the data arrays into fits columns and initialize the HDU
coldim = "({},{})".format(self.n_cols, self.n_rows)
eformat = "{}E".format(self.n_rows * self.n_cols)
jformat = "{}J".format(self.n_rows * self.n_cols)
cols = []
cols.append(
fits.Column(name="TIME", format="D", unit="BJD - 2454833", array=self.time)
)
cols.append(
fits.Column(name="TIMECORR", format="E", unit="D", array=self.timecorr)
)
cols.append(fits.Column(name="CADENCENO", format="J", array=self.cadenceno))
cols.append(
fits.Column(
name="RAW_CNTS",
format=jformat,
unit="count",
dim=coldim,
array=self.raw_cnts,
)
)
cols.append(
fits.Column(
name="FLUX", format=eformat, unit="e-/s", dim=coldim, array=self.flux
)
)
cols.append(
fits.Column(
name="FLUX_ERR",
format=eformat,
unit="e-/s",
dim=coldim,
array=self.flux_err,
)
)
cols.append(
fits.Column(
name="FLUX_BKG",
format=eformat,
unit="e-/s",
dim=coldim,
array=self.flux_bkg,
)
)
cols.append(
fits.Column(
name="FLUX_BKG_ERR",
format=eformat,
unit="e-/s",
dim=coldim,
array=self.flux_bkg_err,
)
)
cols.append(
fits.Column(
name="COSMIC_RAYS",
format=eformat,
unit="e-/s",
dim=coldim,
array=self.cosmic_rays,
)
)
cols.append(fits.Column(name="QUALITY", format="J", array=self.quality))
cols.append(
fits.Column(
name="POS_CORR1", format="E", unit="pixels", array=self.pos_corr1
)
)
cols.append(
fits.Column(
name="POS_CORR2", format="E", unit="pixels", array=self.pos_corr2
)
)
coldefs = fits.ColDefs(cols)
hdu = fits.BinTableHDU.from_columns(coldefs)
# Set the header with defaults
template = self._header_template(1)
for kw in template:
if kw not in ["XTENSION", "NAXIS1", "NAXIS2", "CHECKSUM", "BITPIX"]:
try:
hdu.header[kw] = (self.keywords[kw], self.keywords.comments[kw])
except KeyError:
hdu.header[kw] = (template[kw], template.comments[kw])
wcs_keywords = {
"CTYPE1": "1CTYP{}",
"CTYPE2": "2CTYP{}",
"CRPIX1": "1CRPX{}",
"CRPIX2": "2CRPX{}",
"CRVAL1": "1CRVL{}",
"CRVAL2": "2CRVL{}",
"CUNIT1": "1CUNI{}",
"CUNIT2": "2CUNI{}",
"CDELT1": "1CDLT{}",
"CDELT2": "2CDLT{}",
"PC1_1": "11PC{}",
"PC1_2": "12PC{}",
"PC2_1": "21PC{}",
"PC2_2": "22PC{}",
}
# Override defaults using data calculated in from_fits_images
for kw in ext_info.keys():
if kw in wcs_keywords.keys():
for x in [4, 5, 6, 7, 8, 9]:
hdu.header[wcs_keywords[kw].format(x)] = ext_info[kw]
else:
hdu.header[kw] = ext_info[kw]
return hdu
def _make_aperture_extension(self):
"""Create the aperture mask extension (i.e. extension #2)."""
mask = 3 * np.ones((self.n_rows, self.n_cols), dtype="int32")
hdu = fits.ImageHDU(mask)
# Set the header from the template TPF again
template = self._header_template(2)
for kw in template:
if kw not in ["XTENSION", "NAXIS1", "NAXIS2", "CHECKSUM", "BITPIX"]:
try:
hdu.header[kw] = (self.keywords[kw], self.keywords.comments[kw])
except KeyError:
hdu.header[kw] = (template[kw], template.comments[kw])
# Override the defaults where necessary
for keyword in [
"CTYPE1",
"CTYPE2",
"CRPIX1",
"CRPIX2",
"CRVAL1",
"CRVAL2",
"CUNIT1",
"CUNIT2",
"CDELT1",
"CDELT2",
"PC1_1",
"PC1_2",
"PC2_1",
"PC2_2",
]:
hdu.header[keyword] = "" # override wcs keywords
hdu.header["EXTNAME"] = "APERTURE"
return hdu
class TessTargetPixelFile(TargetPixelFile):
"""Represents pixel data products created by NASA's TESS pipeline.
This class enables extraction of custom light curves and centroid positions.
Parameters
----------
path : str
Path to a TESS Target Pixel (FITS) File.
quality_bitmask : "none", "default", "hard", "hardest", or int
Bitmask that should be used to ignore bad-quality cadences.
If a string is passed, it has the following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with severe quality issues will be ignored
(`quality_bitmask=175`).
* "hard": more conservative choice of flags to ignore
(`quality_bitmask=7407`). This is known to remove good data.
* "hardest": removes all data that has been flagged
(`quality_bitmask=8191`). This mask is not recommended.
If an integer is passed, it will be used as a bitmask, i.e. it will
have the effect of removing cadences where
``(tpf.hdu[1].data['QUALITY'] & quality_bitmask) > 0``.
See the :class:`KeplerQualityFlags` class for details on the bitmasks.
kwargs : dict
Keyword arguments passed to `astropy.io.fits.open()`.
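    Examples
    --------
    A minimal sketch of typical usage (the file name is hypothetical; any
    TESS TPF will do)::
        >>> import lightkurve as lk
        >>> tpf = lk.TessTargetPixelFile("tess-tpf.fits")  # doctest: +SKIP
        >>> lc = tpf.to_lightcurve(aperture_mask="default")  # doctest: +SKIP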
"""
def __init__(self, path, quality_bitmask="default", **kwargs):
super(TessTargetPixelFile, self).__init__(
path, quality_bitmask=quality_bitmask, **kwargs
)
self.quality_mask = TessQualityFlags.create_quality_mask(
quality_array=self.hdu[1].data["QUALITY"], bitmask=quality_bitmask
)
# Early TESS releases had cadences with time=NaN (i.e. missing data)
# which were not flagged by a QUALITY flag yet; the line below prevents
# these cadences from being used. They would break most methods!
if (quality_bitmask != 0) and (quality_bitmask != "none"):
self.quality_mask &= np.isfinite(self.hdu[1].data["TIME"])
# check to make sure the correct filetype has been provided
filetype = detect_filetype(self.hdu)
if filetype == "KeplerTargetPixelFile":
warnings.warn(
"A Kepler data product is being opened using the "
"`TessTargetPixelFile` class. "
"Please use `KeplerTargetPixelFile` instead.",
LightkurveWarning,
)
elif filetype is None:
warnings.warn(
"File header not recognized as Kepler or TESS " "observation.",
LightkurveWarning,
)
# Use the TICID keyword as the default targetid
if self.targetid is None:
self.targetid = self.get_header().get("TICID")
def __repr__(self):
return "TessTargetPixelFile(TICID: {})".format(self.targetid)
@property
def background_mask(self):
"""Returns the background mask used by the TESS pipeline."""
# The TESS pipeline flags the pixels in the background aperture using
# bit number 4, cf. Section 6 of the TESS Data Products documentation
# (EXP-TESS-ARC-ICD-TM-0014.pdf).
try:
return self.hdu[2].data & 4 > 0
except (IndexError, TypeError):
# `IndexError` may be raised if the aperture extension (#2) is missing
# `TypeError` may be raised because early versions of TESScut returned floats in HDU 2
return np.zeros(self.hdu[1].data["FLUX"][0].shape, dtype=bool)
@property
def sector(self):
"""TESS Sector number ('SECTOR' header keyword)."""
return self.get_keyword("SECTOR")
@property
def camera(self):
"""TESS Camera number ('CAMERA' header keyword)."""
return self.get_keyword("CAMERA")
@property
def ccd(self):
"""TESS CCD number ('CCD' header keyword)."""
return self.get_keyword("CCD")
@property
def mission(self):
return "TESS"
def extract_aperture_photometry(
self, aperture_mask="default", flux_method="sum", centroid_method="moments"
):
"""Returns a LightCurve obtained using aperture photometry.
Parameters
----------
aperture_mask : array-like, 'pipeline', 'threshold', 'default', or 'all'
A boolean array describing the aperture such that `True` means
that the pixel will be used.
If None or 'all' are passed, all pixels will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
If 'default' is passed, 'pipeline' mask will be used when available,
with 'threshold' as the fallback.
flux_method: 'sum', 'median', or 'mean'
Determines how the pixel values within the aperture mask are combined
at each cadence. Defaults to 'sum'.
centroid_method : str, 'moments' or 'quadratic'
            For details on this argument, please refer to the documentation
for `estimate_centroids()`.
Returns
-------
lc : TessLightCurve object
Contains the summed flux within the aperture for each cadence.
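        Examples
        --------
        A minimal sketch (``tpf`` is assumed to be an existing
        `TessTargetPixelFile`)::
            >>> lc = tpf.extract_aperture_photometry(aperture_mask="default")  # doctest: +SKIP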
"""
# explicitly resolve default, so that the aperture_mask set in meta
# later will be the resolved one
aperture_mask = self._resolve_default_aperture_mask(aperture_mask)
flux, flux_err, centroid_col, centroid_row = self._aperture_photometry(
aperture_mask=aperture_mask,
flux_method=flux_method,
centroid_method=centroid_method,
)
keys = {
"centroid_col": centroid_col,
"centroid_row": centroid_row,
"quality": self.quality,
"sector": self.sector,
"camera": self.camera,
"ccd": self.ccd,
"mission": self.mission,
"cadenceno": self.cadenceno,
"ra": self.ra,
"dec": self.dec,
"label": self.get_keyword("OBJECT", default=self.targetid),
"targetid": self.targetid,
}
meta = {"APERTURE_MASK": aperture_mask}
return TessLightCurve(
time=self.time, flux=flux, flux_err=flux_err, **keys, meta=meta
)
def get_bkg_lightcurve(self, aperture_mask=None):
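        """Returns the background flux light curve within ``aperture_mask``.
        The per-cadence background flux is summed over the mask pixels;
        the corresponding errors are added in quadrature.
        """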
aperture_mask = self._parse_aperture_mask(aperture_mask)
# Ignore warnings related to zero or negative errors
with warnings.catch_warnings():
warnings.simplefilter("ignore", RuntimeWarning)
flux_bkg_err = (
np.nansum(self.flux_bkg_err[:, aperture_mask] ** 2, axis=1) ** 0.5
)
keys = {
"quality": self.quality,
"sector": self.sector,
"camera": self.camera,
"ccd": self.ccd,
"cadenceno": self.cadenceno,
"ra": self.ra,
"dec": self.dec,
"label": self.get_keyword("OBJECT", default=self.targetid),
"targetid": self.targetid,
}
return TessLightCurve(
time=self.time,
flux=np.nansum(self.flux_bkg[:, aperture_mask], axis=1),
flux_err=flux_bkg_err,
**keys,
)
| 115945 | 38.981379 | 143 | py |
| lightkurve | lightkurve-main/src/lightkurve/periodogram.py |
"""Defines the Periodogram class and associated tools."""
from __future__ import division, print_function
import copy
import logging
import math
import re
import warnings
import numpy as np
from matplotlib import pyplot as plt
import astropy
from astropy.table import Table
from astropy import units as u
from astropy.units import cds
from astropy.convolution import convolve, Box1DKernel
from astropy.time import Time
from astropy.timeseries import LombScargle
from astropy.timeseries.periodograms.lombscargle import implementations # for .main._is_regular
from . import MPLSTYLE
from .utils import LightkurveWarning, validate_method
from .lightcurve import LightCurve
log = logging.getLogger(__name__)
__all__ = ["Periodogram", "LombScarglePeriodogram", "BoxLeastSquaresPeriodogram"]
class Periodogram(object):
"""Generic class to represent a power spectrum (frequency vs power data).
The Periodogram class represents a power spectrum, with values of
frequency on the x-axis (in any frequency units) and values of power on the
y-axis (in units of flux^2 / [frequency units]).
Attributes
----------
frequency : `~astropy.units.Quantity`
Array of frequencies as an AstroPy Quantity object.
power : `~astropy.units.Quantity`
Array of power-spectral-densities. The Quantity must have units of
`flux^2 / freq_unit`, where freq_unit is the unit of the frequency
attribute.
nyquist : float
The Nyquist frequency of the lightcurve. In units of freq_unit, where
freq_unit is the unit of the frequency attribute.
label : str
Human-friendly object label, e.g. "KIC 123456789".
targetid : str
Identifier of the target.
default_view : "frequency" or "period"
Should plots be shown in frequency space or period space by default?
meta : dict
Free-form metadata associated with the Periodogram.
"""
frequency = None
"""The array of frequency values."""
power = None
"""The array of power values."""
def __init__(
self,
frequency,
power,
nyquist=None,
label=None,
targetid=None,
default_view="frequency",
meta={},
):
# Input validation
if not isinstance(frequency, u.quantity.Quantity):
raise ValueError("frequency must be an `astropy.units.Quantity` object.")
if not isinstance(power, u.quantity.Quantity):
raise ValueError("power must be an `astropy.units.Quantity` object.")
# Frequency must have frequency units
try:
frequency.to(u.Hz)
except u.UnitConversionError:
raise ValueError("Frequency must be in units of 1/time.")
# Frequency and power must have sensible shapes
if frequency.shape[0] <= 1:
raise ValueError("frequency and power must have a length greater than 1.")
if frequency.shape != power.shape:
raise ValueError("frequency and power must have the same length.")
self.frequency = frequency
self.power = power
self.nyquist = nyquist
self.label = label
self.targetid = targetid
self.default_view = self._validate_view(default_view)
self.meta = meta
def _validate_view(self, view):
"""Verifies whether `view` is is one of {"frequency", "period"} and
raises a helpful `ValueError` if not.
"""
if view is None and hasattr(self, "default_view"):
view = self.default_view
return validate_method(view, ["frequency", "period"])
def _is_evenly_spaced(self):
"""Returns true if the values in ``frequency`` are evenly spaced.
This helper method exists because some features, such as ``smooth()``,
``estimate_numax()``, and ``estimate_deltanu()``, require a grid of
evenly-spaced frequencies.
"""
# verify that the first differences are all equal
freqdiff = np.diff(self.frequency.value)
if np.allclose(freqdiff[0], freqdiff):
return True
return False
@property
def period(self):
"""The array of periods, i.e. 1/frequency."""
return 1.0 / self.frequency
@property
def max_power(self):
"""Power of the highest peak in the periodogram."""
return np.nanmax(self.power)
@property
def frequency_at_max_power(self):
"""Frequency value corresponding to the highest peak in the periodogram."""
return self.frequency[np.nanargmax(self.power)]
@property
def period_at_max_power(self):
"""Period value corresponding to the highest peak in the periodogram."""
return 1.0 / self.frequency_at_max_power
def bin(self, binsize=10, method="mean"):
"""Bins the power spectrum.
Parameters
----------
binsize : int
            The factor by which to bin the power spectrum, in the sense that
            the power spectrum will be smoothed by combining every ``binsize``
            adjacent values using the mean (or median); the binned spectrum
            has length N / binsize, where N is the length of the original
            frequency array. Defaults to 10.
method : str, one of 'mean' or 'median'
Method to use for binning. Default is 'mean'.
Returns
-------
binned_periodogram : a `Periodogram` object
Returns a new `Periodogram` object which has been binned.
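        Examples
        --------
        A minimal sketch (``pg`` is assumed to be an existing
        `Periodogram`)::
            >>> binned_pg = pg.bin(binsize=20, method="median")  # doctest: +SKIP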
"""
# Input validation
if binsize < 1:
raise ValueError("binsize must be larger than or equal to 1")
method = validate_method(method, ["mean", "median"])
m = int(len(self.power) / binsize) # length of the binned arrays
if method == "mean":
binned_freq = self.frequency[: m * binsize].reshape((m, binsize)).mean(1)
binned_power = self.power[: m * binsize].reshape((m, binsize)).mean(1)
elif method == "median":
binned_freq = np.nanmedian(
self.frequency[: m * binsize].reshape((m, binsize)), axis=1
)
binned_power = np.nanmedian(
self.power[: m * binsize].reshape((m, binsize)), axis=1
)
binned_pg = self.copy()
binned_pg.frequency = binned_freq
binned_pg.power = binned_power
return binned_pg
def smooth(self, method="boxkernel", filter_width=0.1):
"""Smooths the power spectrum using the 'boxkernel' or 'logmedian' method.
If `method` is set to 'boxkernel', this method will smooth the power
        spectrum by convolving with an astropy `Box1DKernel` with a width of
        `filter_width`, where `filter_width` is in units of frequency.
This is best for filtering out noise while maintaining seismic mode
peaks. This method requires the Periodogram to have an evenly spaced
grid of frequencies. A `ValueError` exception will be raised if this is
not the case.
        If `method` is set to 'logmedian', it smooths the power spectrum using
        a moving median that steps across the power spectrum in increments of
        0.5 * filter_width, where `filter_width` is in log10(frequency) space.
        This is best for estimating the noise background, as it filters over
        the seismic peaks.
        Unsmoothed periodograms have multiplicative noise that is distributed
        as chi squared with 2 degrees of freedom. This noise distribution has
        a well-defined mean and median, but the two are not equivalent: the
        mean of a chi squared 2 dof distribution is 2, while the median is
        2(8/9)**3
        (see https://en.wikipedia.org/wiki/Chi-squared_distribution).
        In order to maintain consistency between 'boxkernel' and 'logmedian',
        a correction factor of (8/9)**3 is applied to the median values (i.e.,
        the median is divided by this factor).
        In addition to consistency with the 'boxkernel' method, the correction
        of the median values is useful when applying the periodogram `flatten`
        method. The flatten method divides the periodogram by the smoothed
        periodogram obtained with the 'logmedian' method. By applying the
        correction factor we follow the asteroseismic convention that the
        signal-to-noise power has a mean value of unity. (Note that the
        signal-to-noise power is really the signal plus noise divided by the
        noise, and hence should be unity in the absence of any signal.)
Parameters
----------
method : str, one of 'boxkernel' or 'logmedian'
The smoothing method to use. Defaults to 'boxkernel'.
filter_width : float
If `method` = 'boxkernel', this is the width of the smoothing filter
in units of frequency.
If method = `logmedian`, this is the width of the smoothing filter
in log10(frequency) space.
Returns
-------
smoothed_pg : `Periodogram` object
Returns a new `Periodogram` object in which the power spectrum
has been smoothed.
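        Examples
        --------
        A minimal sketch (``pg`` is assumed to be an evenly sampled
        `Periodogram`; the filter widths are arbitrary)::
            >>> smooth_pg = pg.smooth(method="boxkernel", filter_width=0.5)  # doctest: +SKIP
            >>> bkg_pg = pg.smooth(method="logmedian", filter_width=0.01)  # doctest: +SKIP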
"""
method = validate_method(method, ["boxkernel", "logmedian"])
if method == "boxkernel":
if filter_width <= 0.0:
raise ValueError(
"the `filter_width` parameter must be "
"larger than 0 for the 'boxkernel' method."
)
try:
filter_width = u.Quantity(filter_width, self.frequency.unit)
except u.UnitConversionError:
raise ValueError(
"the `filter_width` parameter must have " "frequency units."
)
# Check to see if we have a grid of evenly spaced periods instead.
if not self._is_evenly_spaced():
raise ValueError(
"the 'boxkernel' method requires the periodogram "
"to have a grid of evenly spaced frequencies."
)
fs = np.mean(np.diff(self.frequency))
box_kernel = Box1DKernel(math.ceil((filter_width / fs).value))
smooth_power = convolve(self.power.value, box_kernel)
smooth_pg = self.copy()
smooth_pg.power = u.Quantity(smooth_power, self.power.unit)
return smooth_pg
if method == "logmedian":
if isinstance(filter_width, astropy.units.quantity.Quantity):
raise ValueError(
"the 'logmedian' method requires a dimensionless "
"value for `filter_width` in log10(frequency) space."
)
count = np.zeros(len(self.frequency.value), dtype=int)
bkg = np.zeros_like(self.frequency.value)
x0 = np.log10(self.frequency[0].value)
corr_factor = (8.0 / 9.0) ** 3
while x0 < np.log10(self.frequency[-1].value):
m = np.abs(np.log10(self.frequency.value) - x0) < filter_width
                if len(bkg[m]) > 0:
bkg[m] += np.nanmedian(self.power[m].value) / corr_factor
count[m] += 1
x0 += 0.5 * filter_width
bkg /= count
smooth_pg = self.copy()
smooth_pg.power = u.Quantity(bkg, self.power.unit)
return smooth_pg
def plot(
self,
scale="linear",
ax=None,
xlabel=None,
ylabel=None,
title="",
style="lightkurve",
view=None,
unit=None,
**kwargs
):
"""Plots the Periodogram.
Parameters
----------
scale: str
Set x,y axis to be "linear" or "log". Default is linear.
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be generated.
xlabel : str
Plot x axis label
ylabel : str
Plot y axis label
title : str
            Plot title
style : str
Path or URL to a matplotlib style file, or name of one of
matplotlib's built-in stylesheets (e.g. 'ggplot').
Lightkurve's custom stylesheet is used by default.
view : str
{'frequency', 'period'}. Default 'frequency'. If 'frequency', x-axis
units will be frequency. If 'period', the x-axis units will be
period and 'log' scale.
        unit : `~astropy.units.Unit` or `~astropy.units.Quantity`
            Unit to use for the x-axis; defaults to the unit of the
            frequency attribute (or the period attribute if ``view='period'``).
        kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if isinstance(unit, u.quantity.Quantity):
unit = unit.unit
view = self._validate_view(view)
if unit is None:
unit = self.frequency.unit
if view == "period":
unit = self.period.unit
if style is None or style == "lightkurve":
style = MPLSTYLE
if ylabel is None:
ylabel = "Power"
if self.power.unit.to_string() != "":
unit_label = self.power.unit.to_string("latex")
# The line below is a workaround for AstroPy bug #9218.
# It can be removed once the fix for that issue is widespread.
# See https://github.com/astropy/astropy/pull/9218
unit_label = re.sub(
r"\^{([^}]+)}\^{([^}]+)}", r"^{\g<1>^{\g<2>}}", unit_label
)
ylabel += " [{}]".format(unit_label)
        # This will need to be fixed with housekeeping; self.label currently doesn't exist.
if ("label" not in kwargs) and ("label" in dir(self)):
kwargs["label"] = self.label
with plt.style.context(style):
if ax is None:
fig, ax = plt.subplots()
# Plot frequency and power
if view == "frequency":
ax.plot(self.frequency.to(unit), self.power, **kwargs)
if xlabel is None:
xlabel = "Frequency [{}]".format(unit.to_string("latex"))
elif view == "period":
ax.plot(self.period.to(unit), self.power, **kwargs)
if xlabel is None:
xlabel = "Period [{}]".format(unit.to_string("latex"))
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
# Show the legend if labels were set
legend_labels = ax.get_legend_handles_labels()
if np.sum([len(a) for a in legend_labels]) != 0:
ax.legend(loc="best")
ax.set_yscale(scale)
ax.set_xscale(scale)
ax.set_title(title)
return ax
def flatten(self, method="logmedian", filter_width=0.01, return_trend=False):
"""Estimates the Signal-To-Noise (SNR) spectrum by dividing out an
estimate of the noise background.
This method divides the power spectrum by a background estimated
using a moving filter in log10 space by default. For details on the
`method` and `filter_width` parameters, see `Periodogram.smooth()`
Dividing the power through by the noise background produces a spectrum
with no units of power. Since the signal is divided through by a measure
of the noise, we refer to this as a `Signal-To-Noise` spectrum.
Parameters
----------
method : str, one of 'boxkernel' or 'logmedian'
Background estimation method passed on to `Periodogram.smooth()`.
Defaults to 'logmedian'.
filter_width : float
If `method` = 'boxkernel', this is the width of the smoothing filter
in units of frequency.
If method = `logmedian`, this is the width of the smoothing filter
in log10(frequency) space.
return_trend : bool
If True, then the background estimate, alongside the SNR spectrum,
will be returned.
Returns
-------
snr_spectrum : `Periodogram` object
Returns a periodogram object where the power is an estimate of the
            signal-to-noise of the spectrum, created by dividing the power
            by a simple estimate of the noise background using a smoothing filter.
bkg : `Periodogram` object
The estimated power spectrum of the background noise. This is only
returned if `return_trend = True`.
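        Examples
        --------
        A minimal sketch (``pg`` is assumed to be an existing
        `Periodogram`)::
            >>> snr, bkg = pg.flatten(return_trend=True)  # doctest: +SKIP
            >>> snr.plot()  # doctest: +SKIP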
"""
bkg = self.smooth(method=method, filter_width=filter_width)
snr_pg = self / bkg.power
snr = SNRPeriodogram(
snr_pg.frequency,
snr_pg.power,
nyquist=self.nyquist,
targetid=self.targetid,
label=self.label,
meta=self.meta,
)
if return_trend:
return snr, bkg
return snr
def to_table(self):
"""Exports the Periodogram as an Astropy Table.
Returns
-------
table : `~astropy.table.Table` object
An AstroPy Table with columns 'frequency', 'period', and 'power'.
"""
return Table(
data=(self.frequency, self.period, self.power),
names=("frequency", "period", "power"),
meta=self.meta,
)
def copy(self):
"""Returns a copy of the Periodogram object.
This method uses the `copy.deepcopy` function to ensure that all
objects stored within the Periodogram are copied.
Returns
-------
pg_copy : Periodogram
A new `Periodogram` object which is a copy of the original.
"""
return copy.deepcopy(self)
def __repr__(self):
return "Periodogram(ID: {})".format(self.label)
def __getitem__(self, key):
copy_self = self.copy()
copy_self.frequency = self.frequency[key]
copy_self.power = self.power[key]
return copy_self
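    # The arithmetic operators below act on the ``power`` attribute and
    # return a modified copy; e.g. ``pg / 2`` halves the power values while
    # leaving the frequency grid intact.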
def __add__(self, other):
copy_self = self.copy()
copy_self.power = copy_self.power + u.Quantity(other, self.power.unit)
return copy_self
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
return self.__add__(-other)
def __rsub__(self, other):
copy_self = self.copy()
copy_self.power = other - copy_self.power
return copy_self
def __mul__(self, other):
copy_self = self.copy()
copy_self.power = other * copy_self.power
return copy_self
def __rmul__(self, other):
return self.__mul__(other)
def __truediv__(self, other):
return self.__mul__(1.0 / other)
def __rtruediv__(self, other):
copy_self = self.copy()
copy_self.power = other / copy_self.power
return copy_self
def __div__(self, other):
return self.__truediv__(other)
def __rdiv__(self, other):
return self.__rtruediv__(other)
def show_properties(self):
"""Prints a summary of the non-callable attributes of the Periodogram object.
Prints in order of type (ints, strings, lists, arrays and others).
Prints in alphabetical order.
"""
attrs = {}
for attr in dir(self):
if not attr.startswith("_"):
res = getattr(self, attr)
if callable(res):
continue
if isinstance(res, astropy.units.quantity.Quantity):
unit = res.unit
res = res.value
attrs[attr] = {"res": res}
attrs[attr]["unit"] = unit.to_string()
else:
attrs[attr] = {"res": res}
attrs[attr]["unit"] = ""
if attr == "hdu":
attrs[attr] = {"res": res, "type": "list"}
for idx, r in enumerate(res):
if idx == 0:
attrs[attr]["print"] = "{}".format(r.header["EXTNAME"])
else:
attrs[attr]["print"] = "{}, {}".format(
attrs[attr]["print"], "{}".format(r.header["EXTNAME"])
)
continue
if isinstance(res, int):
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "int"
elif isinstance(res, float):
attrs[attr]["print"] = "{}".format(np.round(res, 4))
attrs[attr]["type"] = "float"
elif isinstance(res, np.ndarray):
attrs[attr]["print"] = "array {}".format(res.shape)
attrs[attr]["type"] = "array"
elif isinstance(res, list):
attrs[attr]["print"] = "list length {}".format(len(res))
attrs[attr]["type"] = "list"
elif isinstance(res, str):
if res == "":
attrs[attr]["print"] = "{}".format("None")
else:
attrs[attr]["print"] = "{}".format(res)
attrs[attr]["type"] = "str"
elif attr == "wcs":
attrs[attr]["print"] = "astropy.wcs.wcs.WCS"
attrs[attr]["type"] = "other"
else:
attrs[attr]["print"] = "{}".format(type(res))
attrs[attr]["type"] = "other"
output = Table(
names=["Attribute", "Description", "Units"], dtype=[object, object, object]
)
idx = 0
types = ["int", "str", "float", "list", "array", "other"]
for typ in types:
for attr, dic in attrs.items():
if dic["type"] == typ:
output.add_row([attr, dic["print"], dic["unit"]])
idx += 1
print("lightkurve.Periodogram properties:")
output.pprint(max_lines=-1, max_width=-1)
def to_seismology(self, **kwargs):
"""Returns a `~lightkurve.seismology.Seismology` object to analyze the periodogram.
Returns
-------
seismology : `~lightkurve.seismology.Seismology`
Helper object to run asteroseismology methods.
"""
from .seismology import Seismology
return Seismology(self)
class SNRPeriodogram(Periodogram):
"""Defines a Signal-to-Noise Ratio (SNR) Periodogram class.
This class is nearly identical to the standard :class:`Periodogram` class,
but has different plotting defaults.
"""
def __init__(self, *args, **kwargs):
super(SNRPeriodogram, self).__init__(*args, **kwargs)
def __repr__(self):
return "SNRPeriodogram(ID: {})".format(self.label)
def plot(self, **kwargs):
"""Plot the SNR spectrum using matplotlib's `plot` method.
See `Periodogram.plot` for details on the accepted arguments.
Parameters
----------
kwargs : dict
            Dictionary of arguments to be passed to `Periodogram.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
ax = super(SNRPeriodogram, self).plot(**kwargs)
if "ylabel" not in kwargs:
ax.set_ylabel("Signal to Noise Ratio (SNR)")
return ax
class LombScarglePeriodogram(Periodogram):
"""Subclass of :class:`Periodogram <lightkurve.periodogram.Periodogram>`
representing a power spectrum generated using the Lomb Scargle method.
"""
def __init__(self, *args, **kwargs):
self._LS_object = kwargs.pop("ls_obj", None)
self.nterms = kwargs.pop("nterms", 1)
self.ls_method = kwargs.pop("ls_method", "fastchi2")
super(LombScarglePeriodogram, self).__init__(*args, **kwargs)
def __repr__(self):
return "LombScarglePeriodogram(ID: {})".format(self.label)
@staticmethod
def from_lightcurve(
lc,
minimum_frequency=None,
maximum_frequency=None,
minimum_period=None,
maximum_period=None,
frequency=None,
period=None,
nterms=1,
nyquist_factor=1,
oversample_factor=None,
freq_unit=None,
normalization="amplitude",
ls_method="fast",
**kwargs
):
"""Creates a `Periodogram` from a LightCurve using the Lomb-Scargle method.
By default, the periodogram will be created for a regular grid of
frequencies from one frequency separation to the Nyquist frequency,
where the frequency separation is determined as 1 / the time baseline.
The min frequency and/or max frequency (or max period and/or min period)
can be passed to set custom limits for the frequency grid. Alternatively,
the user can provide a custom regular grid using the `frequency`
parameter or a custom regular grid of periods using the `period`
parameter.
The sampling of the spectrum can be changed using the
`oversample_factor` parameter. An oversampled spectrum
(oversample_factor > 1) is useful for displaying the full details
of the spectrum, allowing the frequencies and amplitudes to be
measured directly from the plot itself, with no fitting required.
This is recommended for most applications, with a value of 5 or
10. On the other hand, an oversample_factor of 1 means the spectrum
is critically sampled, where every point in the spectrum is
independent of the others. This may be used when Lorentzians are to
be fitted to modes in the power spectrum, in cases where the mode
lifetimes are shorter than the time-base of the data (which is
sometimes the case for solar-like oscillations). An
oversample_factor of 1 is suitable for these stars because the
modes are usually fully resolved. That is, the power from each mode
is spread over a range of frequencies due to damping. Hence, any
small error from measuring mode frequencies by taking the maximum
of the peak is negligible compared with the intrinsic linewidth of
the modes.
The `normalization` parameter will normalize the spectrum to either
power spectral density ("psd") or amplitude ("amplitude"). Users
doing asteroseismology on classical pulsators (e.g. delta Scutis)
typically prefer `normalization="amplitude"` because "amplitude"
has higher dynamic range (high and low peaks visible
simultaneously), and we often want to read off amplitudes from the
plot. If `normalization="amplitude"`, the default value for
`oversample_factor` is set to 5 and `freq_unit` is 1/day.
Alternatively, users doing asteroseismology on solar-like
oscillators tend to prefer `normalization="psd"` because power
density has a scaled axis that depends on the length of the
observing time, and is used when we are interested in noise levels
(e.g. granulation) and are looking at damped oscillations. If
`normalization="psd"`, the default value for `oversample_factor` is
set to 1 and `freq_unit` is set to microHz. Default values of
`freq_unit` and `oversample_factor` can be overridden. See Appendix
A of Kjeldsen & Bedding, 1995 for a full discussion of
normalization and measurement of oscillation amplitudes
(http://adsabs.harvard.edu/abs/1995A%26A...293...87K).
The parameter nterms controls how many Fourier terms are used in the
model. Setting the Nyquist_factor to be greater than 1 will sample the
space beyond the Nyquist frequency, which may introduce aliasing.
The `freq_unit` parameter allows a request for alternative units in frequency
space. By default frequency is in (1/day) and power in (amplitude).
Asteroseismologists for example may want frequency in (microHz)
in which case they would pass `freq_unit=u.microhertz`.
By default this method uses the LombScargle 'fast' method, which assumes
a regular grid. If a regular grid of periods (i.e. an irregular grid of
        frequencies) is passed, it will use the 'slow' method. If nterms > 1 is passed, it
will use the 'fastchi2' method for regular grids, and 'chi2' for
irregular grids.
Caution: this method assumes that the LightCurve's time (lc.time)
is given in units of days.
Parameters
----------
lc : LightCurve object
The LightCurve from which to compute the Periodogram.
minimum_frequency : float
If specified, use this minimum frequency rather than one over the
time baseline.
maximum_frequency : float
If specified, use this maximum frequency rather than nyquist_factor
times the nyquist frequency.
minimum_period : float
            If specified, use 1./minimum_period as the maximum frequency rather
than nyquist_factor times the nyquist frequency.
maximum_period : float
If specified, use 1./maximum_period as the minimum frequency rather
than one over the time baseline.
frequency : array-like
The grid of frequencies to use. If given a unit, it is converted to
units of freq_unit. If not, it is assumed to be in units of
            freq_unit. This overrides any set frequency limits.
period : array-like
The grid of periods to use (as 1/period). If given a unit, it is
converted to units of freq_unit. If not, it is assumed to be in
units of 1/freq_unit. This overrides any set period limits.
nterms : int
Default 1. Number of terms to use in the Fourier fit.
nyquist_factor : int
Default 1. The multiple of the average Nyquist frequency. Is
            overridden by maximum_frequency (or minimum_period).
oversample_factor : int
Default: None. The frequency spacing, determined by the time
baseline of the lightcurve, is divided by this factor, oversampling
the frequency space. This parameter is identical to the
samples_per_peak parameter in astropy.LombScargle(). If
normalization='amplitude', oversample_factor will be set to 5. If
normalization='psd', it will be 1. These defaults can be
overridden.
freq_unit : `astropy.units.core.CompositeUnit`
Default: None. The desired frequency units for the Lomb Scargle
periodogram. This implies that 1/freq_unit is the units for period.
With default normalization ('amplitude'), the freq_unit is set to
1/day, which can be overridden. 'psd' normalization will set
freq_unit to microhertz.
normalization : 'psd' or 'amplitude'
Default: `'amplitude'`. The desired normalization of the spectrum.
Can be either power spectral density (`'psd'`) or amplitude
(`'amplitude'`).
ls_method : str
Default: `'fast'`. Passed to the `method` keyword of
`astropy.stats.LombScargle()`.
kwargs : dict
Keyword arguments passed to `astropy.stats.LombScargle()`
Returns
-------
Periodogram : `Periodogram` object
Returns a Periodogram object extracted from the lightcurve.
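        Examples
        --------
        A minimal sketch (``lc`` is assumed to be an existing `LightCurve`
        with time in units of days)::
            >>> pg = LombScarglePeriodogram.from_lightcurve(lc, normalization="psd")  # doctest: +SKIP
            >>> pg.frequency_at_max_power  # doctest: +SKIP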
"""
# Input validation
normalization = validate_method(normalization, ["psd", "amplitude"])
if np.isnan(lc.flux).any() or (hasattr(lc.flux, 'unmasked') and np.isnan(lc.flux.unmasked).any()):
lc = lc.remove_nans()
log.debug(
"Lightcurve contains NaN values."
"These are removed before creating the periodogram."
)
# Setting default frequency units
if freq_unit is None:
freq_unit = 1 / u.day if normalization == "amplitude" else u.microhertz
# Default oversample factor
if oversample_factor is None:
oversample_factor = 5.0 if normalization == "amplitude" else 1.0
if "min_period" in kwargs:
warnings.warn(
"`min_period` keyword is deprecated, "
"please use `minimum_period` instead.",
LightkurveWarning,
)
minimum_period = kwargs.pop("min_period", None)
if "max_period" in kwargs:
warnings.warn(
"`max_period` keyword is deprecated, "
"please use `maximum_period` instead.",
LightkurveWarning,
)
maximum_period = kwargs.pop("max_period", None)
if "min_frequency" in kwargs:
warnings.warn(
"`min_frequency` keyword is deprecated, "
"please use `minimum_frequency` instead.",
LightkurveWarning,
)
minimum_frequency = kwargs.pop("min_frequency", None)
if "max_frequency" in kwargs:
warnings.warn(
"`max_frequency` keyword is deprecated, "
"please use `maximum_frequency` instead.",
LightkurveWarning,
)
maximum_frequency = kwargs.pop("max_frequency", None)
# Check if any values of period have been passed and set format accordingly
if not all(b is None for b in [period, minimum_period, maximum_period]):
default_view = "period"
else:
default_view = "frequency"
# If period and frequency keywords have both been set, throw an error
if (not all(b is None for b in [period, minimum_period, maximum_period])) & (
not all(
b is None for b in [frequency, minimum_frequency, maximum_frequency]
)
):
raise ValueError(
"You have input keyword arguments for both frequency and period. "
"Please only use one."
)
time = lc.time.copy()
# Approximate Nyquist Frequency and frequency bin width in terms of days
nyquist = 0.5 * (1.0 / (np.median(np.diff(time.value)))) * (1 / cds.d)
fs = (1.0 / (time[-1] - time[0])) / oversample_factor
# Convert these values to requested frequency unit
nyquist = nyquist.to(freq_unit)
fs = fs.to(freq_unit)
# Warn if there is confusing input
if (frequency is not None) & (
any([a is not None for a in [minimum_frequency, maximum_frequency]])
):
log.warning(
"You have passed both a grid of frequencies "
"and min_frequency/maximum_frequency arguments; "
"the latter will be ignored."
)
if (period is not None) & (
any([a is not None for a in [minimum_period, maximum_period]])
):
log.warning(
"You have passed a grid of periods "
"and minimum_period/maximum_period arguments; "
"the latter will be ignored."
)
# Tidy up the period stuff...
if maximum_period is not None:
# minimum_frequency MUST be none by this point.
minimum_frequency = 1.0 / maximum_period
if minimum_period is not None:
# maximum_frequency MUST be none by this point.
maximum_frequency = 1.0 / minimum_period
# If the user specified a period, copy it into the frequency.
if period is not None:
frequency = 1.0 / period
# Do unit conversions if user input min/max frequency or period
if frequency is None:
if minimum_frequency is not None:
minimum_frequency = u.Quantity(minimum_frequency, freq_unit)
if maximum_frequency is not None:
maximum_frequency = u.Quantity(maximum_frequency, freq_unit)
if (minimum_frequency is not None) & (maximum_frequency is not None):
if minimum_frequency > maximum_frequency:
if default_view == "frequency":
raise ValueError(
"minimum_frequency cannot be larger than maximum_frequency"
)
if default_view == "period":
raise ValueError(
"minimum_period cannot be larger than maximum_period"
)
# If nothing has been passed in, set them to the defaults
if minimum_frequency is None:
minimum_frequency = fs
if maximum_frequency is None:
maximum_frequency = nyquist * nyquist_factor
# Create frequency grid evenly spaced in frequency
frequency = np.arange(
minimum_frequency.value, maximum_frequency.value, fs.value
)
# Convert to desired units
frequency = u.Quantity(frequency, freq_unit)
# Change to compatible ls method if sampling not even in frequency
if not implementations.main._is_regular(frequency) and ls_method in [
"fastchi2",
"fast",
]:
oldmethod = ls_method
ls_method = {"fastchi2": "chi2", "fast": "slow"}[ls_method]
log.warning(
"The requested periodogram is not evenly sampled in frequency.\n"
"Method has been changed from '{}' to '{}' to allow for this.".format(
oldmethod, ls_method
)
)
if (nterms > 1) and (ls_method not in ["fastchi2", "chi2"]):
warnings.warn(
"Building a Lomb Scargle Periodogram using the `slow` method. "
"`nterms` has been set to >1, however this is not supported under the `{}` method. "
"To run with higher nterms, set `ls_method` to either 'fastchi2', or 'chi2'. "
"Please refer to the `astropy.timeseries.periodogram.LombScargle` documentation.".format(
ls_method
),
LightkurveWarning,
)
nterms = 1
        # Compare the major version as an integer; indexing the first
        # character of the version string would break for versions >= 10.
        if int(astropy.__version__.split(".")[0]) >= 3:
LS = LombScargle(
time, lc.flux, nterms=nterms, normalization="psd", **kwargs
)
power = LS.power(frequency, method=ls_method)
else:
LS = LombScargle(time, lc.flux, nterms=nterms, **kwargs)
power = LS.power(frequency, method=ls_method, normalization="psd")
if normalization == "psd": # Power spectral density
# Rescale from the unnormalized power output by Astropy's
# Lomb-Scargle function to units of flux_variance / [frequency unit]
# that may be of more interest for asteroseismology.
power *= 2.0 / (len(time) * oversample_factor * fs)
elif normalization == "amplitude":
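            # Rescale to an amplitude spectrum: sqrt(power) with the
            # conventional sqrt(4/N) factor, so that a pure sinusoid of
            # amplitude A produces a peak of height ~A.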
power = np.sqrt(power) * np.sqrt(4.0 / len(lc.time))
# Periodogram needs properties
return LombScarglePeriodogram(
frequency=frequency,
power=power,
nyquist=nyquist,
targetid=lc.meta.get("TARGETID"),
label=lc.meta.get("LABEL"),
default_view=default_view,
ls_obj=LS,
nterms=nterms,
ls_method=ls_method,
meta=lc.meta,
)
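    # A minimal usage sketch (not part of the API; assumes `lc` is a
    # LightCurve with approximately evenly sampled cadences):
    #
    #   >>> pg = LombScarglePeriodogram.from_lightcurve(lc, normalization="psd")  # doctest: +SKIP
    #   >>> pg.plot()  # doctest: +SKIP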
def model(self, time, frequency=None):
"""Obtain the flux model for a given frequency and time
Parameters
----------
time : np.ndarray
Time points to evaluate model.
        frequency : optional
            Frequency at which to evaluate the model. Defaults to
            `frequency_at_max_power`.
Returns
-------
result : lightkurve.LightCurve
Model object with the time and flux model
"""
if self._LS_object is None:
raise ValueError("No `astropy` Lomb Scargle object exists.")
if frequency is None:
frequency = self.frequency_at_max_power
f = self._LS_object.model(time, frequency)
lc = LightCurve(
time=time,
flux=f,
meta={"FREQUENCY": frequency},
label="LS Model",
targetid="{} LS Model".format(self.targetid),
)
return lc.normalize()
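    # Usage sketch for `model` (names are illustrative; `pg` is a
    # LombScarglePeriodogram built from a light curve `lc`):
    #
    #   >>> model_lc = pg.model(lc.time)  # doctest: +SKIP
    #   >>> model_lc.plot()  # doctest: +SKIP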
class BoxLeastSquaresPeriodogram(Periodogram):
"""Subclass of :class:`Periodogram <lightkurve.periodogram.Periodogram>`
representing a power spectrum generated using the Box Least Squares (BLS) method.
"""
def __init__(self, *args, **kwargs):
self.duration = kwargs.pop("duration", None)
self.depth = kwargs.pop("depth", None)
self.snr = kwargs.pop("snr", None)
self._BLS_result = kwargs.pop("bls_result", None)
self._BLS_object = kwargs.pop("bls_obj", None)
self.transit_time = kwargs.pop("transit_time", None)
self.time = kwargs.pop("time", None)
self.flux = kwargs.pop("flux", None)
self.time_unit = kwargs.pop("time_unit", None)
super(BoxLeastSquaresPeriodogram, self).__init__(*args, **kwargs)
def __repr__(self):
return "BoxLeastSquaresPeriodogram(ID: {})".format(self.label)
@staticmethod
def from_lightcurve(lc, **kwargs):
"""Creates a `Periodogram` from a LightCurve using the Box Least Squares (BLS) method.
Parameters
----------
lc : `LightCurve` object
The LightCurve from which to compute the Periodogram.
duration : float, array_like, or `~astropy.units.Quantity`, optional
The set of durations that will be considered.
            Defaults to `[0.05, 0.10, 0.15, 0.20, 0.25, 0.33]` if not specified.
period : array_like or `~astropy.units.Quantity`, optional
The periods where the Periodogram should be computed.
If not provided, a default will be created using
`BoxLeastSquares.autoperiod() <astropy.timeseries.BoxLeastSquares.autoperiod>`.
minimum_period, maximum_period : float or `~astropy.units.Quantity`, optional
If ``period`` is not provided, the minimum/maximum periods to search.
The defaults will be computed as described in the notes below.
frequency_factor : float, optional
If ``period`` is not provided, a factor to control the frequency spacing of periods
to be considered.
kwargs : dict
Keyword arguments passed to
`BoxLeastSquares.power() <astropy.timeseries.BoxLeastSquares.power>`
Returns
-------
Periodogram : `Periodogram` object
Returns a Periodogram object extracted from the lightcurve.
Notes
-----
If ``period`` is not provided, the default minimum period is computed from maximum duration and
the median observation time gap as
.. code-block:: python
minimum_period = max(median(diff(lc.time)) * 4,
max(duration) + median(diff(lc.time)))
The default maximum period is computed as
.. code-block:: python
maximum_period = (max(lc.time) - min(lc.time)) / 3
ensuring that any systems with at least 3 transits are within the range of searched periods.
"""
# BoxLeastSquares was added to `astropy.stats` in AstroPy v3.1 and then
# moved to `astropy.timeseries` in v3.2, which makes the import below
# somewhat complicated.
try:
from astropy.timeseries import BoxLeastSquares
except ImportError:
try:
from astropy.stats import BoxLeastSquares
except ImportError:
raise ImportError("BLS requires AstroPy v3.1 or later")
# Validate user input for `lc`
# (BoxLeastSquares will not work if flux or flux_err contain NaNs)
lc = lc.remove_nans()
if np.isfinite(lc.flux_err).all():
dy = lc.flux_err
else:
dy = None
# Validate user input for `duration`
duration = kwargs.pop("duration", [0.05, 0.10, 0.15, 0.20, 0.25, 0.33])
if duration is not None and ~np.all(np.isfinite(duration)):
raise ValueError(
"`duration` parameter contains illegal nan or inf value(s)"
)
# Validate user input for `period`
period = kwargs.pop("period", None)
minimum_period = kwargs.pop("minimum_period", None)
maximum_period = kwargs.pop("maximum_period", None)
if period is not None and ~np.all(np.isfinite(period)):
raise ValueError("`period` parameter contains illegal nan or inf value(s)")
if minimum_period is None:
if period is None:
minimum_period = np.max(
[
np.median(np.diff(lc.time.value)) * 4,
np.max(duration) + np.median(np.diff(lc.time.value)),
]
)
else:
minimum_period = np.min(period)
if maximum_period is None:
if period is None:
maximum_period = (np.max(lc.time.value) - np.min(lc.time.value)) / 3.0
else:
maximum_period = np.max(period)
# Validate user input for `time_unit`
time_unit = kwargs.pop("time_unit", "day")
if time_unit not in dir(u):
raise ValueError(
"{} is not a valid value for `time_unit`".format(time_unit)
)
# Validate user input for `frequency_factor`
frequency_factor = kwargs.pop("frequency_factor", 10)
df = (
frequency_factor
* np.min(duration)
/ (np.max(lc.time.value) - np.min(lc.time.value)) ** 2
)
npoints = int(((1 / minimum_period) - (1 / maximum_period)) / df)
if npoints > 1e7:
raise ValueError(
"`period` contains {} points."
"Periodogram is too large to evaluate. "
"Consider setting `frequency_factor` to a higher value."
"".format(np.round(npoints, 4))
)
elif npoints > 1e5:
log.warning(
"`period` contains {} points."
"Periodogram is likely to be large, and slow to evaluate. "
"Consider setting `frequency_factor` to a higher value."
"".format(np.round(npoints, 4))
)
# Create BLS object and run the BLS search
bls = BoxLeastSquares(lc.time, lc.flux, dy)
if period is None:
period = bls.autoperiod(
duration,
minimum_period=minimum_period,
maximum_period=maximum_period,
frequency_factor=frequency_factor,
)
result = bls.power(period, duration, **kwargs)
if not isinstance(result.period, u.quantity.Quantity):
result.period = u.Quantity(result.period, time_unit)
if not isinstance(result.power, u.quantity.Quantity):
result.power = result.power * u.dimensionless_unscaled
if not isinstance(result.duration, u.quantity.Quantity):
result.duration = u.Quantity(result.duration, time_unit)
return BoxLeastSquaresPeriodogram(
frequency=1.0 / result.period,
power=result.power,
default_view="period",
label=lc.meta.get("LABEL"),
targetid=lc.meta.get("TARGETID"),
transit_time=result.transit_time,
duration=result.duration,
depth=result.depth,
bls_result=result,
snr=result.depth_snr,
bls_obj=bls,
time=lc.time,
flux=lc.flux,
time_unit=time_unit,
)
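    # A minimal usage sketch (assumes `lc` is a flattened LightCurve; the
    # duration grid below is illustrative):
    #
    #   >>> bls = BoxLeastSquaresPeriodogram.from_lightcurve(
    #   ...     lc, duration=[0.1, 0.2])  # doctest: +SKIP
    #   >>> bls.period_at_max_power  # doctest: +SKIP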
def compute_stats(self, period=None, duration=None, transit_time=None):
"""Computes commonly used vetting statistics for a transit model.
See `~astropy.timeseries.BoxLeastSquares` docs for further details.
Parameters
----------
period : float or Quantity
Period of the transits. Default is `period_at_max_power`
duration : float or Quantity
Duration of the transits. Default is `duration_at_max_power`
transit_time : float or Quantity
Transit midpoint of the transits. Default is `transit_time_at_max_power`
Returns
-------
stats : dict
Dictionary of vetting statistics
"""
if period is None:
period = self.period_at_max_power
log.warning("No period specified. Using period at max power")
if duration is None:
duration = self.duration_at_max_power
log.warning("No duration specified. Using duration at max power")
if transit_time is None:
transit_time = self.transit_time_at_max_power
log.warning("No transit time specified. Using transit time at max power")
if not isinstance(transit_time, Time):
transit_time = Time(
transit_time, format=self.time.format, scale=self.time.scale
)
return self._BLS_object.compute_stats(
u.Quantity(period, "d").value, u.Quantity(duration, "d").value, transit_time
)
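    # Usage sketch: vet the strongest peak (all arguments default to the
    # corresponding *_at_max_power properties defined below):
    #
    #   >>> stats = bls.compute_stats()  # doctest: +SKIP
    #   >>> stats["depth"], stats["transit_times"]  # doctest: +SKIP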
def get_transit_model(self, period=None, duration=None, transit_time=None):
"""Computes the transit model using the BLS, returns a lightkurve.LightCurve
See `~astropy.timeseries.BoxLeastSquares` docs for further details.
Parameters
----------
period : float or Quantity
Period of the transits. Default is `period_at_max_power`
duration : float or Quantity
Duration of the transits. Default is `duration_at_max_power`
transit_time : float or Quantity
Transit midpoint of the transits. Default is `transit_time_at_max_power`
Returns
-------
model : lightkurve.LightCurve
Model of transit
"""
from .lightcurve import LightCurve
if period is None:
period = self.period_at_max_power
log.warning("No period specified. Using period at max power")
if duration is None:
duration = self.duration_at_max_power
log.warning("No duration specified. Using duration at max power")
if transit_time is None:
transit_time = self.transit_time_at_max_power
log.warning("No transit time specified. Using transit time at max power")
if not isinstance(transit_time, Time):
transit_time = Time(
transit_time, format=self.time.format, scale=self.time.scale
)
model_flux = self._BLS_object.model(
self.time,
u.Quantity(period, "d").value,
u.Quantity(duration, "d").value,
transit_time,
)
model = LightCurve(time=self.time, flux=model_flux, label="Transit Model Flux")
return model
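    # Usage sketch: overlay the box model on the folded light curve (`bls`
    # is a BoxLeastSquaresPeriodogram, `lc` the light curve it came from):
    #
    #   >>> p, t0 = bls.period_at_max_power, bls.transit_time_at_max_power  # doctest: +SKIP
    #   >>> ax = lc.fold(p, t0).scatter()  # doctest: +SKIP
    #   >>> bls.get_transit_model().fold(p, t0).plot(ax=ax)  # doctest: +SKIP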
def get_transit_mask(self, period=None, duration=None, transit_time=None):
"""Returns a boolean array that is ``True`` during transits and
``False`` elsewhere.
Parameters
----------
period : float or Quantity
Period of the transits. Default is `period_at_max_power`
duration : float or Quantity
Duration of the transits. Default is `duration_at_max_power`
transit_time : float or Quantity
Transit midpoint of the transits. Default is `transit_time_at_max_power`
Returns
-------
transit_mask : np.array of bool
Mask that flags transits. Mask is ``True`` where there are transits.
"""
model = self.get_transit_model(
period=period, duration=duration, transit_time=transit_time
)
return model.flux != np.median(model.flux)
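    # Usage sketch: drop in-transit cadences before further analysis:
    #
    #   >>> mask = bls.get_transit_mask()  # doctest: +SKIP
    #   >>> lc_no_transits = lc[~mask]  # doctest: +SKIP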
@property
def transit_time_at_max_power(self):
"""Returns the transit time corresponding to the highest peak in the periodogram."""
return self.transit_time[np.nanargmax(self.power)]
@property
def duration_at_max_power(self):
"""Returns the duration corresponding to the highest peak in the periodogram."""
return self.duration[np.nanargmax(self.power)]
@property
def depth_at_max_power(self):
"""Returns the depth corresponding to the highest peak in the periodogram."""
return self.depth[np.nanargmax(self.power)]
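    # Together with `period_at_max_power` (inherited from `Periodogram`),
    # the properties above summarize the best candidate transit, e.g.:
    #
    #   >>> bls.period_at_max_power, bls.duration_at_max_power  # doctest: +SKIP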
def plot(self, **kwargs):
"""Plot the BoxLeastSquaresPeriodogram spectrum using matplotlib's `plot` method.
See `Periodogram.plot` for details on the accepted arguments.
Parameters
----------
kwargs : dict
            Dictionary of arguments to be passed to `Periodogram.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
ax = super(BoxLeastSquaresPeriodogram, self).plot(**kwargs)
if "ylabel" not in kwargs:
ax.set_ylabel("BLS Power")
return ax
def flatten(self, **kwargs):
raise NotImplementedError(
"`flatten` is not implemented for `BoxLeastSquaresPeriodogram`."
)
def smooth(self, **kwargs):
raise NotImplementedError(
"`smooth` is not implemented for `BoxLeastSquaresPeriodogram`. "
)
| 53,767 | 39.487952 | 106 | py | lightkurve | lightkurve-main/src/lightkurve/__init__.py |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
PACKAGEDIR = os.path.abspath(os.path.dirname(__file__))
MPLSTYLE = "{}/data/lightkurve.mplstyle".format(PACKAGEDIR)
"""Lightkurve's stylesheet for matplotlib.
It is useful for users who create their own figures and want them to
follow Lightkurve's style.
Examples
--------
Create a scatter plot with a custom size using Lightkurve's style.
>>> with plt.style.context(MPLSTYLE): # doctest: +SKIP
>>> ax = plt.figure(figsize=(6, 3)).gca() # doctest: +SKIP
>>> lc.scatter(ax=ax) # doctest: +SKIP
"""
# Bibtex entry detailing how to cite the package
__citation__ = """@MISC{2018ascl.soft12013L,
author = {{Lightkurve Collaboration} and {Cardoso}, J.~V.~d.~M. and
{Hedges}, C. and {Gully-Santiago}, M. and {Saunders}, N. and
{Cody}, A.~M. and {Barclay}, T. and {Hall}, O. and
{Sagear}, S. and {Turtelboom}, E. and {Zhang}, J. and
{Tzanidakis}, A. and {Mighell}, K. and {Coughlin}, J. and
{Bell}, K. and {Berta-Thompson}, Z. and {Williams}, P. and
{Dotson}, J. and {Barentsen}, G.},
title = "{Lightkurve: Kepler and TESS time series analysis in Python}",
keywords = {Software, NASA},
howpublished = {Astrophysics Source Code Library},
year = 2018,
month = dec,
archivePrefix = "ascl",
eprint = {1812.013},
adsurl = {http://adsabs.harvard.edu/abs/2018ascl.soft12013L},
}"""
import logging
log = logging.getLogger(__name__)
log.addHandler(logging.StreamHandler())
from . import config as _config
class Conf(_config.ConfigNamespace):
"""
Configuration parameters for `lightkurve`.
Refer to `astropy.config.ConfigNamespace` for API details.
Refer to `Astropy documentation <https://docs.astropy.org/en/stable/config/index.html#accessing-values>`_
for usage.
The attributes listed below are the available configuration parameters.
Attributes
----------
search_result_display_extra_columns
List of extra columns to be included when displaying a SearchResult object.
cache_dir
Default cache directory for data files downloaded, etc. Defaults to ``~/.lightkurve/cache`` if not specified.
warn_legacy_cache_dir
If set to True, issue warning if the legacy default cache directory exists. Default is True.
"""
    # Note on how astropy parses config file values for the `list` and
    # `string_list` datatypes:
    # - a Python list literal is not accepted
    # - a comma-separated list of strings is accepted
    # - a single value needs to end with a trailing comma
    # see: https://configobj.readthedocs.io/en/latest/configobj.html#the-config-file-format
search_result_display_extra_columns = _config.ConfigItem(
[],
"List of extra columns to be included when displaying a SearchResult object.",
cfgtype="string_list",
module="lightkurve.search"
)
cache_dir = _config.ConfigItem(
None,
"Default cache directory for data files downloaded, etc.",
cfgtype="string",
module="lightkurve.config"
)
warn_legacy_cache_dir = _config.ConfigItem(
True,
"If set to True, issue warning if the legacy default cache directory exists.",
cfgtype="boolean",
module="lightkurve.config"
)
conf = Conf()
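# Usage sketch for the configuration namespace (the column name below is
# illustrative):
#
#   >>> import lightkurve as lk
#   >>> lk.conf.search_result_display_extra_columns = ["proposal_id"]  # doctest: +SKIP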
from .version import __version__
from . import units # enable ppt and ppm as units
from .time import *
from .lightcurve import *
from .lightcurvefile import *
from .correctors import *
from .targetpixelfile import *
from .utils import *
from .convenience import *
from .collections import *
from .io import *
from .search import *
from . import config
config.warn_if_default_cache_dir_migration_needed()
| 3,896 | 31.747899 | 117 | py | lightkurve | lightkurve-main/src/lightkurve/search.py |
"""Defines tools to retrieve Kepler data from the archive at MAST."""
from __future__ import division
import glob
import logging
import os
import re
import warnings
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from astropy.io import ascii
from astropy.table import Row, Table, join
from astropy.time import Time
from astropy.utils import deprecated
from memoization import cached
from requests import HTTPError
from . import PACKAGEDIR, conf, config
from .collections import LightCurveCollection, TargetPixelFileCollection
from .io import read
from .targetpixelfile import TargetPixelFile
from .utils import (
LightkurveDeprecationWarning,
LightkurveError,
LightkurveWarning,
suppress_stdout,
)
log = logging.getLogger(__name__)
__all__ = [
"search_targetpixelfile",
"search_lightcurve",
"search_lightcurvefile",
"search_tesscut",
"SearchResult",
]
# Which external links should we display in the SearchResult repr?
AUTHOR_LINKS = {
"Kepler": "https://archive.stsci.edu/kepler/data_products.html",
"K2": "https://archive.stsci.edu/k2/data_products.html",
"SPOC": "https://heasarc.gsfc.nasa.gov/docs/tess/pipeline.html",
"TESS-SPOC": "https://archive.stsci.edu/hlsp/tess-spoc",
"QLP": "https://archive.stsci.edu/hlsp/qlp",
"TASOC": "https://archive.stsci.edu/hlsp/tasoc",
"PATHOS": "https://archive.stsci.edu/hlsp/pathos",
"CDIPS": "https://archive.stsci.edu/hlsp/cdips",
"K2SFF": "https://archive.stsci.edu/hlsp/k2sff",
"EVEREST": "https://archive.stsci.edu/hlsp/everest",
"TESScut": "https://mast.stsci.edu/tesscut/",
"GSFC-ELEANOR-LITE": "https://archive.stsci.edu/hlsp/gsfc-eleanor-lite",
"TGLC": "https://archive.stsci.edu/hlsp/tglc",
}
REPR_COLUMNS_BASE = [
"#",
"mission",
"year",
"author",
"exptime",
"target_name",
"distance",
]
class SearchError(Exception):
pass
class SearchResult(object):
"""Container for the results returned by the search functions.
The purpose of this class is to provide a convenient way to inspect and
download products that have been identified using one of the data search
functions.
Parameters
----------
table : `~astropy.table.Table` object
Astropy table returned by a join of the astroquery `Observations.query_criteria()`
and `Observations.get_product_list()` methods.
"""
table = None
"""`~astropy.table.Table` containing the full search results returned by the MAST API."""
display_extra_columns = []
"""A list of extra columns to be included in the default display of the search result.
It can be configured in a few different ways.
For example, to include ``proposal_id`` in the default display, users can set it:
1. in the user's ``lightkurve.cfg`` file::
[search]
# The extra comma at the end is needed for a single extra column
search_result_display_extra_columns = proposal_id,
2. at run time::
import lightkurve as lk
lk.conf.search_result_display_extra_columns = ['proposal_id']
3. for a specific `SearchResult` object instance::
result.display_extra_columns = ['proposal_id']
See :ref:`configuration <api.config>` for more information.
"""
def __init__(self, table=None):
if table is None:
self.table = Table()
else:
self.table = table
if len(table) > 0:
self._add_columns()
self._sort_table()
self.display_extra_columns = conf.search_result_display_extra_columns
def _sort_table(self):
"""Sort the table of search results by distance, author, and filename.
The reason we include "author" in the sort criteria is that Lightkurve v1 only
showed data products created by the official pipelines (i.e. author equal to
"Kepler", "K2", or "SPOC"). To maintain backwards compatibility, we want to
show products from these authors at the top, so that `search.download()`
operations tend to download the same product in Lightkurve v1 vs v2.
This ordering is not a judgement on the quality of one product vs another,
because we love all pipelines!
"""
sort_priority = {"Kepler": 1, "K2": 1, "SPOC": 1, "TESS-SPOC": 2, "QLP": 3}
self.table["sort_order"] = [
sort_priority.get(author, 9) for author in self.table["author"]
]
self.table.sort(["distance", "year", "mission", "sort_order", "exptime"])
def _add_columns(self):
"""Adds a user-friendly index (``#``) column and adds column unit
and display format information.
"""
if "#" not in self.table.columns:
self.table["#"] = None
self.table["exptime"].unit = "s"
self.table["exptime"].format = ".0f"
self.table["distance"].unit = "arcsec"
# Add the year column from `t_min` or `productFilename`
year = np.floor(Time(self.table["t_min"], format="mjd").decimalyear)
self.table["year"] = year.astype(int)
# `t_min` is incorrect for Kepler products, so we extract year from the filename for those =(
for idx in np.where(self.table["author"] == "Kepler")[0]:
self.table["year"][idx] = re.findall(
r"\d+.(\d{4})\d+", self.table["productFilename"][idx]
)[0]
def __repr__(self, html=False):
def to_tess_gi_url(proposal_id):
if re.match("^G0[12].+", proposal_id) is not None:
return f"https://heasarc.gsfc.nasa.gov/docs/tess/approved-programs-primary.html#:~:text={proposal_id}"
elif re.match("^G0[34].+", proposal_id) is not None:
return f"https://heasarc.gsfc.nasa.gov/docs/tess/approved-programs-em1.html#:~:text={proposal_id}"
else:
return f"https://heasarc.gsfc.nasa.gov/docs/tess/approved-programs.html#:~:text={proposal_id}"
out = "SearchResult containing {} data products.".format(len(self.table))
if len(self.table) == 0:
return out
columns = REPR_COLUMNS_BASE
if self.display_extra_columns is not None:
columns = REPR_COLUMNS_BASE + self.display_extra_columns
# search_tesscut() has fewer columns, ensure we don't try to display columns that do not exist
columns = [c for c in columns if c in self.table.colnames]
self.table["#"] = [idx for idx in range(len(self.table))]
out += "\n\n" + "\n".join(self.table[columns].pformat(max_width=300, html=html))
# Make sure author names show up as clickable links
if html:
for author, url in AUTHOR_LINKS.items():
out = out.replace(f">{author}<", f"><a href='{url}'>{author}</a><")
            # special HTML formatting for TESS proposal_id
tess_table = self.table[self.table["project"] == "TESS"]
if "proposal_id" in tess_table.colnames:
proposal_id_col = np.unique(tess_table["proposal_id"])
else:
proposal_id_col = []
for p_ids in proposal_id_col:
# for CDIPS products, proposal_id is a np MaskedConstant, not a string
if p_ids == "N/A" or (not isinstance(p_ids, str)):
continue
                # handle cases with multiple proposals, e.g., G12345_G67890
p_id_links = [
f"""\
<a href='{to_tess_gi_url(p_id)}'>{p_id}</a>\
"""
for p_id in p_ids.split("_")
]
out = out.replace(f">{p_ids}<", f">{' , '.join(p_id_links)}<")
return out
def _repr_html_(self):
return self.__repr__(html=True)
def __getitem__(self, key):
"""Implements indexing and slicing, e.g. SearchResult[2:5]."""
selection = self.table[key]
# Indexing a Table with an integer will return a Row
if isinstance(selection, Row):
selection = Table(selection)
return SearchResult(table=selection)
def __len__(self):
"""Returns the number of products in the SearchResult table."""
return len(self.table)
@property
def unique_targets(self):
"""Returns a table of targets and their RA & dec values produced by search"""
mask = ["target_name", "s_ra", "s_dec"]
return Table.from_pandas(
self.table[mask]
.to_pandas()
.drop_duplicates("target_name")
.reset_index(drop=True)
)
@property
def obsid(self):
"""MAST observation ID for each data product found."""
return np.asarray(np.unique(self.table["obsid"]), dtype="int64")
@property
def ra(self):
"""Right Ascension coordinate for each data product found."""
return self.table["s_ra"].data.data
@property
def dec(self):
"""Declination coordinate for each data product found."""
return self.table["s_dec"].data.data
@property
def mission(self):
"""Kepler quarter or TESS sector names for each data product found."""
return self.table["mission"].data
@property
def year(self):
"""Year the observation was made."""
return self.table["year"].data
@property
def author(self):
"""Pipeline name for each data product found."""
return self.table["author"].data
@property
def target_name(self):
"""Target name for each data product found."""
return self.table["target_name"].data
@property
def exptime(self):
"""Exposure time for each data product found."""
return self.table["exptime"].quantity
@property
def distance(self):
"""Distance from the search position for each data product found."""
return self.table["distance"].quantity
def _download_one(
self, table, quality_bitmask, download_dir, cutout_size, **kwargs
):
"""Private method used by `download()` and `download_all()` to download
exactly one file from the MAST archive.
Always returns a `TargetPixelFile` or `LightCurve` object.
"""
# Make sure astroquery uses the same level of verbosity
logging.getLogger("astropy").setLevel(log.getEffectiveLevel())
if download_dir is None:
download_dir = self._default_download_dir()
# if the SearchResult row is a TESScut entry, then download cutout
if "FFI Cutout" in table[0]["description"]:
try:
log.debug(
"Started downloading TESSCut for '{}' sector {}."
"".format(table[0]["target_name"], table[0]["sequence_number"])
)
path = self._fetch_tesscut_path(
table[0]["target_name"],
table[0]["sequence_number"],
download_dir,
cutout_size,
)
except Exception as exc:
msg = str(exc)
if "504" in msg:
# TESSCut will occasionally return a "504 Gateway Timeout
# error" when it is overloaded.
raise HTTPError(
"The TESS FFI cutout service at MAST appears "
"to be temporarily unavailable. It returned "
"the following error: {}".format(exc)
)
else:
raise SearchError(
"Unable to download FFI cutout. Desired target "
"coordinates may be too near the edge of the FFI."
"Error: {}".format(exc)
)
return read(
path, quality_bitmask=quality_bitmask, targetid=table[0]["targetid"]
)
else:
if cutout_size is not None:
warnings.warn(
"`cutout_size` can only be specified for TESS "
"Full Frame Image cutouts.",
LightkurveWarning,
)
# Whenever `astroquery.mast.Observations.download_products` is called,
# a HTTP request will be sent to determine the length of the file
# prior to checking if the file already exists in the local cache.
# For performance, we skip this HTTP request and immediately try to
# find the file in the cache. The path we check here is consistent
# with the one hard-coded inside `astroquery.mast.Observations._download_files()`
# in Astroquery v0.4.1. It would be good to submit a PR to astroquery
# so we can avoid having to use this hard-coded hack.
path = os.path.join(
download_dir.rstrip("/"),
"mastDownload",
table["obs_collection"][0],
table["obs_id"][0],
table["productFilename"][0],
)
if os.path.exists(path):
log.debug("File found in local cache.")
else:
from astroquery.mast import Observations
download_url = table[:1]["dataURL"][0]
log.debug("Started downloading {}.".format(download_url))
download_response = Observations.download_products(
table[:1], mrp_only=False, download_dir=download_dir
)[0]
if download_response["Status"] != "COMPLETE":
raise LightkurveError(
f"Download of {download_url} failed. "
f"MAST returns {download_response['Status']}: {download_response['Message']}"
)
path = download_response["Local Path"]
log.debug("Finished downloading.")
return read(path, quality_bitmask=quality_bitmask, **kwargs)
@suppress_stdout
def download(
self, quality_bitmask="default", download_dir=None, cutout_size=None, **kwargs
):
"""Download and open the first data product in the search result.
If multiple files are present in `SearchResult.table`, only the first
will be downloaded.
Parameters
----------
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`KeplerQualityFlags <lightkurve.utils.KeplerQualityFlags>` or :class:`TessQualityFlags <lightkurve.utils.TessQualityFlags>` class for details on the bitmasks.
download_dir : str, optional
Location where the data files will be stored.
If `None` is passed, the value from `cache_dir` configuration parameter is used,
with "~/.lightkurve/cache" as the default.
See `~lightkurve.config.get_cache_dir()` for details.
cutout_size : int, float or tuple, optional
Side length of cutout in pixels. Tuples should have dimensions (y, x).
Default size is (5, 5)
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
kwargs : dict, optional
Extra keyword arguments passed on to the file format reader function.
Returns
-------
data : `TargetPixelFile` or `LightCurve` object
The first entry in the products table.
Raises
------
HTTPError
If the TESSCut service times out (i.e. returns HTTP status 504).
SearchError
If any other error occurs.
"""
if len(self.table) == 0:
warnings.warn(
"Cannot download from an empty search result.", LightkurveWarning
)
return None
if len(self.table) != 1:
warnings.warn(
"Warning: {} files available to download. "
"Only the first file has been downloaded. "
"Please use `download_all()` or specify additional "
"criteria (e.g. quarter, campaign, or sector) "
"to limit your search.".format(len(self.table)),
LightkurveWarning,
)
return self._download_one(
table=self.table[:1],
quality_bitmask=quality_bitmask,
download_dir=download_dir,
cutout_size=cutout_size,
**kwargs,
)
@suppress_stdout
def download_all(
self, quality_bitmask="default", download_dir=None, cutout_size=None, **kwargs
):
"""Download and open all data products in the search result.
This method will return a `~lightkurve.TargetPixelFileCollection` or
`~lightkurve.LightCurveCollection`.
Parameters
----------
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`KeplerQualityFlags <lightkurve.utils.KeplerQualityFlags>` or :class:`TessQualityFlags <lightkurve.utils.TessQualityFlags>` class for details on the bitmasks.
download_dir : str, optional
Location where the data files will be stored.
If `None` is passed, the value from `cache_dir` configuration parameter is used,
with "~/.lightkurve/cache" as the default.
See `~lightkurve.config.get_cache_dir()` for details.
cutout_size : int, float or tuple, optional
Side length of cutout in pixels. Tuples should have dimensions (y, x).
Default size is (5, 5)
flux_column : str, optional
The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
kwargs : dict, optional
Extra keyword arguments passed on to the file format reader function.
Returns
-------
collection : `~lightkurve.collections.Collection` object
Returns a `~lightkurve.LightCurveCollection` or
`~lightkurve.TargetPixelFileCollection`,
containing all entries in the products table
Raises
------
HTTPError
If the TESSCut service times out (i.e. returns HTTP status 504).
SearchError
If any other error occurs.
"""
if len(self.table) == 0:
warnings.warn(
"Cannot download from an empty search result.", LightkurveWarning
)
return None
log.debug("{} files will be downloaded.".format(len(self.table)))
products = []
for idx in range(len(self.table)):
products.append(
self._download_one(
table=self.table[idx : idx + 1],
quality_bitmask=quality_bitmask,
download_dir=download_dir,
cutout_size=cutout_size,
**kwargs,
)
)
if isinstance(products[0], TargetPixelFile):
return TargetPixelFileCollection(products)
else:
return LightCurveCollection(products)
def _default_download_dir(self):
return config.get_cache_dir()
def _fetch_tesscut_path(self, target, sector, download_dir, cutout_size):
"""Downloads TESS FFI cutout and returns path to local file.
Parameters
----------
download_dir : str
Path to location of `.lightkurve-cache` directory where downloaded
cutouts are stored
cutout_size : int, float or tuple
Side length of cutout in pixels. Tuples should have dimensions (y, x).
Default size is (5, 5)
Returns
-------
path : str
Path to locally downloaded cutout file
"""
from astroquery.mast import TesscutClass
coords = _resolve_object(target)
# Set cutout_size defaults
if cutout_size is None:
cutout_size = 5
# Check existence of `~/.lightkurve-cache/tesscut`
tesscut_dir = os.path.join(download_dir, "tesscut")
if not os.path.isdir(tesscut_dir):
# if it doesn't exist, make a new cache directory
try:
os.mkdir(tesscut_dir)
# downloads into default cache if OSError occurs
except OSError:
tesscut_dir = download_dir
# build path string name and check if it exists
# this is necessary to ensure cutouts are not downloaded multiple times
sec = TesscutClass().get_sectors(coordinates=coords)
sector_name = sec[sec["sector"] == sector]["sectorName"][0]
        # Accept both int and float scalars for square cutouts
        if isinstance(cutout_size, (int, float)):
size_str = str(int(cutout_size)) + "x" + str(int(cutout_size))
elif isinstance(cutout_size, tuple) or isinstance(cutout_size, list):
size_str = str(int(cutout_size[1])) + "x" + str(int(cutout_size[0]))
# search cache for file with matching ra, dec, and cutout size
# ra and dec are searched within 0.001 degrees of input target
ra_string = str(coords.ra.value)
dec_string = str(coords.dec.value)
matchstring = r"{}_{}*_{}*_{}_astrocut.fits".format(
sector_name,
ra_string[: ra_string.find(".") + 4],
dec_string[: dec_string.find(".") + 4],
size_str,
)
cached_files = glob.glob(os.path.join(tesscut_dir, matchstring))
# if any files exist, return the path to them instead of downloading
if len(cached_files) > 0:
path = cached_files[0]
log.debug("Cached file found.")
# otherwise the file will be downloaded
else:
cutout_path = TesscutClass().download_cutouts(
coordinates=coords, size=cutout_size, sector=sector, path=tesscut_dir
)
path = cutout_path[0][0] # the cutoutpath already contains testcut_dir
log.debug("Finished downloading.")
return path
@cached
def search_targetpixelfile(
target,
radius=None,
exptime=None,
cadence=None,
mission=("Kepler", "K2", "TESS"),
author=None,
quarter=None,
month=None,
campaign=None,
sector=None,
limit=None,
):
"""Search the `MAST data archive <https://archive.stsci.edu>`_ for target pixel files.
This function fetches a data table that lists the Target Pixel Files (TPFs)
that fall within a region of sky centered around the position of `target`
and within a cone of a given `radius`. If no value is provided for `radius`,
only a single target will be returned.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
Target around which to search. Valid inputs include:
* The name of the object as a string, e.g. "Kepler-10".
* The KIC or EPIC identifier as an integer, e.g. 11904151.
* A coordinate string in decimal format, e.g. "285.67942179 +50.24130576".
* A coordinate string in sexagesimal format, e.g. "19:02:43.1 +50:14:28.7".
* An `astropy.coordinates.SkyCoord` object.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
cadence : 'long', 'short', 'fast', or float
Synonym for `exptime`. Will likely be deprecated in the future.
mission : str, tuple of str
'Kepler', 'K2', or 'TESS'. By default, all will be returned.
author : str, tuple of str, or "any"
Author of the data product (`provenance_name` in the MAST API).
Official Kepler, K2, and TESS pipeline products have author names
'Kepler', 'K2', and 'SPOC'.
By default, all light curves are returned regardless of the author.
quarter, campaign, sector : int, list of ints
Kepler Quarter, K2 Campaign, or TESS Sector number.
By default all quarters/campaigns/sectors will be returned.
month : 1, 2, 3, 4 or list of int
For Kepler's prime mission, there are three short-cadence
TargetPixelFiles for each quarter, each covering one month.
Hence, if ``exptime='short'`` you can specify month=1, 2, 3, or 4.
By default all months will be returned.
limit : int
Maximum number of products to return.
Returns
-------
result : :class:`SearchResult` object
Object detailing the data products found.
Examples
--------
This example demonstrates how to use the `search_targetpixelfile()` function
to query and download data. Before instantiating a
`~lightkurve.targetpixelfile.KeplerTargetPixelFile` object or
downloading any science products, we can identify potential desired targets
with `search_targetpixelfile()`::
>>> search_result = search_targetpixelfile('Kepler-10') # doctest: +SKIP
>>> print(search_result) # doctest: +SKIP
    The above code will query MAST for Target Pixel Files (TPFs) available for
the known planet system Kepler-10, and display a table containing the
available science products. Because Kepler-10 was observed during 15 Quarters,
the table will have 15 entries. To obtain a
`~lightkurve.collections.TargetPixelFileCollection` object containing all
15 observations, use::
>>> search_result.download_all() # doctest: +SKIP
or we can download a single product by limiting our search::
>>> tpf = search_targetpixelfile('Kepler-10', quarter=2).download() # doctest: +SKIP
The above line of code will only download Quarter 2 and create a single
`~lightkurve.targetpixelfile.KeplerTargetPixelFile` object called `tpf`.
We can also pass a radius into `search_targetpixelfile` to perform a cone search::
>>> search_targetpixelfile('Kepler-10', radius=100).targets # doctest: +SKIP
This will display a table containing all targets within 100 arcseconds of Kepler-10.
We can download a `~lightkurve.collections.TargetPixelFileCollection` object
containing all available products for these targets in Quarter 4 with::
>>> search_targetpixelfile('Kepler-10', radius=100, quarter=4).download_all() # doctest: +SKIP
"""
try:
return _search_products(
target,
radius=radius,
filetype="Target Pixel",
exptime=exptime or cadence,
mission=mission,
provenance_name=author,
quarter=quarter,
month=month,
campaign=campaign,
sector=sector,
limit=limit,
)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
@deprecated(
"2.0", alternative="search_lightcurve()", warning_type=LightkurveDeprecationWarning
)
def search_lightcurvefile(*args, **kwargs):
return search_lightcurve(*args, **kwargs)
@cached
def search_lightcurve(
target,
radius=None,
exptime=None,
cadence=None,
mission=("Kepler", "K2", "TESS"),
author=None,
quarter=None,
month=None,
campaign=None,
sector=None,
limit=None,
):
"""Search the `MAST data archive <https://archive.stsci.edu>`_ for light curves.
This function fetches a data table that lists the Light Curve Files
that fall within a region of sky centered around the position of `target`
and within a cone of a given `radius`. If no value is provided for `radius`,
only a single target will be returned.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
Target around which to search. Valid inputs include:
* The name of the object as a string, e.g. "Kepler-10".
* The KIC or EPIC identifier as an integer, e.g. 11904151.
* A coordinate string in decimal format, e.g. "285.67942179 +50.24130576".
* A coordinate string in sexagesimal format, e.g. "19:02:43.1 +50:14:28.7".
* An `astropy.coordinates.SkyCoord` object.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
cadence : 'long', 'short', 'fast', or float
Synonym for `exptime`. This keyword will likely be deprecated in the future.
mission : str, tuple of str
'Kepler', 'K2', or 'TESS'. By default, all will be returned.
author : str, tuple of str, or "any"
Author of the data product (`provenance_name` in the MAST API).
Official Kepler, K2, and TESS pipeline products have author names
'Kepler', 'K2', and 'SPOC'.
Community-provided products that are supported include 'K2SFF', 'EVEREST'.
By default, all light curves are returned regardless of the author.
quarter, campaign, sector : int, list of ints
Kepler Quarter, K2 Campaign, or TESS Sector number.
By default all quarters/campaigns/sectors will be returned.
month : 1, 2, 3, 4 or list of int
For Kepler's prime mission, there are three short-cadence
TargetPixelFiles for each quarter, each covering one month.
Hence, if ``exptime='short'`` you can specify month=1, 2, 3, or 4.
By default all months will be returned.
limit : int
Maximum number of products to return.
Returns
-------
result : :class:`SearchResult` object
Object detailing the data products found.
Examples
--------
This example demonstrates how to use the `search_lightcurve()` function to
query and download data. Before instantiating a `LightCurve` object or
downloading any science products, we can identify potential desired targets with
`search_lightcurve`::
>>> from lightkurve import search_lightcurve # doctest: +SKIP
>>> search_result = search_lightcurve("Kepler-10") # doctest: +SKIP
>>> print(search_result) # doctest: +SKIP
    The above code will query MAST for light curve files available for the known
planet system Kepler-10, and display a table containing the available
data products. Because Kepler-10 was observed in multiple quarters and sectors
by both Kepler and TESS, the search will return many dozen results.
If we want to narrow down the search to only return Kepler light curves
in long cadence, we can use::
>>> search_result = search_lightcurve("Kepler-10", author="Kepler", exptime=1800) # doctest: +SKIP
>>> print(search_result) # doctest: +SKIP
    That is better; we now see 15 light curves corresponding to 15 Kepler quarters.
If we want to download a `~lightkurve.collections.LightCurveCollection` object containing all
15 observations, use::
>>> search_result.download_all() # doctest: +SKIP
or we can specify the downloaded products by selecting a specific row using
rectangular brackets, for example::
>>> lc = search_result[2].download() # doctest: +SKIP
The above line of code will only search and download Quarter 2 data and
create a `LightCurve` object called lc.
We can also pass a radius into `search_lightcurve` to perform a cone search::
>>> search_lightcurve('Kepler-10', radius=100, quarter=4, exptime=1800) # doctest: +SKIP
This will display a table containing all targets within 100 arcseconds of
    Kepler-10 and in Quarter 4. We can then download a
    `~lightkurve.collections.LightCurveCollection` containing all these
    light curves using::
>>> search_lightcurve('Kepler-10', radius=100, quarter=4, exptime=1800).download_all() # doctest: +SKIP
"""
try:
return _search_products(
target,
radius=radius,
filetype="Lightcurve",
exptime=exptime or cadence,
mission=mission,
provenance_name=author,
quarter=quarter,
month=month,
campaign=campaign,
sector=sector,
limit=limit,
)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
@cached
def search_tesscut(target, sector=None):
"""Search the `MAST TESSCut service <https://mast.stsci.edu/tesscut/>`_ for a region
of sky that is available as a TESS Full Frame Image cutout.
This feature uses the `TESScut service <https://mast.stsci.edu/tesscut/>`_
provided by the TESS data archive at MAST. If you use this service in
your work, please `cite TESScut <https://ascl.net/code/v/2239>`_ in your
publications.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
Target around which to search. Valid inputs include:
* The name of the object as a string, e.g. "Kepler-10".
* The KIC or EPIC identifier as an integer, e.g. 11904151.
* A coordinate string in decimal format, e.g. "285.67942179 +50.24130576".
* A coordinate string in sexagesimal format, e.g. "19:02:43.1 +50:14:28.7".
* An `astropy.coordinates.SkyCoord` object.
sector : int or list
TESS Sector number. Default (None) will return all available sectors. A
list of desired sectors can also be provided.
Returns
-------
result : :class:`SearchResult` object
Object detailing the data products found.
"""
try:
return _search_products(target, filetype="ffi", mission="TESS", sector=sector)
except SearchError as exc:
log.error(exc)
return SearchResult(None)
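# A minimal usage sketch for `search_tesscut` (sector and cutout size are
# illustrative; requires network access):
#
#   >>> tpf = search_tesscut("Kepler-10", sector=14).download(cutout_size=10)  # doctest: +SKIP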
def _search_products(
target,
radius=None,
filetype="Lightcurve",
mission=("Kepler", "K2", "TESS"),
provenance_name=None,
exptime=(0, 9999),
quarter=None,
month=None,
campaign=None,
sector=None,
limit=None,
**extra_query_criteria,
):
"""Helper function which returns a SearchResult object containing MAST
products that match several criteria.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
See docstrings above.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
filetype : {'Target pixel', 'Lightcurve', 'FFI'}
Type of files queried at MAST.
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
mission : str, list of str
'Kepler', 'K2', or 'TESS'. By default, all will be returned.
provenance_name : str, list of str
Provenance of the data product. Defaults to official products, i.e.
('Kepler', 'K2', 'SPOC'). Community-provided products such as 'K2SFF'
are supported as well.
quarter, campaign, sector : int, list of ints
Kepler Quarter, K2 Campaign, or TESS Sector number.
By default all quarters/campaigns/sectors will be returned.
month : 1, 2, 3, 4 or list of int
For Kepler's prime mission, there are three short-cadence
TargetPixelFiles for each quarter, each covering one month.
Hence, if ``exptime='short'`` you can specify month=1, 2, 3, or 4.
By default all months will be returned.
limit : int
Maximum number of products to return
Returns
-------
SearchResult : :class:`SearchResult` object.
"""
if isinstance(target, int):
if (0 < target) and (target < 13161030):
log.warning(
"Warning: {} may refer to a different Kepler or TESS target. "
"Please add the prefix 'KIC' or 'TIC' to disambiguate."
"".format(target)
)
elif (0 < 200000000) and (target < 251813739):
log.warning(
"Warning: {} may refer to a different K2 or TESS target. "
"Please add the prefix 'EPIC' or 'TIC' to disambiguate."
"".format(target)
)
    # Specifying quarter, campaign, or sector should constrain the mission
if quarter:
mission = "Kepler"
if campaign:
mission = "K2"
if sector:
mission = "TESS"
# Ensure mission is a list
mission = np.atleast_1d(mission).tolist()
# Avoid filtering on `provenance_name` if `author` equals "any" or "all"
if provenance_name in ("any", "all") or provenance_name is None:
provenance_name = None
else:
provenance_name = np.atleast_1d(provenance_name).tolist()
    # Speed up by restricting the MAST query if we don't want FFI image data;
    # note we extend, rather than replace, any caller-supplied criteria here.
if filetype in ["Lightcurve", "Target Pixel"]:
# At MAST, non-FFI Kepler pipeline products are known as "cube" products,
# and non-FFI TESS pipeline products are listed as "timeseries".
extra_query_criteria["dataproduct_type"] = ["cube", "timeseries"]
# Make sure `search_tesscut` always performs a cone search (i.e. always
# passed a radius value), because strict target name search does not apply.
if filetype.lower() == "ffi" and radius is None:
radius = 0.0001 * u.arcsec
observations = _query_mast(
target,
radius=radius,
project=mission,
provenance_name=provenance_name,
exptime=exptime,
sequence_number=campaign or sector,
**extra_query_criteria,
)
log.debug(
"MAST found {} observations. "
"Now querying MAST for the corresponding data products."
"".format(len(observations))
)
if len(observations) == 0:
raise SearchError('No data found for target "{}".'.format(target))
# Light curves and target pixel files
if filetype.lower() != "ffi":
from astroquery.mast import Observations
products = Observations.get_product_list(observations)
result = join(
observations,
products,
keys="obs_id",
join_type="right",
uniq_col_name="{col_name}{table_name}",
table_names=["", "_products"],
)
result.sort(["distance", "obs_id"])
# Add the user-friendly 'author' column (synonym for 'provenance_name')
result["author"] = result["provenance_name"]
# Add the user-friendly 'mission' column
result["mission"] = None
obs_prefix = {"Kepler": "Quarter", "K2": "Campaign", "TESS": "Sector"}
for idx in range(len(result)):
obs_project = result["project"][idx]
tmp_seqno = result["sequence_number"][idx]
obs_seqno = f"{tmp_seqno:02d}" if tmp_seqno else ""
# Kepler sequence_number values were not populated at the time of
# writing this code, so we parse them from the description field.
if obs_project == "Kepler" and result["sequence_number"].mask[idx]:
try:
tmp_seqno = re.findall(r".*Q(\d+)", result["description"][idx])[0]
obs_seqno = f"{int(tmp_seqno):02d}"
except IndexError:
obs_seqno = ""
# K2 campaigns 9, 10, and 11 were split into two sections, which are
# listed separately in the table with suffixes "a" and "b"
if obs_project == "K2" and result["sequence_number"][idx] in [9, 10, 11]:
for half, letter in zip([1, 2], ["a", "b"]):
if f"c{tmp_seqno}{half}" in result["productFilename"][idx]:
obs_seqno = f"{int(tmp_seqno):02d}{letter}"
result["mission"][idx] = "{} {} {}".format(
obs_project, obs_prefix.get(obs_project, ""), obs_seqno
)
masked_result = _filter_products(
result,
filetype=filetype,
campaign=campaign,
quarter=quarter,
exptime=exptime,
project=mission,
provenance_name=provenance_name,
month=month,
sector=sector,
limit=limit,
)
log.debug("MAST found {} matching data products.".format(len(masked_result)))
masked_result["distance"].info.format = ".1f" # display <0.1 arcsec
return SearchResult(masked_result)
# Full Frame Images
else:
cutouts = []
for idx in np.where(["TESS FFI" in t for t in observations["target_name"]])[0]:
# if target passed in is a SkyCoord object, convert to RA, dec pair
if isinstance(target, SkyCoord):
target = "{}, {}".format(target.ra.deg, target.dec.deg)
# pull sector numbers
s = observations["sequence_number"][idx]
# if the desired sector is available, add a row
if s in np.atleast_1d(sector) or sector is None:
cutouts.append(
{
"description": f"TESS FFI Cutout (sector {s})",
"mission": f"TESS Sector {s:02d}",
"target_name": str(target),
"targetid": str(target),
"t_min": observations["t_min"][idx],
"exptime": observations["exptime"][idx],
"productFilename": "TESScut",
"provenance_name": "TESScut",
"author": "TESScut",
"distance": 0.0,
"sequence_number": s,
"project": "TESS",
"obs_collection": "TESS",
}
)
if len(cutouts) > 0:
log.debug("Found {} matching cutouts.".format(len(cutouts)))
masked_result = Table(cutouts)
masked_result.sort(["distance", "sequence_number"])
else:
masked_result = None
return SearchResult(masked_result)
def _query_mast(
target,
radius=None,
project=("Kepler", "K2", "TESS"),
provenance_name=None,
exptime=(0, 9999),
sequence_number=None,
**extra_query_criteria,
):
"""Helper function which wraps `astroquery.mast.Observations.query_criteria()`
to return a table of all Kepler/K2/TESS observations of a given target.
By default only the official data products are returned, but this can be
adjusted by adding alternative data product names into `provenance_name`.
Parameters
----------
target : str, int, or `astropy.coordinates.SkyCoord` object
See docstrings above.
radius : float or `astropy.units.Quantity` object
Conesearch radius. If a float is given it will be assumed to be in
units of arcseconds. If `None` then we default to 0.0001 arcsec.
project : str, list of str
Mission name. Typically 'Kepler', 'K2', or 'TESS'.
This parameter is case-insensitive.
provenance_name : str, list of str
Provenance of the observation. Common options include 'Kepler', 'K2',
'SPOC', 'K2SFF', 'EVEREST', 'KEPSEISMIC'.
This parameter is case-insensitive.
exptime : (float, float) tuple
Exposure time range in seconds. Common values include `(59, 61)`
for Kepler short cadence and `(1799, 1801)` for Kepler long cadence.
sequence_number : int, list of int
Quarter, Campaign, or Sector number.
**extra_query_criteria : kwargs
Extra criteria to be passed to `astroquery.mast.Observations.query_criteria`.
Returns
-------
obs : astropy.Table
Table detailing the available observations on MAST.
"""
# Local astroquery import because the package is not used elsewhere
from astroquery.exceptions import NoResultsWarning, ResolverError
from astroquery.mast import Observations
# If passed a SkyCoord, convert it to an "ra, dec" string for MAST
if isinstance(target, SkyCoord):
target = "{}, {}".format(target.ra.deg, target.dec.deg)
# We pass the following `query_criteria` to MAST regardless of whether
# we search by position or target name:
query_criteria = {"project": project, **extra_query_criteria}
if provenance_name is not None:
query_criteria["provenance_name"] = provenance_name
if sequence_number is not None:
query_criteria["sequence_number"] = sequence_number
if exptime is not None:
query_criteria["t_exptime"] = exptime
# If an exact KIC ID is passed, we will search by the exact `target_name`
# under which MAST will know the object to prevent source confusion.
# For discussion, see e.g. GitHub issues #148, #718.
exact_target_name = None
target_lower = str(target).lower()
# Was a Kepler target ID passed?
kplr_match = re.match(r"^(kplr|kic) ?(\d+)$", target_lower)
if kplr_match:
exact_target_name = f"kplr{kplr_match.group(2).zfill(9)}"
# Was a K2 target ID passed?
ktwo_match = re.match(r"^(ktwo|epic) ?(\d+)$", target_lower)
if ktwo_match:
exact_target_name = f"ktwo{ktwo_match.group(2).zfill(9)}"
# Was a TESS target ID passed?
tess_match = re.match(r"^(tess|tic) ?(\d+)$", target_lower)
if tess_match:
exact_target_name = f"{tess_match.group(2).zfill(9)}"
if exact_target_name and radius is None:
log.debug(
"Started querying MAST for observations with the exact "
f"target_name='{exact_target_name}'."
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=NoResultsWarning)
warnings.filterwarnings("ignore", message="t_exptime is continuous")
obs = Observations.query_criteria(
target_name=exact_target_name, **query_criteria
)
if len(obs) > 0:
# We use `exptime` as an alias for `t_exptime`
obs["exptime"] = obs["t_exptime"]
# astroquery does not report distance when querying by `target_name`;
# we add it here so that the table returned always has this column.
obs["distance"] = 0.0
return obs
else:
log.debug(f"No observations found. Now performing a cone search instead.")
# If the above did not return a result, then do a cone search using the MAST name resolver
    # `radius` defaults to 0.0001 arcsec
if radius is None:
radius = 0.0001 * u.arcsec
elif not isinstance(radius, u.quantity.Quantity):
radius = radius * u.arcsec
query_criteria["radius"] = str(radius.to(u.deg))
try:
log.debug(
"Started querying MAST for observations within "
f"{radius.to(u.arcsec)} arcsec of objectname='{target}'."
)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=NoResultsWarning)
warnings.filterwarnings("ignore", message="t_exptime is continuous")
obs = Observations.query_criteria(objectname=target, **query_criteria)
obs.sort("distance")
# We use `exptime` as an alias for `t_exptime`
obs["exptime"] = obs["t_exptime"]
return obs
except ResolverError as exc:
# MAST failed to resolve the object name to sky coordinates
raise SearchError(exc) from exc
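# Editor's note: a hedged usage sketch, added for illustration (not part of
# the original module). It requires network access to MAST and an installed
# astroquery; the target name below is only an example.
if __name__ == "__main__":
    obs = _query_mast("Kepler-10", exptime=(1799, 1801))
    print(obs["target_name", "t_exptime", "distance"][:5])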
def _filter_products(
products,
campaign=None,
quarter=None,
month=None,
sector=None,
exptime=None,
limit=None,
project=("Kepler", "K2", "TESS"),
provenance_name=None,
filetype="Target Pixel",
):
"""Helper function which filters a SearchResult's products table by one or
more criteria.
Parameters
----------
products : `astropy.table.Table` object
Astropy table containing data products returned by MAST
campaign : int or list
Desired campaign of observation for data products
quarter : int or list
Desired quarter of observation for data products
month : int or list
Desired month of observation for data products
exptime : 'long', 'short', 'fast', or float
'long' selects 10-min and 30-min cadence products;
'short' selects 1-min and 2-min products;
'fast' selects 20-sec products.
Alternatively, you can pass the exact exposure time in seconds as
an int or a float, e.g., ``exptime=600`` selects 10-minute cadence.
By default, all cadence modes are returned.
filetype : str
Type of files queried at MAST (`Target Pixel` or `Lightcurve`).
Returns
-------
products : `astropy.table.Table` object
Masked astropy table containing desired data products
"""
if provenance_name is None: # apply all filters
provenance_lower = ("kepler", "k2", "spoc")
else:
provenance_lower = [p.lower() for p in np.atleast_1d(provenance_name)]
mask = np.ones(len(products), dtype=bool)
# Kepler data needs a special filter for quarter and month
mask &= ~np.array(
[prov.lower() == "kepler" for prov in products["provenance_name"]]
)
if "kepler" in provenance_lower and campaign is None and sector is None:
mask |= _mask_kepler_products(products, quarter=quarter, month=month)
# HLSP products need to be filtered by extension
if filetype.lower() == "lightcurve":
mask &= np.array(
[uri.lower().endswith("lc.fits") for uri in products["productFilename"]]
)
elif filetype.lower() == "target pixel":
mask &= np.array(
[
uri.lower().endswith(("tp.fits", "targ.fits.gz"))
for uri in products["productFilename"]
]
)
elif filetype.lower() == "ffi":
mask &= np.array(["TESScut" in desc for desc in products["description"]])
# Allow only fits files
mask &= np.array(
[
uri.lower().endswith("fits") or uri.lower().endswith("fits.gz")
for uri in products["productFilename"]
]
)
# Filter by cadence
mask &= _mask_by_exptime(products, exptime)
products = products[mask]
products.sort(["distance", "productFilename"])
if limit is not None:
return products[0:limit]
return products
def _mask_kepler_products(products, quarter=None, month=None):
"""Returns a mask flagging the Kepler products that match the criteria."""
mask = np.array([proj.lower() == "kepler" for proj in products["provenance_name"]])
if mask.sum() == 0:
return mask
# Identify quarter by the description.
# This is necessary because the `sequence_number` field was not populated
# for Kepler prime data at the time of writing this function.
if quarter is not None:
quarter_mask = np.zeros(len(products), dtype=bool)
for q in np.atleast_1d(quarter):
quarter_mask |= np.array(
[
desc.lower().replace("-", "").endswith("q{}".format(q))
for desc in products["description"]
]
)
mask &= quarter_mask
# For Kepler short cadence data the month can be specified
if month is not None:
month = np.atleast_1d(month)
# Get the short cadence date lookup table.
table = ascii.read(
os.path.join(PACKAGEDIR, "data", "short_cadence_month_lookup.csv")
)
        # The following line is needed on systems where the default integer
        # type is int32 (e.g. Windows/AppVeyor): there the column would be
        # interpreted as a string, which makes the test fail.
table["StartTime"] = table["StartTime"].astype(str)
# Grab the dates of each of the short cadence files.
# Make sure every entry has the correct month
is_shortcadence = mask & np.asarray(
["Short" in desc for desc in products["description"]]
)
for idx in np.where(is_shortcadence)[0]:
quarter = int(
products["description"][idx].split(" - ")[-1][1:].replace("-", "")
)
date = products["dataURI"][idx].split("/")[-1].split("-")[1].split("_")[0]
permitted_dates = []
for m in month:
try:
permitted_dates.append(
table["StartTime"][
np.where(
(table["Month"] == m) & (table["Quarter"] == quarter)
)[0][0]
]
)
except IndexError:
pass
            if date not in permitted_dates:
mask[idx] = False
return mask
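# Editor's note: a small offline sketch (added by the editor) of the
# description-based quarter matching above; the toy table carries only the
# columns `_mask_kepler_products` actually reads.
if __name__ == "__main__":
    from astropy.table import Table
    _toy = Table({
        "provenance_name": ["Kepler", "Kepler"],
        "description": ["Lightcurve Long Cadence (CLC) - Q4",
                        "Lightcurve Long Cadence (CLC) - Q5"],
    })
    print(_mask_kepler_products(_toy, quarter=4))  # [ True False]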
def _mask_by_exptime(products, exptime):
"""Helper function to filter by exposure time."""
mask = np.ones(len(products), dtype=bool)
if isinstance(exptime, (int, float)):
mask &= products["exptime"] == exptime
elif isinstance(exptime, str):
exptime = exptime.lower()
        if exptime == "fast":
            mask &= products["exptime"] < 60
        elif exptime == "short":
            mask &= (products["exptime"] >= 60) & (products["exptime"] < 300)
        elif exptime in ("long", "ffi"):
            mask &= products["exptime"] >= 300
return mask
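# Editor's note: an offline sketch (added by the editor) of the exposure-time
# aliases handled above, plus one end-to-end `_filter_products` call; the toy
# tables carry only the columns these helpers actually touch.
if __name__ == "__main__":
    from astropy.table import Table
    _exp = Table({"exptime": [20.0, 120.0, 1800.0]})
    print(_mask_by_exptime(_exp, "fast"))   # [ True False False]
    print(_mask_by_exptime(_exp, "short"))  # [False  True False]
    print(_mask_by_exptime(_exp, "long"))   # [False False  True]
    _products = Table({
        "provenance_name": ["SPOC", "SPOC"],
        "productFilename": ["a_lc.fits", "b_tp.fits"],
        "description": ["Light curves", "Target pixel files"],
        "exptime": [120.0, 120.0],
        "distance": [0.0, 0.0],
    })
    print(_filter_products(_products, filetype="Lightcurve")["productFilename"][0])  # a_lc.fits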
def _resolve_object(target):
"""Ask MAST to resolve an object string to a set of coordinates."""
from astroquery.mast import MastClass
# Note: `_resolve_object` was renamed `resolve_object` in astroquery 0.3.10 (2019)
return MastClass().resolve_object(target)
| 56,015
| 39.328294
| 185
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/collections.py
|
"""Defines collections of data products."""
import warnings
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from astropy.table import vstack
from astropy.utils.decorators import deprecated
from . import MPLSTYLE
from .targetpixelfile import TargetPixelFile
from .utils import LightkurveWarning, LightkurveDeprecationWarning
__all__ = ["LightCurveCollection", "TargetPixelFileCollection"]
class Collection(object):
"""Base class for `LightCurveCollection` and `TargetPixelFileCollection`.
A collection can be indexed by standard Python list syntax.
Additionally, it can be indexed by a subset of `numpy.ndarray` syntax:
boolean array indexing and integer array indexing.
Attributes
----------
data: array-like
List of data objects.
Examples
--------
Filter a collection by boolean array indexing.
>>> lcc_filtered = lcc[(lcc.sector >= 13) & (lcc.sector <= 19)] # doctest: +SKIP
>>> lc22 = lcc[lcc.sector == 22][0] # doctest: +SKIP
Filter a collection by integer array indexing to get the object at index 0 and 2.
>>> lcc_filtered = lcc[0, 2] # doctest: +SKIP
"""
def __init__(self, data):
if data is not None:
# ensure we have our own container
self.data = [item for item in data]
else:
self.data = []
def __len__(self):
return len(self.data)
def __getitem__(self, index_or_mask):
if isinstance(index_or_mask, (int, np.integer)):
return self.data[index_or_mask]
elif isinstance(index_or_mask, slice):
return type(self)(self.data[index_or_mask])
elif all([isinstance(i, (bool, np.bool_)) for i in index_or_mask]):
            # case: index_or_mask is a boolean array-like, e.g., np.ndarray,
            # collections.abc.Sequence, etc.
            # note: filtering via np.array(self.data)[np.nonzero(index_or_mask)]
            # would be very slow, because np.array(self.data) deep-copies the
            # data, so we create the filtered list on our own
if len(index_or_mask) != len(self.data):
raise IndexError(
f"boolean index did not match indexed array; dimension is {len(self.data)} "
f"but corresponding boolean dimension is {len(index_or_mask)}"
)
return type(self)([self.data[i] for i in np.nonzero(index_or_mask)[0]])
elif all([isinstance(i, (int, np.integer)) for i in index_or_mask]):
# case int array like, follow ndarray behavior
return type(self)([self.data[i] for i in index_or_mask])
else:
raise IndexError(
"only integers, slices (`:`) and integer or boolean arrays are valid indices"
)
def __setitem__(self, index, obj):
self.data[index] = obj
def append(self, obj):
"""Appends a new object to the collection.
Parameters
----------
obj : object
Typically a LightCurve or TargetPixelFile object
"""
self.data.append(obj)
def __repr__(self):
result = f"{self.__class__.__name__} of {len(self)} objects:\n "
# LightCurve objects provide a special `_repr_simple_` method
# to avoid printing an entire table here
result += "\n ".join(
[
f"{idx}: " + getattr(obj, "_repr_simple_", obj.__repr__)()
for idx, obj in enumerate(self)
]
)
return result
def _safeGetScalarAttr(self, attrName):
# return np.nan when the attribute is missing, so that the returned value can be used in a comparison
# e.g., lcc[lcc.sector < 25]
return np.array([getattr(lcOrTpf, attrName, np.nan) for lcOrTpf in self.data])
@property
def sector(self):
"""(TESS-specific) the quarters of the lightcurves / target pixel files.
Returns `numpy.nan` for data products with lack a sector meta data keyword.
The attribute is useful for filtering a collection by sector.
Examples
--------
Plot two lightcurves, one from TESS sectors 13 to 19, and one for sector 22.
>>> import lightkurve as lk
>>> lcc = lk.search_lightcurve('TIC286923464', author='SPOC').download_all() # doctest: +SKIP
>>> lcc_filtered = lcc[(lcc.sector >= 13) & (lcc.sector <= 19)] # doctest: +SKIP
>>> lcc_filtered.plot() # doctest: +SKIP
>>> lcc[lcc.sector == 22][0].plot() # doctest: +SKIP
"""
return self._safeGetScalarAttr("sector")
@property
def quarter(self):
"""(Kepler-specific) the quarters of the lightcurves / target pixel files.
The Kepler quarters of the lightcurves / target pixel files; `numpy.nan` for those with none.
"""
return self._safeGetScalarAttr("quarter")
@property
def campaign(self):
"""(K2-specific) the campaigns of the lightcurves / target pixel files.
The K2 campaigns of the lightcurves / target pixel files; `numpy.nan` for those with none.
"""
return self._safeGetScalarAttr("campaign")
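# Editor's note: a hedged, self-contained sketch (added by the editor) of the
# indexing modes `Collection.__getitem__` supports; any objects can serve as
# collection members.
if __name__ == "__main__":
    _c = Collection(["a", "b", "c", "d"])
    print(len(_c))                                        # 4
    print(_c[1])                                          # b (plain integer)
    print(_c[[0, 2]].data)                                # ['a', 'c'] (integer array)
    print(_c[np.array([True, False, True, False])].data)  # ['a', 'c'] (boolean mask)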
class LightCurveCollection(Collection):
"""Class to hold a collection of LightCurve objects.
Attributes
----------
lightcurves : array-like
List of LightCurve objects.
"""
def __init__(self, lightcurves):
super(LightCurveCollection, self).__init__(lightcurves)
@property
@deprecated("2.0", warning_type=LightkurveDeprecationWarning)
def PDCSAP_FLUX(self):
"""DEPRECATED. Replaces `LightCurveFileCollection.PDCSAP_FLUX`.
Provided for backwards-compatibility with Lightkurve v1.x;
will be removed soon."""
return LightCurveCollection([lc.PDCSAP_FLUX for lc in self])
@property
@deprecated("2.0", warning_type=LightkurveDeprecationWarning)
def SAP_FLUX(self):
"""DEPRECATED. Replaces `LightCurveFileCollection.SAP_FLUX`.
Provided for backwards-compatibility with Lightkurve v1.x;
will be removed soon."""
return LightCurveCollection([lc.SAP_FLUX for lc in self])
def stitch(self, corrector_func=lambda x: x.normalize()):
"""Stitch all light curves in the collection into a single `LightCurve`.
Any function passed to `corrector_func` will be applied to each light curve
before stitching. For example, passing "lambda x: x.normalize().flatten()"
will normalize and flatten each light curve before stitching.
Parameters
----------
corrector_func : function
Function that accepts and returns a `~lightkurve.lightcurve.LightCurve`.
This function is applied to each light curve in the collection
prior to stitching. The default is to normalize each light curve.
Returns
-------
lc : `~lightkurve.lightcurve.LightCurve`
Stitched light curve.
"""
if corrector_func is None:
corrector_func = lambda x: x # noqa: E731
with warnings.catch_warnings(): # ignore "already normalized" message
warnings.filterwarnings("ignore", message=".*already.*")
lcs = [corrector_func(lc) for lc in self]
# Address issue #954: ignore incompatible columns with the same name
columns_to_remove = set()
for col in lcs[0].columns:
for lc in lcs[1:]:
if col in lc.columns:
if not (
issubclass(lcs[0][col].__class__, lc[col].__class__)
or issubclass(lc[col].__class__, lcs[0][col].__class__)
or lcs[0][col].__class__.info is lc[col].__class__.info
):
columns_to_remove.add(col)
continue
if len(columns_to_remove) > 0:
warnings.warn(
f"The following columns will be excluded from stitching because the column types are incompatible: {columns_to_remove}",
LightkurveWarning,
)
lcs = [lc.copy() for lc in lcs]
            for lc in lcs:
                lc.remove_columns(columns_to_remove.intersection(lc.columns))
# Need `join_type='inner'` until AstroPy supports masked Quantities
return vstack(lcs, join_type="inner", metadata_conflicts="silent")
def plot(self, ax=None, offset=0.0, **kwargs) -> matplotlib.axes.Axes:
"""Plots all light curves in the collection on a single plot.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
offset : float
Offset to add to targets with different labels, to prevent light
curves from being plotted on top of each other. For example, if
the collection contains light curves with unique labels "A", "B",
and "C", light curves "A" will have `0*offset` added to their flux,
light curves "B" will have `1*offset` offset added, and "C" will
have `2*offset` added.
**kwargs : dict
Dictionary of arguments to be passed to `LightCurve.plot`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
with plt.style.context(MPLSTYLE):
if ax is None:
_, ax = plt.subplots()
for kwarg in ["c", "color", "label"]:
if kwarg in kwargs:
kwargs.pop(kwarg)
for idx, lc in enumerate(self):
kwargs["label"] = f"{idx}: {lc.meta.get('LABEL', '(missing label)')}"
lc.plot(ax=ax, c=f"C{idx}", offset=idx * offset, **kwargs)
# If some but not all light curves are normalized, ensure the Y label
# says "Flux" and not "Normalized Flux"
normstatus = [lc.meta.get("NORMALIZED", False) for lc in self]
if "normalize" not in kwargs and any(normstatus) and not all(normstatus):
warnings.warn(
"Some but not all of the light curves in the collection appear to be normalized. "
"You may wish to use `normalize=True` to ensure all are normalized.",
LightkurveWarning,
)
if "ylabel" not in kwargs:
ax.set_ylabel("Flux")
return ax
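# Editor's note: a hedged offline sketch (added by the editor) of `stitch`;
# it assumes an installed `lightkurve` package for the LightCurve import and
# uses two tiny in-memory light curves, so no downloads are needed.
if __name__ == "__main__":
    from lightkurve import LightCurve
    _lc1 = LightCurve(time=np.arange(0, 5), flux=np.full(5, 100.0))
    _lc2 = LightCurve(time=np.arange(5, 10), flux=np.full(5, 200.0))
    _stitched = LightCurveCollection([_lc1, _lc2]).stitch()
    print(len(_stitched))  # 10; each segment was normalized before stacking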
class TargetPixelFileCollection(Collection):
"""Class to hold a collection of `~lightkurve.targetpixelfile.TargetPixelFile` objects.
Parameters
----------
tpfs : list or iterable
List of `~lightkurve.targetpixelfile.TargetPixelFile` objects.
"""
def __init__(self, tpfs):
super(TargetPixelFileCollection, self).__init__(tpfs)
def plot(self, ax=None):
"""Individually plots all TargetPixelFile objects in a single
matplotlib axes object.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if ax is None:
_, ax = plt.subplots(len(self.data), 1, figsize=(7, (7 * len(self.data))))
if len(self.data) == 1:
self.data[0].plot(ax=ax)
else:
for i, tpf in enumerate(self.data):
tpf.plot(ax=ax[i])
return ax
| 11,762
| 36.945161
| 136
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/convenience.py
|
from __future__ import division, print_function
import numpy as np
from .lightcurve import LightCurve
__all__ = ["estimate_cdpp"]
def estimate_cdpp(flux, **kwargs):
"""A convenience function which wraps LightCurve.estimate_cdpp().
For details on the algorithm used to compute the Combined Differential
Photometric Precision (CDPP) noise metric, please see the docstring of
the `LightCurve.estimate_cdpp()` method.
Parameters
----------
flux : array-like
Flux values.
**kwargs : dict
Dictionary of arguments to be passed to `LightCurve.estimate_cdpp()`.
Returns
-------
cdpp : float
Savitzky-Golay CDPP noise metric in units parts-per-million (ppm).
"""
return LightCurve(time=np.arange(len(flux)), flux=flux).estimate_cdpp(**kwargs)
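# Editor's note: a hedged offline sketch (added by the editor). For pure white
# noise at the 100 ppm level, the CDPP metric comes out well below the
# per-cadence scatter, because it averages over the assumed transit duration.
if __name__ == "__main__":
    rng = np.random.default_rng(seed=42)
    flux = 1.0 + 100e-6 * rng.standard_normal(5000)
    print(estimate_cdpp(flux))  # a Quantity in ppm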
| 818
| 25.419355
| 83
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/tasoc.py
|
"""TESS Asteroseismic Science Operations Center - https://tasoc.dk
TESS Data For Asteroseismology Lightcurves - https://archive.stsci.edu/hlsp/tasoc
Data provided with this release have been extracted using the TASOC Photometry pipeline. The TASOC
pipeline used to generate the data is open source and available on GitHub - https://github.com/tasoc
"""
from ..lightcurve import TessLightCurve
from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
def read_tasoc_lightcurve(filename, flux_column="FLUX_RAW", quality_bitmask=None):
"""Returns a TESS TASOC `~lightkurve.lightcurve.LightCurve`.
More information: https://archive.stsci.edu/hlsp/tasoc
Parameters
----------
filename : str
Local path or remote url of TASOC light curve FITS file.
flux_column : str
Column that will be used to populate the flux values.
By default, "FLUX_RAW" is used. It contains the T'DA extracted lightcurve,
with no corrections applied to the raw light curves. Corrected lightcurves
may become available in the future.
"""
lc = read_generic_lightcurve(
filename, flux_column=flux_column.lower(), time_format="btjd"
)
lc.meta["AUTHOR"] = "TASOC"
lc.meta["TARGETID"] = lc.meta.get("TICID")
# TASOC light curves are normalized by default
lc.meta["NORMALIZED"] = True
return TessLightCurve(data=lc)
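# Editor's note: a hedged usage sketch (added by the editor); the filename is
# a hypothetical local TASOC HLSP product.
if __name__ == "__main__":
    lc = read_tasoc_lightcurve("hlsp_tasoc_example_lc.fits")
    print(lc.meta["AUTHOR"], lc.meta["NORMALIZED"])  # TASOC True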
| 1,421
| 38.5
| 103
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/tglc.py
|
"""Reader for TGLC light curve files.
Details can be found at https://archive.stsci.edu/hlsp/tglc
"""
import numpy as np
from astropy import units as u
from ..lightcurve import TessLightCurve
from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
def read_tglc_lightcurve(
filename, flux_column="cal_psf_flux", quality_bitmask="default"
):
"""Returns a `~lightkurve.lightcurve.LightCurve` object given a light curve file from
TGLC HLSP
By default, TGLC's `cal_psf_flux` values are used for the `flux` column. No errors are
provided by this HLSP.
Note this reader does not use the TGLC_FLAG extension to inform the bitmask for the
returned light curve, but those flags can still be accessed.
More information on TGLC: https://archive.stsci.edu/hlsp/tglc
Parameters
----------
filename : str
Local path or remote url of a TGLC light curve FITS file.
flux_column : 'CAL_PSF_FLUX', 'CAL_APER_FLUX', 'PSF_FLUX', or 'APERTURE_FLUX'
Which column in the FITS file contains the preferred flux data?
By default the "Corrected PSF Flux" flux (CAL_PSF_FLUX) is used.
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with flags indicating AttitudeTweak, SafeMode, CoarsePoint, EarthPoint, Desat, or
ManualExclude will be ignored.
* "hard": cadences with default flags, ApertureCosmic, CollateralCosmic, Straylight, or Straylight2 will be
ignored.
* "hardest": cadences with all the above flags will be ignored, in addition to cadences with GSFC-ELEANOR-LITE
bit flags of 17 (decimal value 131072) and 18 (decimal value 262144).
"""
lc = read_generic_lightcurve(
filename,
time_column="time",
flux_column=flux_column.lower(),
quality_column="tess_flags",
cadenceno_column="cadence_num",
time_format="btjd",
)
quality_mask = TessQualityFlags.create_quality_mask(
quality_array=lc["quality"], bitmask=quality_bitmask
)
    # TGLC FITS files do not have units specified; re-add them.
for colname in ["psf_flux", "aperture_flux", "background"]:
if colname in lc.colnames:
if lc[colname].unit is not None:
# for case flux, flux_err, lightkurve has forced it to be u.dimensionless_unscaled
# can't reset a unit, so we create a new column
lc[colname] = u.Quantity(
lc[colname].value, "electron/s", dtype=np.float32
)
else:
lc[colname].unit = "electron/s"
# Calibrated columns are normalized, so they are unitless
for colname in ["cal_psf_flux", "cal_aper_flux"]:
if colname in lc.colnames:
if lc[colname].unit is not None:
# for case flux, flux_err, lightkurve has forced it to be u.dimensionless_unscaled
# can't reset a unit, so we create a new column
lc[colname] = u.Quantity(lc[colname].value, "", dtype=np.float32)
else:
lc[colname].unit = ""
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "TGLC"
lc.meta["TARGETID"] = lc.meta.get("OBJECT")
lc.meta["QUALITY_BITMASK"] = quality_bitmask
lc.meta["QUALITY_MASK"] = quality_mask
lc.meta["NORMALIZED"] = True
tic = lc.meta.get("TICID")
if tic is not None:
tic = int(tic)
# compatibility with SPOC, QLP, etc.
lc.meta["TARGETID"] = tic
lc.meta["TICID"] = tic
lc.meta["OBJECT"] = f"TIC {tic}"
# for Lightkurve's plotting methods
lc.meta["LABEL"] = f"TIC {tic}"
return TessLightCurve(data=lc)
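# Editor's note: a hedged usage sketch (added by the editor); the filename is
# a hypothetical local TGLC HLSP product.
if __name__ == "__main__":
    lc = read_tglc_lightcurve("hlsp_tglc_example_lc.fits", flux_column="cal_aper_flux")
    print(lc.meta["AUTHOR"], lc.flux.unit)  # TGLC, dimensionless (normalized)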
| 3,971
| 39.948454
| 122
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/kepseismic.py
|
"""Reader function for KEPSEISMIC community light curve products."""
from ..lightcurve import KeplerLightCurve
from .generic import read_generic_lightcurve
def read_kepseismic_lightcurve(filename, **kwargs):
"""Read a KEPSEISMIC light curve file.
More information: https://archive.stsci.edu/prepds/kepseismic
Parameters
----------
filename : str
        Path or URL of a KEPSEISMIC light curve FITS file.
Returns
-------
lc : `KeplerLightCurve`
A populated light curve object.
"""
    lc = read_generic_lightcurve(filename, time_format="bkjd")
lc.meta["AUTHOR"] = "KEPSEISMIC"
lc.meta["TARGETID"] = lc.meta.get("KEPLERID")
# KEPSEISMIC light curves are normalized by default
lc.meta["NORMALIZED"] = True
return KeplerLightCurve(data=lc, **kwargs)
| 832
| 24.242424
| 68
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/k2sff.py
|
"""Reader function for K2SFF community light curve products."""
from ..lightcurve import KeplerLightCurve
from ..utils import validate_method
from .generic import read_generic_lightcurve
def read_k2sff_lightcurve(filename, ext="BESTAPER", **kwargs):
"""Read a K2SFF light curve file.
More information: https://archive.stsci.edu/hlsp/k2sff
Parameters
----------
filename : str
Path or URL of a K2SFF light curve FITS file.
ext : str
Version of the light curve to use. Valid options include "BESTAPER",
"CIRC_APER0" through "CIRC_APER9", and "PRF_APER0" through "PRF_APER9".
Returns
-------
lc : `KeplerLightCurve`
A populated light curve object.
"""
lc = read_generic_lightcurve(
filename, flux_column="fcor", time_format="bkjd", ext=ext
)
lc.meta["AUTHOR"] = "K2SFF"
lc.meta["TARGETID"] = lc.meta.get("KEPLERID")
return KeplerLightCurve(data=lc, **kwargs)
| 965
| 27.411765
| 79
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/detect.py
|
"""Provides a function to automatically detect Kepler/TESS file types."""
from astropy.io import fits
from astropy.io.fits import HDUList
__all__ = ["detect_filetype"]
def detect_filetype(hdulist: HDUList) -> str:
"""Returns Kepler and TESS file types given a FITS object.
This function will detect the file type by looking at both the TELESCOP and
CREATOR keywords in the first extension of the FITS header. If the file is
recognized as a Kepler or TESS data product, one of the following strings
will be returned:
* `'KeplerTargetPixelFile'`
* `'TessTargetPixelFile'`
* `'KeplerLightCurve'`
* `'TessLightCurve'`
* `'K2SFF'`
* `'EVEREST'`
* `'K2SC'`
* `'K2VARCAT'`
* `'QLP'`
* `'GSFC-ELEANOR-LITE'`
* `'PATHOS'`
* `'TASOC'`
* `'KEPSEISMIC'`
* `'CDIPS'`
* `'TGLC'`
If the data product cannot be detected, `None` will be returned.
Parameters
----------
hdulist : astropy.io.fits.HDUList object
A FITS file.
Returns
-------
filetype : str or None
A string describing the detected filetype. If the filetype is not
recognized, `None` will be returned.
"""
# Is it a MIT/QLP TESS FFI Quicklook Pipeline light curve?
# cf. http://archive.stsci.edu/hlsp/qlp
if "mit/qlp" in hdulist[0].header.get("origin", "").lower():
return "QLP"
# Is it a vanilla eleanor or GSFC-ELEANOR-LITE light curve?
if (
hdulist[0].header.get("LITE") is not None
and hdulist[0].header.get("PCORIGIN") is not None
):
return "ELEANOR"
# Is it a PATHOS TESS light curve?
# cf. http://archive.stsci.edu/hlsp/pathos
    # The 'pathos' name doesn't stay in the filename when we use fits.open
# to download a file, so we have to check for all the important columns
# This will cause problems if another HLSP has the exact same colnames...
if all(
x in hdulist[1].columns.names
for x in [
"PSF_FLUX_RAW",
"PSF_FLUX_COR",
"AP4_FLUX_RAW",
"AP4_FLUX_COR",
"SKY_LOCAL",
]
):
return "PATHOS"
# Is it a TASOC TESS light curve?
# cf. https://tasoc.dk and https://archive.stsci.edu/hlsp/tasoc
if hdulist[0].header.get("ORIGIN") == "TASOC/Aarhus":
return "TASOC"
# Is it a CDIPS TESS light curve?
# cf. http://archive.stsci.edu/hlsp/cdips
if "cdips" in hdulist[0].header.get("ORIGIN", "").lower():
return "CDIPS"
# Is it a K2VARCAT file?
# There are no self-identifying keywords in the header, so go by filename.
if "hlsp_k2varcat" in (hdulist.filename() or ""):
return "K2VARCAT"
# Is it a K2SC file?
if "k2sc" in hdulist[0].header.get("creator", "").lower():
return "K2SC"
# Is it a K2SFF file?
try:
# There are no metadata keywords identifying K2SFF FITS files,
# so we go by structure.
if (
hdulist[1].header.get("EXTNAME") == "BESTAPER"
and hdulist[1].header.get("TTYPE4") == "ARCLENGTH"
):
return "K2SFF"
except Exception:
pass
# Is it an EVEREST file?
try:
if "EVEREST" in str(hdulist[0].header.get("COMMENT")):
return "EVEREST"
except Exception:
pass
# Is it a KEPSEISMIC file?
if hdulist[0].header.get("ORIGIN") == "CEA & SSI":
return "KEPSEISMIC"
# Is it a TGLC file?
if hdulist[0].header.get("ORIGIN") == "UCSB/TGLC":
return "TGLC"
# Is it an official data product?
header = hdulist[0].header
try:
# use `telescop` keyword to determine mission
# and `creator` to determine tpf or lc
if "TELESCOP" in header.keys():
telescop = header["telescop"].lower()
else:
# Some old custom TESS data did not define the `TELESCOP` card
telescop = header["mission"].lower()
creator = header["creator"].lower()
origin = header["origin"].lower()
if telescop == "kepler":
# Kepler TPFs will contain "TargetPixelExporterPipelineModule"
if "targetpixel" in creator:
return "KeplerTargetPixelFile"
# Kepler LCFs will contain "FluxExporter2PipelineModule"
elif (
"fluxexporter" in creator
or "lightcurve" in creator
or "lightcurve" in creator
):
return "KeplerLightCurve"
elif telescop == "tess":
# TESS TPFs will contain "TargetPixelExporterPipelineModule"
if "targetpixel" in creator:
return "TessTargetPixelFile"
# TESS LCFs will contain "LightCurveExporterPipelineModule"
elif "lightcurve" in creator:
return "TessLightCurve"
# Early versions of TESScut did not set a good CREATOR keyword
elif "stsci" in origin:
return "TessTargetPixelFile"
# If the TELESCOP or CREATOR keywords don't exist we expect a KeyError;
# if one of them is Undefined we expect `.lower()` to yield an AttributeError.
except (KeyError, AttributeError):
return None
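# Editor's note: a hedged usage sketch (added by the editor); the path is a
# placeholder for any local Kepler/TESS FITS product.
if __name__ == "__main__":
    with fits.open("kplr-example_llc.fits") as hdul:
        print(detect_filetype(hdul))  # e.g. 'KeplerLightCurve'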
| 5,304
| 32.575949
| 82
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/everest.py
|
"""Reader for K2 EVEREST light curves."""
from ..lightcurve import KeplerLightCurve
from ..utils import KeplerQualityFlags
from .generic import read_generic_lightcurve
def read_everest_lightcurve(
filename, flux_column="flux", quality_bitmask="default", **kwargs
):
"""Read an EVEREST light curve file.
More information: https://archive.stsci.edu/hlsp/everest
Parameters
----------
filename : str
Local path or remote url of a Kepler light curve FITS file.
    flux_column : str
        Which column in the FITS file contains the preferred flux data?
        By default, "flux" is used.
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with severe quality issues will be ignored
(`quality_bitmask=1130799`).
* "hard": more conservative choice of flags to ignore
(`quality_bitmask=1664431`). This is known to remove good data.
* "hardest": removes all data that has been flagged
(`quality_bitmask=2096639`). This mask is not recommended.
See the :class:`KeplerQualityFlags` class for details on the bitmasks.
Returns
-------
lc : `KeplerLightCurve`
A populated light curve object.
"""
lc = read_generic_lightcurve(
filename,
flux_column=flux_column,
quality_column="quality",
cadenceno_column="cadn",
time_format="bkjd",
)
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
quality_mask = KeplerQualityFlags.create_quality_mask(
quality_array=lc["quality"], bitmask=quality_bitmask
)
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "EVEREST"
lc.meta["TARGETID"] = lc.meta.get("KEPLERID")
lc.meta["QUALITY_BITMASK"] = quality_bitmask
lc.meta["QUALITY_MASK"] = quality_mask
return KeplerLightCurve(data=lc, **kwargs)
| 2,316
| 35.203125
| 79
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/qlp.py
|
"""Reader for MIT Quicklook Pipeline (QLP) light curve files.
Website: http://archive.stsci.edu/hlsp/qlp
Product description: https://archive.stsci.edu/hlsps/qlp/hlsp_qlp_tess_ffi_all_tess_v1_data-prod-desc.pdf
"""
from ..lightcurve import TessLightCurve
from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
def read_qlp_lightcurve(filename, flux_column="sap_flux", flux_err_column="kspsap_flux_err", quality_bitmask="default"):
"""Returns a `~lightkurve.lightcurve.LightCurve` object given a light curve file from the MIT Quicklook Pipeline (QLP).
By default, QLP's `sap_flux` column is used to populate the `flux` values,
and 'kspsap_flux_err' is used to populate `flux_err`. For a discussion
related to this choice, see https://github.com/lightkurve/lightkurve/issues/1083
More information: https://archive.stsci.edu/hlsp/qlp
Parameters
----------
filename : str
Local path or remote url of a QLP light curve FITS file.
flux_column : 'sap_flux', 'kspsap_flux', 'kspsap_flux_sml', 'kspsap_flux_lag', or 'sap_bkg'
Which column in the FITS file contains the preferred flux data?
By default the "Simple Aperture Photometry" flux (sap_flux) is used.
flux_err_column: 'kspsap_flux_err', or 'sap_bkg_err'
Which column in the FITS file contains the preferred flux_err data?
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored.
* "default": cadences with severe quality issues will be ignored.
* "hard": more conservative choice of flags to ignore.
This is known to remove good data.
* "hardest": removes all data that has been flagged.
This mask is not recommended.
See the `~lightkurve.utils.TessQualityFlags` class for details on the bitmasks.
"""
lc = read_generic_lightcurve(filename, flux_column=flux_column, flux_err_column=flux_err_column, time_format="btjd")
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
quality_mask = TessQualityFlags.create_quality_mask(
quality_array=lc["quality"], bitmask=quality_bitmask
)
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "QLP"
lc.meta["TARGETID"] = lc.meta.get("TICID")
lc.meta["QUALITY_BITMASK"] = quality_bitmask
lc.meta["QUALITY_MASK"] = quality_mask
# QLP light curves are normalized by default
lc.meta["NORMALIZED"] = True
return TessLightCurve(data=lc)
| 2,872
| 43.890625
| 123
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/generic.py
|
"""Read a generic FITS table containing a light curve."""
import logging
import warnings
from copy import deepcopy
from astropy.io import fits
from astropy.table import Table
from astropy.time import Time
from astropy.units import UnitsWarning
import numpy as np
from ..utils import validate_method
from ..lightcurve import LightCurve
from ..units import ppm
log = logging.getLogger(__name__)
def read_generic_lightcurve(
filename,
time_column="time",
flux_column="flux",
flux_err_column="flux_err",
quality_column="quality",
cadenceno_column="cadenceno",
centroid_col_column="mom_centr1",
centroid_row_column="mom_centr2",
time_format=None,
ext=1,
):
"""Generic helper function to convert a Kepler ot TESS light curve file
into a generic `LightCurve` object.
"""
if isinstance(filename, fits.HDUList):
hdulist = filename # Allow HDUList to be passed
else:
with fits.open(filename) as hdulist:
hdulist = deepcopy(hdulist)
# Raise an exception if the requested extension is invalid
if isinstance(ext, str):
validate_method(ext, supported_methods=[hdu.name.lower() for hdu in hdulist])
with warnings.catch_warnings():
# By default, AstroPy emits noisy warnings about units commonly used
# in archived TESS data products (e.g., "e-/s" and "pixels").
# We ignore them here because they don't affect Lightkurve's features.
        # Inconsistencies between TESS data products and the FITS standard
        # ought to be addressed at the archive level. (See issue #1216.)
warnings.simplefilter("ignore", category=UnitsWarning)
tab = Table.read(hdulist[ext], format="fits")
# Make sure the meta data also includes header fields from extension #0
tab.meta.update(hdulist[0].header)
tab.meta = {k: v for k, v in tab.meta.items()}
for colname in tab.colnames:
# Ensure units have the correct astropy format
# Speed-up: comparing units by their string representation is 1000x
# faster than performing full-blown unit comparison
unitstr = str(tab[colname].unit)
if unitstr == "e-/s":
tab[colname].unit = "electron/s"
elif unitstr == "pixels":
tab[colname].unit = "pixel"
elif unitstr == "ppm" and repr(tab[colname].unit).startswith("Unrecognized"):
# Workaround for issue #956
tab[colname].unit = ppm
# Rename columns to lowercase
tab.rename_column(colname, colname.lower())
# Some KEPLER files used to have a T column instead of TIME.
if time_column == "time" and "time" not in tab.columns and "t" in tab.colnames:
tab.rename_column("t", "time")
if time_column != "time":
tab.rename_column(time_column, "time")
# We *have* to remove rows with TIME=NaN because the Astropy Time
# object does not support the presence of NaNs.
# Fortunately, such rows are always bad data.
nans = np.isnan(tab["time"].data)
if np.any(nans):
log.debug("Ignoring {} rows with NaN times".format(np.sum(nans)))
tab = tab[~nans]
# Prepare a special time column
if not time_format:
if hdulist[ext].header.get("BJDREFI") == 2454833:
time_format = "bkjd"
elif hdulist[ext].header.get("BJDREFI") == 2457000:
time_format = "btjd"
else:
raise ValueError(f"Input file has unclear time format: {filename}")
time = Time(
tab["time"].data,
scale=hdulist[ext].header.get("TIMESYS", "tdb").lower(),
format=time_format,
)
tab.remove_column("time")
# For backwards compatibility with Lightkurve v1.x,
# we make sure standard columns and attributes exist.
if "flux" not in tab.columns:
tab.add_column(tab[flux_column], name="flux", index=0)
if "flux_err" not in tab.columns:
# Try falling back to `{flux_column}_err` if possible
if flux_err_column not in tab.columns:
flux_err_column = flux_column + "_err"
if flux_err_column in tab.columns:
tab.add_column(tab[flux_err_column], name="flux_err", index=1)
if "quality" not in tab.columns and quality_column in tab.columns:
tab.add_column(tab[quality_column], name="quality", index=2)
if "cadenceno" not in tab.columns and cadenceno_column in tab.columns:
tab.add_column(tab[cadenceno_column], name="cadenceno", index=3)
if "centroid_col" not in tab.columns and centroid_col_column in tab.columns:
tab.add_column(tab[centroid_col_column], name="centroid_col", index=4)
if "centroid_row" not in tab.columns and centroid_row_column in tab.columns:
tab.add_column(tab[centroid_row_column], name="centroid_row", index=5)
tab.meta["LABEL"] = hdulist[0].header.get("OBJECT")
tab.meta["MISSION"] = hdulist[0].header.get(
"MISSION", hdulist[0].header.get("TELESCOP")
)
tab.meta["RA"] = hdulist[0].header.get("RA_OBJ")
tab.meta["DEC"] = hdulist[0].header.get("DEC_OBJ")
tab.meta["FILENAME"] = filename
tab.meta["FLUX_ORIGIN"] = flux_column
return LightCurve(time=time, data=tab)
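# Editor's note: a hedged offline sketch (added by the editor). It builds a
# minimal two-column FITS table in memory and reads it back; `fits` and `np`
# are already imported at the top of this module.
if __name__ == "__main__":
    _cols = fits.ColDefs([
        fits.Column(name="TIME", format="D", array=np.arange(5, dtype=float)),
        fits.Column(name="FLUX", format="E", array=np.ones(5, dtype=np.float32)),
    ])
    _hdul = fits.HDUList([fits.PrimaryHDU(), fits.BinTableHDU.from_columns(_cols)])
    _lc = read_generic_lightcurve(_hdul, time_format="btjd")
    print(len(_lc), _lc.time.format)  # 5 btjd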
| 5,192
| 38.340909
| 85
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/cdips.py
|
"""Reader for A PSF-Based Approach to TESS Cluster Difference Imaging Photometric Survey (CDIPS) light curves
Website: https://archive.stsci.edu/hlsp/cdips
Product Description: https://archive.stsci.edu/hlsps/cdips/hlsp_cdips_tess_ffi_all_tess_v01_readme.md
"""
import logging
from ..lightcurve import TessLightCurve
from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
log = logging.getLogger(__name__)
def read_cdips_lightcurve(filename,
flux_column="IRM1",
include_inst_errs=False,
quality_bitmask=None):
"""Returns a TESS CDIPS `~lightkurve.lightcurve.LightCurve`.
Note: CDIPS light curves have already had quality filtering applied, and
do not provide the bitflags necessary for a user to apply a new bitmask.
Therefore, frames corresponding to "momentum dumps and coarse point modes"
are removed according to Bouma et al. 2019, and no other quality filtering
is allowed. The `quality_bitmask` parameter is ignored but accepted for
compatibility with other data format readers.
More information: https://archive.stsci.edu/hlsp/cdips
Parameters
----------
filename : str
Local path or remote url of CDIPS light curve FITS file.
flux_column : str
'IFL#', 'IRM#', 'TFA#', or 'PCA#' (# = 1, 2, or 3)
Which column in the FITS file contains the preferred flux data?
include_inst_errs: bool
Whether to include the instrumental flux/magnitude errors
(Errors are not provided for trend-filtered magnitudes)
"""
ap = flux_column[-1]
    # Only instrumental errors are provided, and they are not available for
    # trend-filtered light curves. The user should select whether to include
    # the instrumental errors or ignore them.
if include_inst_errs:
# If fluxes are requested, return flux errors
        if flux_column[:-1].lower() == "ifl":
flux_err_column = f"ife{ap}"
# Otherwise magnitudes are being requested, return magnitude errors
else:
flux_err_column = f"ire{ap}"
else:
flux_err_column = ""
    # Set the appropriate quality column for this aperture
quality_column = f"irq{ap}"
lc = read_generic_lightcurve(filename,
time_column="tmid_bjd",
flux_column=flux_column.lower(),
flux_err_column=flux_err_column,
quality_column=quality_column,
time_format='btjd')
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
# CDIPS uses their own quality keywords instead of the default bitflags
# Based on Bouma+2019, they filter out coarse point (4) and desat (32)
# as well as other cadences flagged for particular sectors
quality_mask = (lc['quality']=="G") | (lc['quality']=="0")
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "CDIPS"
lc.meta['TARGETID'] = lc.meta.get('TICID')
lc.meta['QUALITY_BITMASK'] = 36
lc.meta['QUALITY_MASK'] = quality_mask
return TessLightCurve(data=lc)
| 3,389
| 39.843373
| 109
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/eleanor.py
|
"""Reader for GSFC-ELEANOR-LITE light curve files.
Details can be found at https://archive.stsci.edu/hlsp/eleanor and https://archive.stsci.edu/hlsp/gsfc-eleanor-lite
"""
from ..lightcurve import TessLightCurve
from ..utils import TessQualityFlags
from astropy import units as u
from .generic import read_generic_lightcurve
import numpy as np
def read_eleanor_lightcurve(filename,
flux_column="CORR_FLUX",
quality_bitmask="default"
):
"""Returns a `~lightkurve.lightcurve.LightCurve` object given a light curve file from
eleanor package or GSFC-ELEANOR-LITE Pipeline.
By default, eleanor's `CORR_FLUX` column is used to populate the `flux` values. Note that the "FLUX_ERR"
column in the Eleanor FITS file is referred to the uncertainty of "RAW_FLUX", not "CORR_FLUX". Thus the
uncertainty reported in the 'flux_err' column here is calculated as follows:
corr_flux_err = corr_flux*raw_flux_err/raw_flux.
For completeness, the original raw_flux's error is added as a "raw_flux_err" column.
In terms of quality flags, eleanor uses the TESS SPOC quality flags by identifying short-cadence targets that
    fall on each camera-CCD pairing for a given sector. However, eleanor also adds two new quality flags: bit 17
    (decimal value 131072) and bit 18 (decimal value 262144).
More information on eleanor: https://github.com/afeinstein20/eleanor
More information on GSFC-ELEANOR-LITE Pipeline: https://archive.stsci.edu/hlsp/gsfc-eleanor-lite
Parameters
----------
filename : str
Local path or remote url of a GSFC-ELEANOR-LITE light curve FITS file.
flux_column : 'RAW_FLUX', 'CORR_FLUX', 'PCA_FLUX', or 'FLUX_BKG'
Which column in the FITS file contains the preferred flux data?
By default the "Corrected Flux" flux (CORR_FLUX) is used.
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with flags indicating AttitudeTweak, SafeMode, CoarsePoint, EarthPoint, Desat, or
ManualExclude will be ignored.
* "hard": cadences with default flags, ApertureCosmic, CollateralCosmic, Straylight, or Straylight2 will be
ignored.
* "hardest": cadences with all the above flags will be ignored, in addition to cadences with GSFC-ELEANOR-LITE
bit flags of 17 (decimal value 131072) and 18 (decimal value 262144).
"""
    lc = read_generic_lightcurve(
        filename,
        time_column="time",
        flux_column=flux_column.lower(),
        flux_err_column="flux_err",
        time_format="btjd",
        quality_column="quality",
        centroid_col_column="x_centroid",
        centroid_row_column="y_centroid",
        cadenceno_column="ffiindex",
    )
if quality_bitmask == "hardest":
# Eleanor has 2 additional bits on top of the 16 TESS SPOC bits
# they are excluded when hardest is specified.
        quality_bitmask = TessQualityFlags.HARDEST_BITMASK | 2**17 | 2**18
quality_mask = TessQualityFlags.create_quality_mask(
quality_array=lc["quality"], bitmask=quality_bitmask
)
lc = lc[quality_mask]
    # Eleanor FITS files do not have units specified; re-add them.
for colname in ["flux", "flux_err", "raw_flux", "corr_flux", "pca_flux", "psf_flux"]:
if colname in lc.colnames:
if lc[colname].unit is not None:
# for case flux, flux_err, lightkurve has forced it to be u.dimensionless_unscaled
# can't reset a unit, so we create a new column
lc[colname] = u.Quantity(lc[colname].value, "electron/s")
else:
lc[colname].unit = "electron/s"
for colname in ["flux_bkg"]:
if colname in lc.colnames:
lc[colname].unit = u.percent
for colname in ["centroid_col", "centroid_row", "x_centroid", "y_centroid", "x_com", "y_com"]:
if colname in lc.colnames:
lc[colname].unit = u.pix
for colname in ["barycorr"]:
if colname in lc.colnames:
lc[colname].unit = u.day
# In Eleanor fits file, raw_flux's error is in flux_err, which breaks Lightkurve convention.
# To account for this, the corr_flux error is calculated from corr_flux_err = corr_flux*raw_flux_err/raw_flux. For completeness,
# the original raw_flux's error is added as a "raw_flux_err" column
lc["raw_flux_err"] = lc["flux_err"]
if flux_column.lower() != 'raw_flux':
lc["flux_err"] = lc[flux_column.lower()]*lc["raw_flux_err"]/lc["raw_flux"]
# vanilla eleanor has cadence saved as float,
# convert to int to ensure we stick with the convention
for colname in ["ffiindex", "cadenceno"]:
if colname in lc.colnames:
if not np.issubdtype(lc[colname].dtype, np.int_):
lc[colname] = np.asarray(lc[colname].value, dtype=int)
if (
lc.meta.get("TVERSION") is not None
and lc.meta.get("GITHUB") == "https://github.com/afeinstein20/eleanor"
):
# the above headers are GSFC-ELEANOR-LITE-specific, and are not present in vanilla eleanor
# cf. https://github.com/afeinstein20/eleanor/blob/main/eleanor/targetdata.py
lc.meta["AUTHOR"] = "GSFC-ELEANOR-LITE"
else:
lc.meta["AUTHOR"] = "ELEANOR"
# Eleanor light curves are not normalized by default
lc.meta["NORMALIZED"] = False
tic = lc.meta.get("TIC_ID")
if tic is not None:
# compatibility with SPOC, QLP, etc.
lc.meta["TARGETID"] = tic
lc.meta["TICID"] = tic
lc.meta["OBJECT"] = f"TIC {tic}"
# for Lightkurve's plotting methods
lc.meta["LABEL"] = f"TIC {tic}"
return TessLightCurve(data=lc)
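# Editor's note: a tiny numeric illustration (added by the editor) of the
# error propagation described above: the FITS "FLUX_ERR" column belongs to
# RAW_FLUX, so its fractional error is transferred onto CORR_FLUX.
if __name__ == "__main__":
    raw_flux, raw_flux_err, corr_flux = 1000.0, 10.0, 950.0
    print(corr_flux * raw_flux_err / raw_flux)  # 9.5: the 1% raw error, rescaled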
| 6,004
| 43.481481
| 132
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/pathos.py
|
"""Reader for A PSF-Based Approach to TESS High Quality Data Of Stellar Clusters (PATHOS) light curve files.
Website: https://archive.stsci.edu/hlsp/pathos
A product description file wasn't obvious on the MAST website
"""
from ..lightcurve import TessLightCurve
from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
def read_pathos_lightcurve(
filename, flux_column="PSF_FLUX_COR", quality_bitmask="default"
):
"""Returns a TESS PATHOS `~lightkurve.lightcurve.LightCurve`.
More information: https://archive.stsci.edu/hlsp/pathos
Parameters
----------
filename : str
Local path or remote url of PATHOS light curve FITS file.
flux_column : 'psf_flux_cor' or 'ap#_flux_cor' (# = 1, 2, 3, or 4)
or 'psf_flux_raw' or 'ap#_flux_raw' (# = 1, 2, 3, or 4)
Which column in the FITS file contains the preferred flux data?
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored.
* "default": cadences with severe quality issues will be ignored.
* "hard": more conservative choice of flags to ignore.
This is known to remove good data.
* "hardest": removes all data that has been flagged.
This mask is not recommended.
See the `~lightkurve.utils.TessQualityFlags` class for details on the bitmasks.
"""
lc = read_generic_lightcurve(
filename,
flux_column=flux_column.lower(),
time_format="btjd",
quality_column="DQUALITY",
)
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
quality_mask = TessQualityFlags.create_quality_mask(
quality_array=lc["dquality"], bitmask=quality_bitmask
)
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "PATHOS"
lc.meta["TARGETID"] = lc.meta.get("TICID")
lc.meta["QUALITY_BITMASK"] = quality_bitmask
lc.meta["QUALITY_MASK"] = quality_mask
    # PATHOS light curves are normalized by default
lc.meta["NORMALIZED"] = True
return TessLightCurve(data=lc)
| 2,439
| 36.538462
| 108
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/__init__.py
|
"""The .io sub-package provides functions for reading data."""
from astropy.io import registry
from .. import LightCurve
from . import (
cdips,
eleanor,
everest,
k2sff,
kepler,
kepseismic,
pathos,
qlp,
tasoc,
tess,
tglc,
)
from .detect import *
from .read import *
__all__ = ["read", "open"]
# We intend the reader functions to be accessed via `LightCurve.read()`,
# so we add them to the `astropy.io.registry`.
try:
registry.register_reader("kepler", LightCurve, kepler.read_kepler_lightcurve)
registry.register_reader("tess", LightCurve, tess.read_tess_lightcurve)
registry.register_reader("qlp", LightCurve, qlp.read_qlp_lightcurve)
registry.register_reader("eleanor", LightCurve, eleanor.read_eleanor_lightcurve)
registry.register_reader("k2sff", LightCurve, k2sff.read_k2sff_lightcurve)
registry.register_reader("everest", LightCurve, everest.read_everest_lightcurve)
registry.register_reader("pathos", LightCurve, pathos.read_pathos_lightcurve)
registry.register_reader("cdips", LightCurve, cdips.read_cdips_lightcurve)
registry.register_reader("tasoc", LightCurve, tasoc.read_tasoc_lightcurve)
registry.register_reader(
"kepseismic", LightCurve, kepseismic.read_kepseismic_lightcurve
)
registry.register_reader("tglc", LightCurve, tglc.read_tglc_lightcurve)
except registry.IORegistryError:
pass # necessary to enable autoreload during debugging
| 1,462
| 33.833333
| 84
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/tess.py
|
"""Reader for official TESS light curve FITS files produced by the Ames SPOC pipeline."""
from ..lightcurve import TessLightCurve
from ..utils import TessQualityFlags
from .generic import read_generic_lightcurve
def read_tess_lightcurve(
filename, flux_column="pdcsap_flux", quality_bitmask="default"
):
"""Returns a TESS `~lightkurve.lightcurve.LightCurve`.
Parameters
----------
filename : str
Local path or remote url of a TESS light curve FITS file.
flux_column : 'pdcsap_flux' or 'sap_flux'
Which column in the FITS file contains the preferred flux data?
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored.
* "default": cadences with severe quality issues will be ignored.
* "hard": more conservative choice of flags to ignore.
This is known to remove good data.
* "hardest": removes all data that has been flagged.
This mask is not recommended.
See the `~lightkurve.utils.TessQualityFlags` class for details on the bitmasks.
"""
lc = read_generic_lightcurve(filename, flux_column=flux_column, time_format="btjd")
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
quality_mask = TessQualityFlags.create_quality_mask(
quality_array=lc["quality"], bitmask=quality_bitmask
)
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "SPOC"
lc.meta["TARGETID"] = lc.meta.get("TICID")
lc.meta["QUALITY_BITMASK"] = quality_bitmask
lc.meta["QUALITY_MASK"] = quality_mask
return TessLightCurve(data=lc)
| 1,985
| 39.530612
| 89
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/read.py
|
"""Functions for reading light curve data."""
import logging
from astropy.io import fits
from astropy.utils import deprecated
from ..lightcurve import KeplerLightCurve, TessLightCurve
from ..utils import LightkurveDeprecationWarning, LightkurveError
from .detect import detect_filetype
log = logging.getLogger(__name__)
__all__ = ["open", "read"]
@deprecated("2.0", alternative="read()", warning_type=LightkurveDeprecationWarning)
def open(path_or_url, **kwargs):
"""DEPRECATED. Please use `lk.read()` instead.
This function has been deprecated because its name collides with Python's
built-in `open()` function.
"""
return read(path_or_url, **kwargs)
def read(path_or_url, **kwargs):
"""Reads any valid Kepler or TESS data file and returns an instance of
`~lightkurve.lightcurve.LightCurve` or
`TargetPixelFile <../targetpixelfile.html>`_
This function will automatically detect the type of the data product, and return the
appropriate object. File types currently supported include::
* `KeplerTargetPixelFile` (typical suffix "-targ.fits.gz");
* `KeplerLightCurve` (typical suffix "llc.fits");
* `TessTargetPixelFile` (typical suffix "_tp.fits");
* `TessLightCurve` (typical suffix "_lc.fits").
Parameters
----------
path_or_url : str
Path or URL of a FITS file.
quality_bitmask : str or int, optional
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored
* "default": cadences with severe quality issues will be ignored
* "hard": more conservative choice of flags to ignore
This is known to remove good data.
* "hardest": removes all data that has been flagged
This mask is not recommended.
See the :class:`KeplerQualityFlags <lightkurve.utils.KeplerQualityFlags>` or :class:`TessQualityFlags <lightkurve.utils.TessQualityFlags>` class for details on the bitmasks.
flux_column : str, optional
(Applicable to LightCurve products only) The column in the FITS file to be read as `flux`. Defaults to 'pdcsap_flux'.
Typically 'pdcsap_flux' or 'sap_flux'.
**kwargs : dict
Dictionary of arguments to be passed to underlying data product type specific reader.
Returns
-------
data : a subclass of `~lightkurve.lightcurve.LightCurve` or `TargetPixelFile <../targetpixelfile.html>`_
depending on the detected file type.
Raises
------
ValueError : raised if the data product is not recognized as a Kepler or TESS product.
Examples
--------
To read a target pixel file using its path or URL, simply use:
>>> import lightkurve as lk
>>> tpf = lk.read("mytpf.fits") # doctest: +SKIP
"""
log.debug("Opening {}.".format(path_or_url))
# pass header into `detect_filetype()`
try:
with fits.open(path_or_url) as temp:
filetype = detect_filetype(temp)
log.debug("Detected filetype: '{}'.".format(filetype))
except OSError as e:
filetype = None
# Raise an explicit FileNotFoundError if file not found
if "No such file" in str(e):
raise e
try:
if filetype == "KeplerLightCurve":
return KeplerLightCurve.read(path_or_url, format="kepler", **kwargs)
elif filetype == "TessLightCurve":
return TessLightCurve.read(path_or_url, format="tess", **kwargs)
elif filetype == "QLP":
return TessLightCurve.read(path_or_url, format="qlp", **kwargs)
elif filetype == "ELEANOR":
return TessLightCurve.read(path_or_url, format="eleanor", **kwargs)
elif filetype == "PATHOS":
return TessLightCurve.read(path_or_url, format="pathos", **kwargs)
elif filetype == "CDIPS":
return TessLightCurve.read(path_or_url, format="cdips", **kwargs)
elif filetype == "TASOC":
return TessLightCurve.read(path_or_url, format="tasoc", **kwargs)
elif filetype == "K2SFF":
return KeplerLightCurve.read(path_or_url, format="k2sff", **kwargs)
elif filetype == "EVEREST":
return KeplerLightCurve.read(path_or_url, format="everest", **kwargs)
elif filetype == "KEPSEISMIC":
return KeplerLightCurve.read(path_or_url, format="kepseismic", **kwargs)
elif filetype == "TGLC":
return TessLightCurve.read(path_or_url, format="tglc", **kwargs)
except BaseException as exc:
# ensure path_or_url is in the error
raise LightkurveError(
f"Error in reading Data product {path_or_url} of type {filetype} .\n"
"This file may be corrupt due to an interrupted download. "
"Please remove it from your disk and try again."
) from exc
# Official data products;
# if the filetype is recognized, instantiate a class of that name
if filetype is not None:
try:
return getattr(__import__("lightkurve"), filetype)(path_or_url, **kwargs)
except AttributeError as exc:
raise LightkurveError(
f"Data product f{path_or_url} of type {filetype} is not supported "
"in this version of Lightkurve."
) from exc
else:
# if these keywords don't exist, raise `ValueError`
raise LightkurveError(
"Not recognized as a supported data product:\n"
f"{path_or_url}\n"
"This file may be corrupt due to an interrupted download. "
"Please remove it from your disk and try again."
)
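# Editor's note: a hedged usage sketch (added by the editor), mirroring the
# doctest above; "mytpf.fits" is a placeholder for any local data product.
if __name__ == "__main__":
    data = read("mytpf.fits")
    print(type(data).__name__)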
| 5,788
| 40.35
| 181
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/io/kepler.py
|
"""Reader for official Kepler light curve FITS files produced by the Ames pipeline."""
from ..lightcurve import KeplerLightCurve
from ..utils import KeplerQualityFlags
from .generic import read_generic_lightcurve
def read_kepler_lightcurve(
filename, flux_column="pdcsap_flux", quality_bitmask="default"
):
"""Returns a Kepler `~lightkurve.lightcurve.LightCurve`.
Parameters
----------
filename : str
Local path or remote url of a Kepler light curve FITS file.
flux_column : 'pdcsap_flux' or 'sap_flux'
Which column in the FITS file contains the preferred flux data?
quality_bitmask : str or int
Bitmask (integer) which identifies the quality flag bitmask that should
be used to mask out bad cadences. If a string is passed, it has the
following meaning:
* "none": no cadences will be ignored (`quality_bitmask=0`).
* "default": cadences with severe quality issues will be ignored
(`quality_bitmask=1130799`).
* "hard": more conservative choice of flags to ignore
(`quality_bitmask=1664431`). This is known to remove good data.
* "hardest": removes all data that has been flagged
(`quality_bitmask=2096639`). This mask is not recommended.
See the `~lightkurve.utils.KeplerQualityFlags` class for details on the bitmasks.
"""
lc = read_generic_lightcurve(
filename,
flux_column=flux_column,
quality_column="sap_quality",
time_format="bkjd",
)
# Filter out poor-quality data
# NOTE: Unfortunately Astropy Table masking does not yet work for columns
# that are Quantity objects, so for now we remove poor-quality data instead
# of masking. Details: https://github.com/astropy/astropy/issues/10119
quality_mask = KeplerQualityFlags.create_quality_mask(
quality_array=lc["sap_quality"], bitmask=quality_bitmask
)
lc = lc[quality_mask]
lc.meta["AUTHOR"] = "Kepler"
lc.meta["TARGETID"] = lc.meta.get("KEPLERID")
lc.meta["QUALITY_BITMASK"] = quality_bitmask
lc.meta["QUALITY_MASK"] = quality_mask
return KeplerLightCurve(data=lc)
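# Minimal sketch of the `quality_bitmask` presets described above
# (hypothetical helper; the filename is an assumption): stricter bitmasks
# keep fewer cadences.
def _example_quality_masks(filename="kplr_llc.fits"):
    for bitmask in ("none", "default", "hard", "hardest"):
        lc = read_kepler_lightcurve(filename, quality_bitmask=bitmask)
        print(bitmask, len(lc))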
| 2,196
| 38.945455
| 89
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/config/__init__.py
|
import os
import warnings
import astropy.config as astropyconfig
ROOTNAME = 'lightkurve'
class ConfigNamespace(astropyconfig.ConfigNamespace):
rootname = ROOTNAME
class ConfigItem(astropyconfig.ConfigItem):
rootname = ROOTNAME
def get_config_dir():
"""
Determines the package configuration directory name and creates the
directory if it doesn't exist.
This directory is typically ``$HOME/.lightkurve/config``, but if the
XDG_CONFIG_HOME environment variable is set and the
``$XDG_CONFIG_HOME/lightkurve`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
Returns
-------
configdir : str
The absolute path to the configuration directory.
"""
return astropyconfig.get_config_dir(ROOTNAME)
def get_cache_dir():
"""
Determines the default Lightkurve cache directory name and creates the
    directory if it doesn't exist. If the directory cannot be accessed or created,
then it returns the current directory (``"."``).
This directory is typically ``$HOME/.lightkurve/cache``, but if the
XDG_CACHE_HOME environment variable is set and the
``$XDG_CACHE_HOME/lightkurve`` directory exists, it will be that directory.
If neither exists, the former will be created and symlinked to the latter.
The value can be also configured via ``cache_dir`` configuration parameter.
Returns
-------
cachedir : str
The absolute path to the cache directory.
Examples
--------
To configure "/my_research/data" as the `cache_dir`, users can set it:
1. in the user's ``lightkurve.cfg`` file::
[config]
cache_dir = /my_research/data
2. at run time::
import lightkurve as lk
lk.conf.cache_dir = '/my_research/data'
See :ref:`configuration <api.config>` for more information.
"""
from .. import conf
cache_dir = conf.cache_dir
if cache_dir is None or cache_dir == "":
cache_dir = astropyconfig.get_cache_dir(ROOTNAME)
cache_dir = _ensure_cache_dir_exists(cache_dir)
cache_dir = os.path.abspath(cache_dir)
return cache_dir
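# Minimal sketch of the precedence implemented by `get_cache_dir` above
# (hypothetical helper; the path is an assumption): a non-empty
# ``conf.cache_dir`` wins, otherwise the Astropy default for the
# ``lightkurve`` root name is used.
def _example_cache_dir_precedence():
    from .. import conf
    conf.cache_dir = "/my_research/data"  # assumed writable path
    print(get_cache_dir())                # -> /my_research/data (if creatable)
    conf.cache_dir = ""                   # restore the Astropy default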
def _ensure_cache_dir_exists(cache_dir):
if os.path.isdir(cache_dir):
return cache_dir
else:
# if it doesn't exist, make a new cache directory
try:
os.mkdir(cache_dir)
        # use the current dir if an OS error occurs
except OSError:
warnings.warn(
"Warning: unable to create {} as cache dir "
" (for downloading MAST files, etc.). Use the current "
"working directory instead.".format(cache_dir)
)
cache_dir = "."
return cache_dir
def warn_if_default_cache_dir_migration_needed():
from .. import conf
if not conf.warn_legacy_cache_dir:
return
cache_dir = conf.cache_dir
    if not (cache_dir is None or cache_dir == ""):
        # If a user has specified a custom cache dir, the check won't be performed.
# Not only is the check somewhat irrelevant, the behavior is also required
# to support the case that the user configures the legacy `~/.lightkurve-cache`
# as the cache dir (e.g., to support running other apps/packages that require
# older lightkurve, especially lightkurve v1.x.)
return
# migration check done only if default is used
old_cache_dir = os.path.join(os.path.expanduser("~"), ".lightkurve-cache")
new_cache_dir = os.path.join(os.path.expanduser("~"), ".lightkurve", "cache")
if os.path.isdir(old_cache_dir):
warnings.warn(
f"The default Lightkurve cache directory, used by download(), etc., has been moved to {new_cache_dir}. "
f"Please move all the files in the legacy directory {old_cache_dir} to the new location "
f"and remove the legacy directory. "
f"Refer to https://docs.lightkurve.org/reference/config.html#default-cache-directory-migration "
f"for more information."
)
| 4,091
| 31.47619
| 116
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/correctors/pldcorrector.py
|
"""Defines a `PLDCorrector` class which provides a simple way to correct a
light curve by utilizing the pixel time series data contained within the
target's own Target Pixel File.
`PLDCorrector` builds upon `RegressionCorrector` by correlating the light curve
against a design matrix composed of the following elements:
* A background light curve to capture the dominant scattered light systematics.
* Background-corrected pixel time series to capture any residual systematics.
* Splines to capture the target's intrinsic variability.
"""
import logging
import warnings
from itertools import combinations_with_replacement as multichoose
import numpy as np
import matplotlib.pyplot as plt
from astropy.utils.decorators import deprecated, deprecated_renamed_argument
from .designmatrix import (
DesignMatrix,
DesignMatrixCollection,
SparseDesignMatrixCollection,
)
from .regressioncorrector import RegressionCorrector
from .designmatrix import create_spline_matrix, create_sparse_spline_matrix
from .. import MPLSTYLE
from ..utils import LightkurveDeprecationWarning
log = logging.getLogger(__name__)
__all__ = ["PLDCorrector", "TessPLDCorrector"]
class PLDCorrector(RegressionCorrector):
r"""Implements the Pixel Level Decorrelation (PLD) systematics removal method.
Special case of `.RegressionCorrector` where the `.DesignMatrix` is
composed of background-corrected pixel time series.
    The design matrix also contains columns representing a spline in time,
    designed to capture the intrinsic, long-term variability of the target.
Pixel Level Decorrelation (PLD) was developed by [1]_ to remove
systematic noise caused by spacecraft jitter for the Spitzer
Space Telescope. It was adapted to K2 data by [2]_ and [3]_
for the EVEREST pipeline [4]_.
For a detailed description and implementation of PLD, please refer to
these references. Lightkurve provides a reference implementation
of PLD that is less sophisticated than EVEREST, but is suitable
for quick-look analyses and detrending experiments.
Our simple implementation of PLD is performed by first calculating the
noise model for each cadence in time. This function goes up to arbitrary
order, and is represented by
.. math::
m_i = \sum_l a_l \frac{f_{il}}{\sum_k f_{ik}} + \sum_l \sum_m b_{lm} \frac{f_{il}f_{im}}{\left( \sum_k f_{ik} \right)^2} + ...
where
- :math:`m_i` is the noise model at time :math:`t_i`
- :math:`f_{il}` is the flux in the :math:`l^\text{th}` pixel at time :math:`t_i`
- :math:`a_l` is the first-order PLD coefficient on the linear term
- :math:`b_{lm}` is the second-order PLD coefficient on the :math:`l^\text{th}`,
:math:`m^\text{th}` pixel pair
We perform Principal Component Analysis (PCA) to reduce the number of
vectors in our final model to limit the set to best capture instrumental
noise. With a PCA-reduced set of vectors, we can construct a design matrix
containing fractional pixel fluxes.
To solve for the PLD model, we need to minimize the difference squared
.. math::
\chi^2 = \sum_i \frac{(y_i - m_i)^2}{\sigma_i^2},
where :math:`y_i` is the observed flux value at time :math:`t_i`, by solving
.. math::
\frac{\partial \chi^2}{\partial a_l} = 0.
    The design matrix also contains columns representing a spline in time,
    designed to capture the intrinsic, long-term variability of the target.
Examples
--------
Download the pixel data for GJ 9827 and obtain a PLD-corrected light curve:
>>> import lightkurve as lk
>>> tpf = lk.search_targetpixelfile("GJ9827").download() # doctest: +SKIP
>>> corrector = tpf.to_corrector('pld') # doctest: +SKIP
>>> lc = corrector.correct() # doctest: +SKIP
>>> lc.plot() # doctest: +SKIP
However, the above example will over-fit the small transits!
It is necessary to mask the transits using `corrector.correct(cadence_mask=...)`.
References
----------
.. [1] Deming et al. (2015), ads:2015ApJ...805..132D.
(arXiv:1411.7404)
.. [2] Luger et al. (2016), ads:2016AJ....152..100L
(arXiv:1607.00524)
.. [3] Luger et al. (2018), ads:2018AJ....156...99L
(arXiv:1702.05488)
.. [4] EVEREST pipeline webpage, https://rodluger.github.io/everest
"""
def __init__(self, tpf, aperture_mask=None):
if aperture_mask is None:
aperture_mask = tpf.create_threshold_mask(3)
self.aperture_mask = aperture_mask
lc = tpf.to_lightcurve(aperture_mask=aperture_mask)
# Remove cadences that have NaN flux (cf. #874). We don't simply call
# `lc.remove_nans()` here because we need to mask both lc & tpf.
nan_mask = np.isnan(lc.flux)
lc = lc[~nan_mask]
self.tpf = tpf[~nan_mask]
super().__init__(lc=lc)
def __repr__(self):
return "PLDCorrector (ID: {})".format(self.lc.label)
def create_design_matrix(
self,
pld_order=3,
pca_components=16,
pld_aperture_mask=None,
background_aperture_mask="background",
spline_n_knots=None,
spline_degree=3,
normalize_background_pixels=None,
sparse=False,
):
"""Returns a `.DesignMatrixCollection` containing a `DesignMatrix` object
for the background regressors, the PLD pixel component regressors, and
the spline regressors.
If the parameters `pld_order` and `pca_components` are None, their
value will be assigned based on the mission. K2 and TESS experience
different dominant sources of noise, and require different defaults.
For information about how the defaults were chosen, see Pull Request #746.
Parameters
----------
pld_order : int
The order of Pixel Level De-correlation to be performed. First order
(`n=1`) uses only the pixel fluxes to construct the design matrix.
Higher order populates the design matrix with columns constructed
from the products of pixel fluxes.
pca_components : int or tuple of int
Number of terms added to the design matrix for each order of PLD
pixel fluxes. Increasing this value may provide higher precision
at the expense of slower speed and/or overfitting.
If performing PLD with `pld_order > 1`, `pca_components` can be
a tuple containing the number of terms for each order of PLD.
If a single int is passed, the same number of terms will be used
for each order. If zero is passed, PCA will not be performed.
Defaults to 16 for K2 and 8 for TESS.
pld_aperture_mask : array-like, 'pipeline', 'all', 'threshold', or None
A boolean array describing the aperture such that `True` means
that the pixel will be used when selecting the PLD basis vectors.
If `None` or `all` are passed in, all pixels will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
spline_n_knots : int
Number of knots in spline.
spline_degree : int
Polynomial degree of spline.
sparse : bool
Whether to create `SparseDesignMatrix`.
Returns
-------
dm : `.DesignMatrixCollection`
`.DesignMatrixCollection` containing pixel, background, and spline
components.
"""
# Validate the inputs
pld_aperture_mask = self.tpf._parse_aperture_mask(pld_aperture_mask)
self.pld_aperture_mask = pld_aperture_mask
background_aperture_mask = self.tpf._parse_aperture_mask(
background_aperture_mask
)
self.background_aperture_mask = background_aperture_mask
if spline_n_knots is None:
# Default to a spline per 50 data points
spline_n_knots = int(len(self.lc) / 50)
if sparse:
DMC = SparseDesignMatrixCollection
spline = create_sparse_spline_matrix
else:
DMC = DesignMatrixCollection
spline = create_spline_matrix
# We set the width of all coefficient priors to 10 times the standard
# deviation to prevent the fit from going crazy.
prior_sigma = np.nanstd(self.lc.flux.value) * 10
# Flux normalize background components for K2 and not for TESS by default
bkg_pixels = self.tpf.flux[:, background_aperture_mask].reshape(
len(self.tpf.flux), -1
)
if normalize_background_pixels:
bkg_flux = np.nansum(self.tpf.flux[:, background_aperture_mask], -1)
bkg_pixels = np.array([r / f for r, f in zip(bkg_pixels, bkg_flux)])
else:
bkg_pixels = bkg_pixels.value
# Remove NaNs
bkg_pixels = np.array([r[np.isfinite(r)] for r in bkg_pixels])
# Create background design matrix
dm_bkg = DesignMatrix(bkg_pixels, name="background")
# Apply PCA
dm_bkg = dm_bkg.pca(pca_components)
# Set prior sigma to 10 * standard deviation
dm_bkg.prior_sigma = np.ones(dm_bkg.shape[1]) * prior_sigma
        # Create a design matrix containing splines plus a constant
dm_spline = spline(
self.lc.time.value, n_knots=spline_n_knots, degree=spline_degree
).append_constant()
# Set prior sigma to 10 * standard deviation
dm_spline.prior_sigma = np.ones(dm_spline.shape[1]) * prior_sigma
# Create a PLD matrix if there are pixels in the pld_aperture_mask
if np.sum(pld_aperture_mask) != 0:
# Flux normalize the PLD components
pld_pixels = self.tpf.flux[:, pld_aperture_mask].reshape(
len(self.tpf.flux), -1
)
pld_pixels = np.array(
[r / f for r, f in zip(pld_pixels, self.lc.flux.value)]
)
# Remove NaNs
pld_pixels = np.array([r[np.isfinite(r)] for r in pld_pixels])
# Use the DesignMatrix infrastructure to apply PCA to the regressors.
regressors_dm = DesignMatrix(pld_pixels)
if pca_components > 0:
regressors_dm = regressors_dm.pca(pca_components)
regressors_pld = regressors_dm.values
# Create a DesignMatrix for each PLD order
all_pld = []
for order in range(1, pld_order + 1):
                reg_n = np.prod(list(multichoose(regressors_pld.T, order)), axis=1).T
pld_n = DesignMatrix(
reg_n,
prior_sigma=np.ones(reg_n.shape[1]) * prior_sigma / reg_n.shape[1],
name=f"pld_order_{order}",
)
# Apply PCA.
if pca_components > 0:
pld_n = pld_n.pca(pca_components)
# Calling pca() resets the priors, so we set them again.
pld_n.prior_sigma = (
np.ones(pld_n.shape[1]) * prior_sigma / pca_components
)
all_pld.append(pld_n)
# Create the collection of DesignMatrix objects.
# DesignMatrix 1 contains the PLD pixel series
dm_pixels = DesignMatrixCollection(all_pld).to_designmatrix(
name="pixel_series"
)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*Not all matrices are `SparseDesignMatrix` objects..*",
)
dm_collection = DMC([dm_pixels, dm_bkg, dm_spline])
else:
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore",
message=".*Not all matrices are `SparseDesignMatrix` objects..*",
)
dm_collection = DMC([dm_bkg, dm_spline])
return dm_collection
@deprecated_renamed_argument(
"n_pca_terms",
"pca_components",
"2.0",
warning_type=LightkurveDeprecationWarning,
)
@deprecated_renamed_argument(
"use_gp", None, "2.0", warning_type=LightkurveDeprecationWarning
)
@deprecated_renamed_argument(
"gp_timescale", None, "2.0", warning_type=LightkurveDeprecationWarning
)
@deprecated_renamed_argument(
"aperture_mask", None, "2.0", warning_type=LightkurveDeprecationWarning
)
def correct(
self,
pld_order=None,
pca_components=None,
pld_aperture_mask=None,
background_aperture_mask="background",
spline_n_knots=None,
spline_degree=5,
normalize_background_pixels=None,
restore_trend=True,
sparse=False,
cadence_mask=None,
sigma=5,
niters=5,
propagate_errors=False,
use_gp=None,
gp_timescale=None,
aperture_mask=None,
):
"""Returns a systematics-corrected light curve.
If the parameters `pld_order` and `pca_components` are None, their
value will be assigned based on the mission. K2 and TESS experience
different dominant sources of noise, and require different defaults.
For information about how the defaults were chosen, see PR #746 at
https://github.com/lightkurve/lightkurve/pull/746#issuecomment-658458270
Parameters
----------
pld_order : int
The order of Pixel Level De-correlation to be performed. First order
(`n=1`) uses only the pixel fluxes to construct the design matrix.
Higher order populates the design matrix with columns constructed
from the products of pixel fluxes. Default 3 for K2 and 1 for TESS.
pca_components : int
Number of terms added to the design matrix for each order of PLD
pixel fluxes. Increasing this value may provide higher precision
at the expense of slower speed and/or overfitting.
pld_aperture_mask : array-like, 'pipeline', 'all', 'threshold', or None
A boolean array describing the aperture such that `True` means
that the pixel will be used when selecting the PLD basis vectors.
If `None` or `all` are passed in, all pixels will be used.
If 'pipeline' is passed, the mask suggested by the official pipeline
will be returned.
If 'threshold' is passed, all pixels brighter than 3-sigma above
the median flux will be used.
spline_n_knots : int
Number of knots in spline.
spline_degree : int
Polynomial degree of spline.
restore_trend : bool
Whether to restore the long term spline trend to the light curve.
sparse : bool
Whether to create `SparseDesignMatrix`.
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
sigma : int (default 5)
Standard deviation at which to remove outliers from fitting
niters : int (default 5)
Number of iterations to fit and remove outliers
propagate_errors : bool (default False)
Whether to propagate the uncertainties from the regression. Default is False.
Setting to True will increase run time, but will sample from multivariate normal
distribution of weights.
use_gp, gp_timescale : DEPRECATED
As of Lightkurve v2.0 PLDCorrector uses splines instead of Gaussian Processes.
aperture_mask : DEPRECATED
As of Lightkurve v2.0 the `aperture_mask` parameter needs to be
passed to the class constructor.
Returns
-------
clc : `.LightCurve`
Noise-corrected `.LightCurve`.
"""
self.restore_trend = restore_trend
# Set mission-specific values for pld_order and pca_components
if pld_order is None:
if self.tpf.meta.get("MISSION") == "K2":
pld_order = 3
else:
pld_order = 1
if pca_components is None:
if self.tpf.meta.get("MISSION") == "K2":
pca_components = 16
else:
pca_components = 3
if pld_aperture_mask is None:
if self.tpf.meta.get("MISSION") == "K2":
# K2 noise is dominated by motion
pld_aperture_mask = "threshold"
else:
# TESS noise is dominated by background
pld_aperture_mask = "empty"
if normalize_background_pixels is None:
if self.tpf.meta.get("MISSION") == "K2":
normalize_background_pixels = True
else:
normalize_background_pixels = False
dm = self.create_design_matrix(
pld_aperture_mask=pld_aperture_mask,
background_aperture_mask=background_aperture_mask,
pld_order=pld_order,
pca_components=pca_components,
spline_n_knots=spline_n_knots,
spline_degree=spline_degree,
normalize_background_pixels=normalize_background_pixels,
sparse=sparse,
)
clc = super().correct(
dm,
cadence_mask=cadence_mask,
sigma=sigma,
niters=niters,
propagate_errors=propagate_errors,
)
if restore_trend:
clc += self.diagnostic_lightcurves["spline"] - np.median(
self.diagnostic_lightcurves["spline"].flux
)
return clc
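    # Minimal sketch of masking known transits before calling `correct()`,
    # per the over-fitting caveat in the class docstring (hypothetical
    # helper; the ephemeris values are assumptions):
    @staticmethod
    def _example_transit_cadence_mask(lc, period=6.2, t0=2001.0, duration=0.2):
        """Return a cadence_mask that is False in transit, True elsewhere."""
        phase = (lc.time.value - t0 + 0.5 * period) % period - 0.5 * period
        return np.abs(phase) > 0.5 * duration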
def diagnose(self):
"""Returns diagnostic plots to assess the most recent call to `correct()`.
If `correct()` has not yet been called, a ``ValueError`` will be raised.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
        if not hasattr(self, "corrected_lc"):
raise ValueError(
"You need to call the `correct()` method "
"before you can call `diagnose()`."
)
names = self.diagnostic_lightcurves.keys()
# Plot the right version of corrected light curve
if self.restore_trend:
clc = (
self.corrected_lc
+ self.diagnostic_lightcurves["spline"]
- np.median(self.diagnostic_lightcurves["spline"].flux)
)
else:
clc = self.corrected_lc
uncorr_cdpp = self.lc.estimate_cdpp()
corr_cdpp = clc.estimate_cdpp()
# Get y-axis limits
ylim = [
min(min(self.lc.flux.value), min(clc.flux.value)),
max(max(self.lc.flux.value), max(clc.flux.value)),
]
# Use lightkurve plotting style
with plt.style.context(MPLSTYLE):
# Plot background model
_, axs = plt.subplots(3, figsize=(10, 9), sharex=True)
ax = axs[0]
self.lc.plot(
ax=ax,
normalize=False,
clip_outliers=True,
label=f"uncorrected ({uncorr_cdpp:.0f})",
)
ax.set_xlabel("")
ax.set_ylim(ylim) # use same ylim for all plots
# Plot pixel and spline components
ax = axs[1]
clc.plot(
ax=ax, normalize=False, alpha=0.4, label=f"corrected ({corr_cdpp:.0f})"
)
for key, color in zip(names, ["dodgerblue", "r", "C1"]):
if key in ["background", "spline", "pixel_series"]:
tmplc = (
self.diagnostic_lightcurves[key]
- np.median(self.diagnostic_lightcurves[key].flux)
+ np.median(self.lc.flux)
)
tmplc.plot(ax=ax, c=color)
ax.set_xlabel("")
ax.set_ylim(ylim)
# Plot final corrected light curve with outliers marked
ax = axs[2]
self.lc.plot(
ax=ax,
normalize=False,
alpha=0.2,
label=f"uncorrected ({uncorr_cdpp:.0f})",
)
clc[self.outlier_mask].scatter(
normalize=False, c="r", marker="x", s=10, label="outlier_mask", ax=ax
)
clc[~self.cadence_mask].scatter(
normalize=False,
c="dodgerblue",
marker="x",
s=10,
label="~cadence_mask",
ax=ax,
)
clc.plot(
normalize=False, ax=ax, c="k", label=f"corrected ({corr_cdpp:.0f})"
)
ax.set_ylim(ylim)
return axs
def diagnose_masks(self):
"""Show different aperture masks used by PLD in the most recent call to
`correct()`. If `correct()` has not yet been called, a ``ValueError``
will be raised.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if not hasattr(self, "corrected_lc"):
raise ValueError(
"You need to call the `correct()` method "
"before you can call `diagnose()`."
)
# Use lightkurve plotting style
with plt.style.context(MPLSTYLE):
_, axs = plt.subplots(1, 3, figsize=(12, 4), sharey=True)
# Show light curve aperture mask
ax = axs[0]
self.tpf.plot(
ax=ax,
show_colorbar=False,
aperture_mask=self.aperture_mask,
title="aperture_mask",
)
# Show PLD pixel mask
ax = axs[1]
self.tpf.plot(
ax=ax,
show_colorbar=False,
aperture_mask=self.pld_aperture_mask,
title="pld_aperture_mask",
)
ax = axs[2]
self.tpf.plot(
ax=ax,
show_colorbar=False,
aperture_mask=self.background_aperture_mask,
title="background_aperture_mask",
)
return axs
# `TessPLDCorrector` was briefly introduced in Lightkurve v1.9
# but was removed in v2.0 in favor of a single generic `PLDCorrector`.
@deprecated(
"2.0", alternative="PLDCorrector", warning_type=LightkurveDeprecationWarning
)
class TessPLDCorrector(PLDCorrector):
pass
| 22,759
| 38.929825
| 134
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/correctors/sffcorrector.py
|
"""Defines the `SFFCorrector` class.
`SFFCorrector` enables systematics to be removed from light curves using the
Self Flat-Fielding (SFF) method described in Vanderburg and Johnson (2014).
"""
import logging
import warnings
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from astropy.modeling import models, fitting
from astropy.units import Quantity
from . import DesignMatrix, DesignMatrixCollection, SparseDesignMatrixCollection
from .regressioncorrector import RegressionCorrector
from .designmatrix import create_spline_matrix, create_sparse_spline_matrix
from .. import MPLSTYLE
from ..utils import LightkurveWarning
log = logging.getLogger(__name__)
__all__ = ["SFFCorrector"]
class SFFCorrector(RegressionCorrector):
"""Special case of `.RegressionCorrector` where the `.DesignMatrix` includes
the target's centroid positions.
    The design matrix also contains columns representing a spline in time,
    designed to capture the intrinsic, long-term variability of the target.
Parameters
----------
lc : `.LightCurve`
The light curve that needs to be corrected.
"""
def __init__(self, lc):
if getattr(lc, "mission", "") == "TESS":
warnings.warn(
"The SFF correction method is not suitable for use "
"with TESS data, because the spacecraft motion does "
"not proceed along a consistent arc.",
LightkurveWarning,
)
self.raw_lc = lc
if lc.flux.unit.to_string() == "":
lc = lc.copy()
else:
lc = lc.copy().normalize()
# Setting these values as None so we don't get a value error if the
# user tries to access them before "correct()"
self.window_points = None
self.windows = None
self.bins = None
self.timescale = None
self.breakindex = None
self.centroid_col = None
self.centroid_row = None
super(SFFCorrector, self).__init__(lc=lc)
def __repr__(self):
return "SFFCorrector (LC: {})".format(self.lc.meta.get("TARGETID"))
def correct(
self,
centroid_col=None,
centroid_row=None,
windows=20,
bins=5,
timescale=1.5,
breakindex=None,
degree=3,
restore_trend=False,
additional_design_matrix=None,
polyorder=None,
sparse=False,
**kwargs
):
"""Find the best fit correction for the light curve.
Parameters
----------
centroid_col : np.ndarray of floats (optional)
Array of centroid column positions. If ``None``, will use the
`centroid_col` attribute of the input light curve by default.
centroid_row : np.ndarray of floats (optional)
Array of centroid row positions. If ``None``, will use the
`centroid_row` attribute of the input light curve by default.
windows : int
Number of windows to split the data into to perform the correction.
Default 20.
bins : int
Number of "knots" to place on the arclength spline. More bins will
increase the number of knots, making the spline smoother in arclength.
            Default 5.
        timescale : float
Time scale of the b-spline fit to the light curve in time, in units
of input light curve time.
breakindex : None, int or list of ints (optional)
Optionally the user can break the light curve into sections. Set
            break index to either an index at which to break, or a list of indices.
degree : int
The degree of polynomials in the splines in time and arclength. Higher
values will create smoother splines. Default 3.
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
sigma : int (default 5)
Standard deviation at which to remove outliers from fitting
niters : int (default 5)
Number of iterations to fit and remove outliers
restore_trend : bool (default False)
Whether to restore the long term spline trend to the light curve
propagate_errors : bool (default False)
Whether to propagate the uncertainties from the regression. Default is False.
Setting to True will increase run time, but will sample from multivariate normal
distribution of weights.
        additional_design_matrix : `~lightkurve.correctors.DesignMatrix` (optional)
Additional design matrix to remove, e.g. containing background vectors.
polyorder : int
Deprecated as of Lightkurve v1.4. Use ``degree`` instead.
Returns
-------
corrected_lc : `~lightkurve.lightcurve.LightCurve`
Corrected light curve, with noise removed.
"""
DMC, spline = DesignMatrixCollection, create_spline_matrix
if sparse:
DMC, spline = SparseDesignMatrixCollection, create_sparse_spline_matrix
if polyorder is not None:
warnings.warn(
"`polyorder` is deprecated and no longer used, "
"please use the `degree` keyword instead.",
LightkurveWarning,
)
if centroid_col is None:
self.lc = self.lc.remove_nans(column="centroid_col")
centroid_col = self.lc.centroid_col
if centroid_row is None:
self.lc = self.lc.remove_nans(column="centroid_row")
centroid_row = self.lc.centroid_row
if np.any([~np.isfinite(centroid_row), ~np.isfinite(centroid_col)]):
raise ValueError("Centroids contain NaN values.")
self.window_points = _get_window_points(
centroid_col, centroid_row, windows, breakindex=breakindex
)
self.windows = windows
self.bins = bins
self.timescale = timescale
self.breakindex = breakindex
self.arclength = _estimate_arclength(centroid_col, centroid_row)
lower_idx = np.asarray(np.append(0, self.window_points), int)
upper_idx = np.asarray(np.append(self.window_points, len(self.lc.time)), int)
dms = []
for idx, a, b in zip(range(len(lower_idx)), lower_idx, upper_idx):
if isinstance(self.arclength, Quantity):
ar = np.copy(self.arclength.value)
else:
ar = np.copy(self.arclength)
# Temporary workaround for issue #1161: AstroPy v5.0
# Masked arrays cannot be passed to `np.in1d` below
if hasattr(self.arclength, 'mask'):
ar = ar.unmasked
knots = list(np.percentile(ar[a:b], np.linspace(0, 100, bins + 1)[1:-1]))
ar[~np.in1d(ar, ar[a:b])] = 0
dm = spline(ar, knots=knots, degree=degree).copy()
dm.columns = [
"window{}_bin{}".format(idx + 1, jdx + 1) for jdx in range(dm.shape[1])
]
# I'm putting VERY weak priors on the SFF motion vectors
# (1e-6 is being added to prevent sigma from being zero)
ps = np.ones(dm.shape[1]) * 10000 * self.lc[a:b].flux.std() + 1e-6
dm.prior_sigma = ps
dms.append(dm)
sff_dm = DMC(dms).to_designmatrix(name="sff") # .standardize()
# long term
n_knots = int((self.lc.time.value[-1] - self.lc.time.value[0]) / timescale)
s_dm = spline(self.lc.time.value, n_knots=n_knots, name="spline")
means = [np.average(chunk) for chunk in np.array_split(self.lc.flux, n_knots)]
# means = [np.average(self.lc.flux, weights=s_dm.values[:, idx]) for idx in range(s_dm.shape[1])]
s_dm.prior_mu = np.asarray(means)
# I'm putting WEAK priors on the spline that it must be around 1
s_dm.prior_sigma = (
np.ones(len(s_dm.prior_mu)) * 1000 * self.lc.flux.std().value + 1e-6
)
# additional
if additional_design_matrix is not None:
if not isinstance(additional_design_matrix, DesignMatrix):
raise ValueError(
"`additional_design_matrix` must be a DesignMatrix object."
)
self.additional_design_matrix = additional_design_matrix
dm = DMC([s_dm, sff_dm, additional_design_matrix])
else:
dm = DMC([s_dm, sff_dm])
# correct
clc = super(SFFCorrector, self).correct(dm, **kwargs)
# clean
if restore_trend:
trend = self.diagnostic_lightcurves["spline"].flux
clc += trend - np.nanmedian(trend)
clc *= self.raw_lc.flux.mean()
return clc
def diagnose(self):
"""Returns a diagnostic plot which visualizes what happened during the
most recent call to `correct()`."""
axs = self._diagnostic_plot()
for t in self.window_points:
axs[0].axvline(self.lc.time.value[t], color="r", ls="--", alpha=0.3)
def diagnose_arclength(self):
"""Returns a diagnostic plot which visualizes arclength vs flux
from most recent call to `correct()`."""
max_plot = 5
with plt.style.context(MPLSTYLE):
_, axs = plt.subplots(
int(np.ceil(self.windows / max_plot)),
max_plot,
figsize=(10, int(np.ceil(self.windows / max_plot) * 2)),
sharex=True,
sharey=True,
)
axs = np.atleast_2d(axs)
axs[0, 2].set_title("Arclength Plot/Window")
plt.subplots_adjust(hspace=0, wspace=0)
lower_idx = np.asarray(np.append(0, self.window_points), int)
upper_idx = np.asarray(
np.append(self.window_points, len(self.lc.time)), int
)
if hasattr(self, "additional_design_matrix"):
name = self.additional_design_matrix.name
f = (
self.lc.flux
- self.diagnostic_lightcurves["spline"].flux
- self.diagnostic_lightcurves[name].flux
)
else:
f = self.lc.flux - self.diagnostic_lightcurves["spline"].flux
m = self.diagnostic_lightcurves["sff"].flux
idx, jdx = 0, 0
for a, b in zip(lower_idx, upper_idx):
ax = axs[idx, jdx]
if jdx == 0:
ax.set_ylabel("Flux")
ax.scatter(self.arclength[a:b], f[a:b], s=1, label="Data")
ax.scatter(
self.arclength[a:b][~self.cadence_mask[a:b]],
f[a:b][~self.cadence_mask[a:b]],
s=10,
marker="x",
c="r",
label="Outliers",
)
s = np.argsort(self.arclength[a:b])
ax.scatter(
self.arclength[a:b][s],
(m[a:b] - np.median(m[a:b]) + np.median(f[a:b]))[s],
c="C2",
s=0.5,
label="Model",
)
jdx += 1
if jdx >= max_plot:
jdx = 0
idx += 1
if b == len(self.lc.time):
ax.legend()
######################
# Helper functions #
######################
def _get_centroid_dm(col, row, name="centroids"):
"""Returns a `.DesignMatrix` containing (col, row) centroid positions
and transformations thereof.
Parameters
----------
col : np.ndarray
centroid column
row : np.ndarray
centroid row
name : str
Name to pass to `.DesignMatrix` (default: 'centroids').
Returns
-------
    dm : `.DesignMatrix`
        Design matrix with shape len(col) x 10
"""
data = [
col,
row,
col ** 2,
row ** 2,
col ** 3,
row ** 3,
col * row,
col ** 2 * row,
col * row ** 2,
col ** 2 * row ** 2,
]
names = [
r"col",
r"row",
r"col^2",
r"row^2",
r"col^3",
r"row^3",
r"col \times row",
r"col^2 \times row",
r"col \times row^2",
r"col^2 \times row^2",
]
df = pd.DataFrame(np.asarray(data).T, columns=names)
return DesignMatrix(df, name=name)
def _get_thruster_firings(arclength):
"""Find locations where K2 fired thrusters
Parameters
----------
arc : np.ndarray
arclength as a function of time
Returns
-------
thrusters: np.ndarray of bools
True at times where thrusters were fired.
"""
if isinstance(arclength, Quantity):
arc = np.copy(arclength.value)
else:
arc = np.copy(arclength)
# Rate of change of rate of change of arclength wrt time
d2adt2 = np.gradient(np.gradient(arc))
# Fit a Gaussian, most points lie in a tight region, thruster firings are outliers
g = models.Gaussian1D(amplitude=100, mean=0, stddev=0.01)
fitter = fitting.LevMarLSQFitter()
h = np.histogram(
d2adt2[np.isfinite(d2adt2)], np.arange(-0.5, 0.5, 0.0001), density=True
)
xbins = h[1][1:] - np.median(np.diff(h[1]))
g = fitter(g, xbins, h[0], weights=h[0] ** 0.5)
    # Depending on the direction of the roll, the detected outlier can be
    # either the point just before or just after a firing. The helper below
    # makes sure we always return the same point, no matter the roll direction.
def _start_and_end(start_or_end):
"""Find points at the start or end of a roll."""
if start_or_end == "start":
thrusters = (d2adt2 < (g.stddev * -5)) & np.isfinite(d2adt2)
if start_or_end == "end":
thrusters = (d2adt2 > (g.stddev * 5)) & np.isfinite(d2adt2)
# Pick the best thruster in each cluster
idx = np.array_split(
np.arange(len(thrusters)),
np.where(np.gradient(np.asarray(thrusters, int)) == 0)[0],
)
m = np.array_split(
thrusters, np.where(np.gradient(np.asarray(thrusters, int)) == 0)[0]
)
th = []
for jdx, _ in enumerate(idx):
if m[jdx].sum() == 0:
th.append(m[jdx])
else:
th.append(
(
np.abs(np.gradient(arc)[idx[jdx]])
== np.abs(np.gradient(arc)[idx[jdx]][m[jdx]]).max()
)
& m[jdx]
)
thrusters = np.hstack(th)
return thrusters
# Get the start and end points
thrusters = np.asarray([_start_and_end("start"), _start_and_end("end")])
thrusters = thrusters.any(axis=0)
# Take just the first point.
thrusters = (np.gradient(np.asarray(thrusters, int)) >= 0) & thrusters
return thrusters
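# Tiny numeric sketch of the idea used above (hypothetical helper): thruster
# firings appear as outliers in the second derivative of arclength, so a
# crude stand-in for the Gaussian-fit threshold is a plain sigma clip.
def _example_second_derivative_outliers(arc, nsigma=5):
    d2adt2 = np.gradient(np.gradient(np.asarray(arc, float)))
    return np.abs(d2adt2) > nsigma * np.nanstd(d2adt2)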
def _get_window_points(
centroid_col, centroid_row, windows, arclength=None, breakindex=None
):
"""Returns indices where thrusters are fired.
Parameters
----------
    centroid_col, centroid_row : np.ndarray
        Centroid positions of the target
    windows : int
        Number of windows to split the light curve into
    arclength : np.ndarray
        Arclength for the roll motion
    breakindex : int or list of ints
        Cadence where there is a natural break. Windows will be automatically put here.
"""
if arclength is None:
arclength = _estimate_arclength(centroid_col, centroid_row)
# Validate break indices
    if breakindex is None:
        breakindexes = []
    elif isinstance(breakindex, int):
        breakindexes = [breakindex]
    elif (breakindex[0] == 0) & (len(breakindex) == 1):
        breakindexes = []
    else:
        breakindexes = breakindex
if not isinstance(breakindexes, list):
raise ValueError("`breakindex` must be an int or a list")
# If the user asks for break indices we should still return them,
# even if there is only 1 window.
if windows == 1:
return breakindexes
# Find evenly spaced window points
dt = len(centroid_col) / windows
lower_idx = np.append(0, breakindexes)
upper_idx = np.append(breakindexes, len(centroid_col))
window_points = np.hstack(
[np.asarray(np.arange(a, b, dt), int) for a, b in zip(lower_idx, upper_idx)]
)
# Get thruster firings
thrusters = _get_thruster_firings(arclength)
for b in breakindexes:
thrusters[b] = True
thrusters = np.where(thrusters)[0]
# Find the nearest point to each thruster firing, unless it's a user supplied break point
if len(thrusters) > 0:
window_points = [
thrusters[np.argmin(np.abs(thrusters - wp))] + 1
for wp in window_points
if wp not in breakindexes
]
window_points = np.unique(np.hstack([window_points, breakindexes]))
# If the first or last windows are very short (<40% median window length),
# then we add them to the second or penultimate window, respectively,
# by removing their break points.
median_length = np.median(np.diff(window_points))
if window_points[0] < 0.4 * median_length:
window_points = window_points[1:]
if window_points[-1] > (len(centroid_col) - 0.4 * median_length):
window_points = window_points[:-1]
return np.asarray(window_points, dtype=int)
def _estimate_arclength(centroid_col, centroid_row):
"""Estimate the arclength given column and row centroid positions.
We use the approximation that the arclength equals
(row**2 + col**2)**0.5
For this to work, row and column must be correlated not anticorrelated.
"""
col = centroid_col - np.nanmin(centroid_col)
row = centroid_row - np.nanmin(centroid_row)
    # Force col to be correlated, not anticorrelated
if np.polyfit(col.data, row.data, 1)[0] < 0:
col = np.nanmax(col) - col
return (col ** 2 + row ** 2) ** 0.5
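# Worked example (illustrative): a one-pixel diagonal drift per cadence
# yields an arclength of sqrt(2) per cadence.
def _example_arclength():
    col = np.array([0.0, 1.0, 2.0])
    row = np.array([0.0, 1.0, 2.0])
    return _estimate_arclength(col, row)  # -> [0., 1.414..., 2.828...]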
| 18,054
| 34.471513
| 112
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/correctors/cbvcorrector.py
|
"""Defines Corrector classes that utilize Kepler/K2/TESS Cotrending Basis Vectors.
"""
import logging
import copy
import requests
import urllib.request
import glob
import os
import warnings
from astropy.io import fits as pyfits
from astropy.table import Table
from astropy.time import Time
from astropy.timeseries import TimeSeries
from astropy.units import Quantity, Unit, UnitsWarning
from astropy.utils.decorators import deprecated
from astropy.utils.masked import Masked
from bs4 import BeautifulSoup
import matplotlib.pyplot as plt
import numpy as np
from scipy.interpolate import PchipInterpolator
from sklearn import linear_model
from scipy.optimize import minimize_scalar
from .designmatrix import DesignMatrix, DesignMatrixCollection
from .. import MPLSTYLE
from ..lightcurve import LightCurve
from ..utils import channel_to_module_output, validate_method, LightkurveDeprecationWarning
from ..search import search_lightcurve
from .regressioncorrector import RegressionCorrector
from ..collections import LightCurveCollection
from .metrics import overfit_metric_lombscargle, underfit_metric_neighbors, MinTargetsError
log = logging.getLogger(__name__)
__all__ = ['CBVCorrector', 'CotrendingBasisVectors', 'KeplerCotrendingBasisVectors',
'TessCotrendingBasisVectors', 'load_kepler_cbvs','load_tess_cbvs',
'download_kepler_cbvs', 'download_tess_cbvs']
#*******************************************************************************
# CBV Corrector Class
class CBVCorrector(RegressionCorrector):
"""Class for removing systematics using Cotrending Basis Vectors (CBVs)
from Kepler/K2/TESS.
On construction of this object, the relevant CBVs will be downloaded from
MAST appropriate for the lightcurve object passed to the constructor.
For TESS there are multiple CBV types. All are loaded and the user must
specify which to use in the correction.
Attributes
----------
lc : LightCurve
The light curve loaded into CBVCorrector in electrons / second
cbvs : CotrendingBasisVectors list
The retrieved CBVs, can contain multiple types of CBVs
interpolated_cbvs : bool
If true then the CBVs have been interpolated to the lightcurve
cbv_design_matrix : DesignMatrix
The retrieved CBVs ported into a DesignMatrix object
extra_design_matrix : DesignMatrix
An extra design matrix to include in the fit with the CBVs
design_matrix_collection : DesignMatrixCollection
The design matrix collection composed of cbv_design_matrix and extra_design_matrix
corrected_lc : LightCurve
The returned light curve from correct() in electrons / second
coefficients : float ndarray
The fit coefficients corresponding to the design_matrix_collection
coefficients_err : float ndarray
The error estimates for the coefficients, see regressioncorrector
model_lc : LightCurve
The model fit to the lightcurve 'lc'
diagnostic_lightcurves : dict
Model fits for each of the sub design matrices fit in model_lc
lc_neighborhood : LightCurveCollection
SPOC SAP light curves of all targets within the defined neighborhood of the
target under study for use with the under-fitting metric
lc_neighborhood_flux : list of arrays
Neighboring target flux aligned or interpolated to the target under
study cadence
cadence_mask : np.ndarray of bool
Mask, where True indicates a cadence that was used in
RegressionCorrector.correct.
Note: The saved cadence_mask is overwritten for each call to correct().
over_fitting_score : float
Over-fitting score from the most recent run of correct()
under_fitting_score : float
Under-fitting score from the most recent run of correct()
alpha : float
L2-norm regularization term used in most recent fit
Equivalent to: designmatrix prior sigma = np.median(self.lc.flux_err) / np.sqrt(alpha)
"""
def __init__(self, lc, interpolate_cbvs=False, extrapolate_cbvs=False, do_not_load_cbvs=False,
cbv_dir=None):
"""Constructor
This constructor will retrieve all relevant CBVs from MAST and then
align or interpolate them with the passed-in light curve.
Parameters
----------
lc : LightCurve
The light curve to correct
interpolate_cbvs : bool
By default, the cbvs will be 'aligned' to the lightcurve. If you
wish to interpolate the cbvs instead then set this to True.
Uses Piecewise Cubic Hermite Interpolating Polynomial (PCHIP).
extrapolate_cbvs : bool
Set to True if the CBVs also have to be extrapolated outside their time
stamp range. (If False then those cadences are filled with NaNs.)
do_not_load_cbvs : bool
If True then the CBVs will NOT be loaded from MAST.
Use this option if you wish to use the CBV corrector methods with only a
custom design matrix (via the ext_dm argument in the corrector methods)
cbv_dir : str
Path to specific directory holding TESS CBVs. If this is None, will query
MAST by default.
"""
if not isinstance(lc, LightCurve):
raise Exception('<lc> must be a LightCurve class')
assert lc.flux.unit==Unit('electron / second'), \
'cbvCorrector expects light curve to be passed in e-/s units.'
if extrapolate_cbvs and (extrapolate_cbvs != interpolate_cbvs):
raise Exception('interpolate_cbvs must be True if extrapolate_cbvs is True')
# We do not want any NaNs
lc = lc.remove_nans()
        # Call the RegressionCorrector constructor
super(CBVCorrector, self).__init__(lc)
#***
# Retrieve all relevant CBVs from either MAST or a local directory
cbvs = []
if (not do_not_load_cbvs):
if self.lc.mission == 'Kepler':
cbvs.append(load_kepler_cbvs(cbv_dir=cbv_dir,mission=self.lc.mission, quarter=self.lc.quarter,
channel=self.lc.channel))
elif self.lc.mission == 'K2':
cbvs.append(load_kepler_cbvs(cbv_dir=cbv_dir,mission=self.lc.mission, campaign=self.lc.campaign,
channel=self.lc.channel))
elif self.lc.mission == 'TESS':
# For TESS we load multiple CBV types
# Single-Scale
cbvs.append(load_tess_cbvs(cbv_dir=cbv_dir,sector=self.lc.sector,
camera=self.lc.camera, ccd=self.lc.ccd, cbv_type='SingleScale'))
# Multi-Scale
# Although there has always been 3 bands, there could be more,
# continue to load more bands until no more are left to load
iBand = int(0)
moreData = True
while moreData:
iBand += 1
cbvObj = load_tess_cbvs(cbv_dir=cbv_dir,sector=self.lc.sector,
camera=self.lc.camera, ccd=self.lc.ccd, cbv_type='MultiScale',
band=iBand)
if (cbvObj.band == iBand):
cbvs.append(cbvObj)
else:
moreData = False
# Spike
cbvs.append(load_tess_cbvs(cbv_dir=cbv_dir,sector=self.lc.sector,
camera=self.lc.camera, ccd=self.lc.ccd, cbv_type='Spike'))
else:
raise ValueError('Unknown mission type')
for idx in np.arange(len(cbvs)):
if (not isinstance(cbvs[idx], CotrendingBasisVectors)):
raise Exception('CBVs could not be loaded. CBVCorrector must exit')
# Set the CBV time format units to the lightcurve time format units
for idx in np.arange(len(cbvs)):
# astropy.time.Time makes this easy!
cbvs[idx].time.format = lc.time.format
# Align or interpolate the CBVs with the lightcurve flux using the cadence numbers
for idx in np.arange(len(cbvs)):
if interpolate_cbvs:
cbvs[idx] = cbvs[idx].interpolate(self.lc, extrapolate=extrapolate_cbvs)
else:
cbvs[idx] = cbvs[idx].align(self.lc)
self.cbvs = cbvs
self.interpolated_cbvs = interpolate_cbvs
self.extrapolated_cbvs = extrapolate_cbvs
# Initialize all extra attributes to None
self.cbv_design_matrix = None
self.extra_design_matrix = None
self.design_matrix_collection = None
self.corrected_lc = None
self.coefficients = None
self.coefficients_err = None
self.model_lc = None
self.diagnostic_lightcurves = None
self.lc_neighborhood = None
self.lc_neighborhood_flux = None
self.cadence_mask = None
self.over_fitting_score = None
self.under_fitting_score = None
self.alpha = None
def correct_gaussian_prior(self, cbv_type=['SingleScale'],
cbv_indices=[np.arange(1,9)],
alpha=1e-20, ext_dm=None, cadence_mask=None, **kwargs):
""" Performs the correction using RegressionCorrector methods.
This method will assemble the full design matrix collection composed of
cbv_design_matrix and extra_design_matrix (ext_dm). It then uses the
alpha L2-Norm (Ridge Regression) penalty term to set the width on the
design matrix priors. Then uses the super-class
RegressionCorrector.correct to perform the correction.
The relation between the L2-Norm alpha term and the Gaussian prior sigma
is:
alpha = flux_sigma^2 / sigma^2
By default this method will use the first 8 "SingleScale" basis vectors.
Parameters
----------
cbv_type : str list
List of CBV types to use
cbv_indices : list of lists
List of CBV vectors to use in each passed cbv_type. {'ALL' => Use all}
NOTE: 1-Based indexing!
alpha : float
            L2-norm regularization penalty term. Default = 1e-20
{0 => no regularization}
ext_dm : `.DesignMatrix` or `.DesignMatrixCollection`
Optionally pass an extra design matrix to also be used in the fit
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
**kwargs : dict
Additional keyword arguments passed to
`RegressionCorrector.correct`.
Returns
-------
`.LightCurve`
Corrected light curve, with noise removed. In units of electrons / second
Examples
--------
The following example will perform the correction using the
SingleScale and Spike basis vectors with a weak regularization alpha
        term of 0.1. It also adds in an external design matrix to perform a
joint fit.
>>> cbv_type = ['SingleScale', 'Spike']
>>> cbv_indices = [np.arange(1,9), 'ALL']
>>> corrected_lc = cbvCorrector.correct_gaussian_prior(cbv_type=cbv_type, # doctest: +SKIP
>>> cbv_indices=cbv_indices, alpha=0.1, # doctest: +SKIP
>>> ext_dm=design_matrix ) # doctest: +SKIP
"""
# Perform all the preparatory stuff common to all correct methods
self._correct_initialization(cbv_type=cbv_type,
cbv_indices=cbv_indices, ext_dm=ext_dm)
# Add in a width to the Gaussian priors
# alpha = flux_sigma^2 / sigma^2
if (alpha == 0.0):
sigma = None
else:
sigma = np.median(self.lc.flux_err.value) / np.sqrt(np.abs(alpha))
self._set_prior_width(sigma)
# Use RegressionCorrector.correct for the actual fitting
self.correct_regressioncorrector(self.design_matrix_collection,
cadence_mask=cadence_mask, **kwargs)
self.alpha = alpha
return self.corrected_lc
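    # Numeric sketch of the alpha <-> prior-width relation used above
    # (hypothetical helper): alpha = flux_sigma**2 / sigma**2, hence
    # sigma = flux_sigma / sqrt(alpha); e.g. flux_sigma=10, alpha=1e-2 -> 100.
    @staticmethod
    def _example_alpha_to_prior_sigma(flux_sigma, alpha):
        return flux_sigma / np.sqrt(alpha)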
def correct_elasticnet(self, cbv_type='SingleScale', cbv_indices=np.arange(1,9),
alpha=1e-20, l1_ratio=0.01, ext_dm=None, cadence_mask=None, **kwargs):
""" Performs the correction using scikit-learn's ElasticNet which
utilizes combined L1- and L2-Norm priors as a regularizer.
This method will assemble the full design matrix collection composed of
cbv_design_matrix and extra_design_matrix (ext_dm). Then uses
scikit-learn.linear_model.ElasticNet to perform the correction.
By default this method will use the first 8 "SingleScale" basis vectors.
This method will preserve the median value of the light curve flux.
Note that the alpha term in scikit-learn's ElasticNet does not have the
same scaling as when used in CBVCorrector.correct_gaussian_prior or
CBVCorrector.correct. Do not assume similar results with a
similar alpha value.
Parameters
----------
cbv_type : str list
List of CBV types to use
cbv_indices : list of lists
List of CBV vectors to use in each passed cbv_type. {'ALL' => Use all}
NOTE: 1-Based indexing!
alpha : float
            L2-norm regularization penalty term.
{0 => no regularization}
l1_ratio : float
Elastic-Net mixing parameter
l1_ratio = 0 => L2 penalty (Ridge). l1_ratio = 1 => L1 penalty (Lasso).
ext_dm : `.DesignMatrix` or `.DesignMatrixCollection`
Optionally pass an extra design matrix to also be used in the fit
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
**kwargs : dict
Additional keyword arguments passed to
`sklearn.linear_model.ElasticNet`.
Returns
-------
`.LightCurve`
Corrected light curve, with noise removed. In units of electrons / second
Examples
--------
The following example will perform the ElasticNet correction using the
        SingleScale and Spike basis vectors with a strong regularization alpha
        term of 1.0 and an L1 ratio of 0.9, which means predominantly a Lasso
regularization but with a slight amount of Ridge Regression.
>>> cbv_type = ['SingleScale', 'Spike']
>>> cbv_indices = [np.arange(1,9), 'ALL']
>>> corrected_lc = cbvCorrector.correct_elasticnet(cbv_type=cbv_type, # doctest: +SKIP
>>> cbv_indices=cbv_indices, alpha=1.0, l1_ratio=0.9) # doctest: +SKIP
"""
# Perform all the preparatory stuff common to all correct methods
self._correct_initialization(cbv_type=cbv_type,
cbv_indices=cbv_indices, ext_dm=ext_dm)
# Default cadence mask
if cadence_mask is None:
cadence_mask = np.ones(len(self.lc.flux), bool)
# Use Scikit-learn ElasticNet
self.regressor = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False, **kwargs)
X = self.design_matrix_collection.values
y = self.lc.flux
# Set mask
# note: ElasticNet has no internal way to do this so we have to just
# remove the cadences from X and y
XMasked = X.copy()
yMasked = y.copy()
XMasked = XMasked[cadence_mask,:]
yMasked = yMasked[cadence_mask]
# Perform the ElasticNet fit
self.regressor.fit(XMasked, yMasked)
# Finishing work
# When creating the model do not include the constant
model_flux = np.dot(X[:,0:-1], self.regressor.coef_[0:-1])
model_flux -= np.median(model_flux)
# TODO: Propagation of uncertainties. They really do not change much.
model_err = np.zeros(len(model_flux))
self.coefficients = self.regressor.coef_
self.model_lc = LightCurve(time=self.lc.time,
flux=model_flux*self.lc.flux.unit,
flux_err=model_err*self.lc.flux_err.unit)
self.corrected_lc = self.lc.copy()
self.corrected_lc.flux = self.lc.flux - self.model_lc.flux
self.corrected_lc.flux_err = (self.lc.flux_err**2 + model_err**2)**0.5
self.diagnostic_lightcurves = self._create_diagnostic_lightcurves()
self.cadence_mask = cadence_mask
self.alpha = alpha
return self.corrected_lc
def correct(self, cbv_type=['SingleScale'],
cbv_indices=[np.arange(1,9)],
ext_dm=None, cadence_mask=None, alpha_bounds=[1e-4,1e4],
target_over_score=0.5, target_under_score=0.5, max_iter=100):
""" Optimizes the correction by adjusting the L2-Norm (Ridge Regression)
regularization penalty term, alpha, based on the introduced noise
(over-fitting) and residual correlation (under-fitting) goodness
        metrics. The numerical optimization is performed using the
scipy.optimize.minimize_scalar Brent's method.
The optimizer attempts to maximize the over- and under-fitting goodness
metrics. However, once the target_over_score or target_under_score is
reached, a "Leaky ReLU" is used so that the optimization "pressure"
concentrates on the other metric until both metrics rise above their
respective target scores, instead of driving a single metric to near
1.0.
The optimization parameters used are stored in self.optimization_params
as a record of how the optimization was performed.
The optimized correction is performed using LightKurve's
RegressionCorrector methods. See correct_gaussian_prior for details.
Parameters
----------
cbv_type : str list
List of CBV types to use in correction {'ALL' => Use all}
cbv_indices : list of lists
List of CBV vectors to use in each of cbv_type passed. {'ALL' => Use all}
NOTE: 1-Based indexing!
ext_dm : `.DesignMatrix` or `.DesignMatrixCollection`
Optionally pass an extra design matrix to also be used in the fit
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
alpha_bounds : float list(len=2)
            Upper and lower bounds for alpha
target_over_score : float
Target Over-fitting metric score
target_under_score : float
Target under-fitting metric score
max_iter : int
Maximum number of iterations to optimize goodness metrics
Returns
-------
`.LightCurve`
Corrected light curve, with noise removed. In units of electrons / second
Examples
--------
The following example will perform the correction using the
SingleScale and Spike basis vectors. It will use alpha bounds of
[1.0,1e3]. The target over-fitting score is 0.5 and the target
under-fitting score is 0.8.
>>> cbv_type = ['SingleScale', 'Spike']
>>> cbv_indices = [np.arange(1,9), 'ALL']
>>> cbvCorrector.correct(cbv_type=cbv_type, cbv_indices=cbv_indices, # doctest: +SKIP
>>> alpha_bounds=[1.0,1e3], # doctest: +SKIP
>>> target_over_score=0.5, target_under_score=0.8) # doctest: +SKIP
"""
# Perform all the preparatory stuff common to all correct methods
self._correct_initialization(cbv_type=cbv_type,
cbv_indices=cbv_indices, ext_dm=ext_dm)
# Create a dictionary for optimization parameters to easily pass to the
# objective function, and also to save for posterity
self.optimization_params = {'alpha_bounds': alpha_bounds,
'target_over_score': target_over_score,
'target_under_score': target_under_score,
'max_iter': max_iter,
'cadence_mask': cadence_mask,
'over_metric_nSamples': 1}
#***
# Use scipy.optimize.minimize_scalar
# Minimize the introduced metric
minimize_result = minimize_scalar(self._goodness_metric_obj_fun, method='Bounded',
bounds=alpha_bounds,
options={'maxiter':max_iter, 'disp': False})
# Re-fit with final alpha value
# (scipy.optimize.minimize_scalar does not exit with the final fit!)
self._goodness_metric_obj_fun(minimize_result.x)
# Only display over- or under-fitting scores if requested to optimize
# for each
if (self.optimization_params['target_over_score'] > 0):
self.over_fitting_score = self.over_fitting_metric(n_samples=10)
print('Optimized Over-fitting metric: {}'.format(self.over_fitting_score))
else:
self.over_fitting_score = -1.0
if (self.optimization_params['target_under_score'] > 0):
self.under_fitting_score = self.under_fitting_metric()
print('Optimized Under-fitting metric: {}'.format(self.under_fitting_score))
else:
self.under_fitting_score = -1.0
self.alpha = minimize_result.x
print('Optimized Alpha: {0:2.3e}'.format(self.alpha))
return self.corrected_lc
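    # Minimal sketch of the "Leaky ReLU" behavior described above
    # (hypothetical helper): once a metric exceeds its target, its
    # contribution to the objective is damped so the optimization pressure
    # shifts to the other metric.
    @staticmethod
    def _example_leaky_penalty(score, target, leak=0.01):
        shortfall = target - score
        return shortfall if shortfall > 0 else leak * shortfall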
def correct_regressioncorrector(self, design_matrix_collection, **kwargs):
""" Pass-through method to gain access to the superclass
RegressionCorrector.correct() method.
"""
# All this does is call the superclass 'correct' method as pass the
# input arguments.
return super(CBVCorrector, self).correct(design_matrix_collection, **kwargs)
def over_fitting_metric(self,
n_samples: int = 10):
""" Computes the over-fitting metric using
metrics.overfit_metric_lombscargle
See that function for a description of the algorithm.
Parameters
----------
n_samples : int
The number of times to compute and average the metric
            This can stabilize the value; default = 10
Returns
-------
over_fitting_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
# Check if corrected_lc is present
if (self.corrected_lc is None):
log.warning('A corrected light curve does not exist, please run '
'correct first')
return None
# Ignore masked cadences
orig_lc = self.lc.copy()
orig_lc = orig_lc[self.cadence_mask]
corrected_lc = self.corrected_lc.copy()
corrected_lc = corrected_lc[self.cadence_mask]
        return overfit_metric_lombscargle(orig_lc, corrected_lc, n_samples=n_samples)
def under_fitting_metric(self,
radius: float = None,
min_targets: int = 30,
max_targets: int = 50):
""" Computes the under-fitting metric using
metrics.underfit_metric_neighbors
See that function for a description of the algorithm.
For TESS, the default radius is 5000 arcseconds.
        For Kepler/K2, the default radius is 1000 arcseconds.
This function will begin with the given radius in arcseconds and
finds all neighboring targets. If not enough were found (< min_targets)
the radius is increased until a minimum number are found.
The downloaded neighboring targets will be "aligned" to the
corrected_lc, meaning the cadence numbers are used to align the targets
to the corrected_lc. However, if the CBVCorrector object was
instantiated with interpolated_cbvs=True then the targets will be
interpolated to the corrected_lc cadence times.
Parameters
----------
radius : float
Search radius to find neighboring targets in arcseconds
        min_targets : int
            Minimum number of targets to use in correlation metric
            Using too few can cause unreliable results. Default = 30
        max_targets : int
Maximum number of targets to use in correlation metric
Using too many can slow down the metric due to large data
download. Default = 50
Returns
-------
under_fitting_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
# Check if corrected_lc is present
if (self.corrected_lc is None):
raise Exception('A corrected light curve does not exist, please run '
'correct first')
# Set default radius if one is not provided.
if (radius is None):
if (self.lc.mission == 'TESS'):
radius = 5000
else:
radius = 1000
interpolate = self.interpolated_cbvs
extrapolate = self.extrapolated_cbvs
# Make a copy of radius because it changes locally
dynamic_search_radius = radius
# Max search radius is the diagonal distance along a CCD in arcseconds
# 1 pixel in TESS is 21.09 arcseconds
# 1 pixel in Kepler/K2 is 3.98 arcseconds
if (self.lc.mission == 'TESS'):
# 24 degrees of a TESS CCD array (2 CCDs wide) is 86,400 arcseconds
max_search_radius = np.sqrt(2) * (86400/2.0)
elif (self.lc.mission == 'Kepler' or self.lc.mission == 'K2'):
# One Kepler CCD spans 4,096 arcseconds
max_search_radius = np.sqrt(2) * 4096
else:
raise Exception('Unknown mission')
# Ignore masked cadences
corrected_lc = self.corrected_lc.copy()
corrected_lc = corrected_lc[self.cadence_mask]
# Dynamically increase radius until min_targets reached.
continue_searching = True
while (continue_searching):
try:
metric = underfit_metric_neighbors(corrected_lc,
dynamic_search_radius, min_targets, max_targets,
interpolate, extrapolate)
except MinTargetsError:
# Too few targets found, try increasing search radius
if (dynamic_search_radius > max_search_radius):
# Hit the edge of the CCD, we have to give up
raise Exception('Not enough neighboring targets were '
'found. under_fitting_metric failed')
# Too few found, increase search radius
dynamic_search_radius *= 1.5
else:
continue_searching = False
return metric
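# Hedged usage sketch (for a TESS target the default 5000 arcsec radius
# grows by 1.5x per retry until min_targets neighbors are found):
#
#     score = corrector.under_fitting_metric(radius=5000, min_targets=30)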
def _correct_initialization(self, cbv_type='SingleScale', cbv_indices='ALL',
ext_dm=None):
""" Performs all the preparatory work needed before applying a 'correct'
method.
This helper function is used so that multiple correct methods can be used
without the need to repeat preparatory code.
The main thing this method does is set up the design matrix, given the
requested CBVs and external design matrix.
Parameters
----------
cbv_type : str list
List of CBV types to use
Can be None if only ext_dm is used
cbv_indices : list of lists
List of CBV vectors to use in each passed cbv_type. {'ALL' => Use all}
Can be None if only ext_dm is used
ext_dm : `.DesignMatrix` or `.DesignMatrixCollection`
Optionally pass an extra design matrix to additionally be used in the fit
"""
assert not ((cbv_type is None) ^ (cbv_indices is None)), \
'Both cbv_type and cbv_indices must be None, or neither'
if (cbv_type is None and cbv_indices is None):
use_cbvs = False
else:
use_cbvs = True
# If any DesignMatrix was passed then store it
self.extra_design_matrix = ext_dm
# Check that extra design matrix is aligned with lc flux
if ext_dm is not None:
assert isinstance(ext_dm, DesignMatrix), \
'ext_dm must be a DesignMatrix'
if (ext_dm.df.shape[0] != len(self.lc.flux)):
raise ValueError(
'ext_dm must contain the same number of cadences as lc.flux')
# Create a CBV design matrix for each CBV set requested
self.cbv_design_matrix = []
if use_cbvs:
assert (not isinstance(cbv_type, str) and
not isinstance(cbv_indices[0], int)), \
'cbv_type must be a list of strings and cbv_indices a list of lists'
if (self.lc.mission in ['Kepler', 'K2']):
assert len(cbv_type) == 1 , \
'cbv_type must be only Single-Scale for Kepler and K2 missions'
assert cbv_type == ['SingleScale'], \
'cbv_type must be Single-Scale for Kepler and K2 missions'
if (isinstance(cbv_type, list) and len(cbv_type) != 1):
assert (self.lc.mission == 'TESS'), \
'Multiple CBV types are only allowed for TESS'
assert (len(cbv_type) == len(cbv_indices)), \
'cbv_type and cbv_indices must be the same list length'
# Loop through all the stored CBVs and find the ones matching the
# requested cbv_type list
for idx in np.arange(len(cbv_type)):
for cbvs in self.cbvs:
# Temporarily copy the cbv_indices requested
cbv_idx_loop = cbv_indices[idx]
# If requesting 'ALL' CBVs then set to max default number
# Remember, CBV indices are 1-based!
if (isinstance(cbv_idx_loop, str) and (cbv_idx_loop == 'ALL')):
cbv_idx_loop = cbvs.cbv_indices
# Trim to nCBVs in cbvs
cbv_idx_loop = np.array([idx for idx in cbv_idx_loop if
bool(np.in1d(idx, cbvs.cbv_indices))])
if cbv_type[idx].find('MultiScale') >= 0:
# Find the correct band if this is a multi-scale CBV set
band = int(cbv_type[idx][-1])
if (cbvs.cbv_type in cbv_type[idx] and cbvs.band == band):
self.cbv_design_matrix.append(cbvs.to_designmatrix(
cbv_indices=cbv_idx_loop, name=cbv_type[idx]))
else:
if (cbvs.cbv_type in cbv_type[idx]):
self.cbv_design_matrix.append(cbvs.to_designmatrix(
cbv_indices=cbv_idx_loop, name=cbv_type[idx]))
#***
# Create the design matrix collection with CBVs, plus extra passed basis vectors
# Create the full design matrix collection from all the sub-design
# matrices (I.e 'flatten' the design matrix collection)
if self.extra_design_matrix is not None and \
self.cbv_design_matrix != []:
# Combine cbv_design_matrix and extra_design_matrix
dm_to_flatten = [[cbv_dm for cbv_dm in self.cbv_design_matrix],
[self.extra_design_matrix]]
flattened_dm_list = [item for sublist in dm_to_flatten for item in sublist]
elif self.cbv_design_matrix != []:
# Just use cbv_design_matrix
dm_to_flatten = [[cbv_dm for cbv_dm in self.cbv_design_matrix]]
flattened_dm_list = [item for sublist in dm_to_flatten for item in sublist]
else:
# Just use extra_design_matrix
flattened_dm_list = [self.extra_design_matrix]
# Add in a constant to the design matrix collection
# Note: correct_elasticnet ASSUMES that the last vector in the
# design_matrix_collection is the constant
flattened_dm_list.append(DesignMatrix(np.ones(flattened_dm_list[0].shape[0]),
columns=['Constant'], name='Constant'))
self.design_matrix_collection = DesignMatrixCollection(flattened_dm_list)
def _set_prior_width(self, sigma):
""" Sets the Gaussian prior in the design_matrix_collection widths to sigma
Parameters
----------
sigma : scalar float
all widths are set to the same value
If sigma = None then uniform sigma is set
"""
if (isinstance(sigma, list)):
raise Exception("separate widths is not yet implemented")
for dm in self.design_matrix_collection:
nCBVs = len(dm.prior_sigma)
if sigma is None:
dm.prior_sigma = np.ones(nCBVs) * np.inf
else:
dm.prior_sigma = np.ones(nCBVs) * sigma
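# Minimal numeric sketch of the alpha -> prior-width mapping used by
# _goodness_metric_obj_fun below: sigma = median(flux_err) / sqrt(|alpha|),
# so a larger alpha yields a tighter prior and stronger regularization.
# E.g., a median flux_err of 11.0 and alpha = 1e2 give sigma = 11.0 / 10.0 = 1.1.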
def _goodness_metric_obj_fun(self, alpha):
""" The objective function to minimize with
scipy.optimize.minimize_scalar
First sets the alpha regularization penalty then runs
RegressionCorrector.correct and then computes the over- and
under-fitting goodness metrics to return a scalar penalty term to
minimize.
Uses the parameters in self.optimization_params.
Parameters (in self.optimization_params)
----------
alpha : float
regularization penalty term value to set
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
target_over_score : float
Target Over-fitting metric score
If <=0 then ignore over-fitting metric
target_under_score : float
Target under-fitting metric score
If <=0 then ignore under-fitting metric
Returns
-------
penalty : float
Penalty term for minimizer, based on goodness metrics
"""
# Add in a width to the Gaussian priors
# alpha = flux_sigma^2 / sigma^2
sigma = np.median(self.lc.flux_err.value) / np.sqrt(np.abs(alpha))
self._set_prior_width(sigma)
# Use RegressionCorrector.correct for the actual fitting
self.correct_regressioncorrector(self.design_matrix_collection,
cadence_mask=self.optimization_params['cadence_mask'])
# Skip computing the metric if the target score is <= 0
if (self.optimization_params['target_over_score'] > 0):
overMetric = self.over_fitting_metric(
n_samples=self.optimization_params['over_metric_nSamples'])
else:
overMetric = 1.0
# Skip computing the metric if the target score is <= 0
if (self.optimization_params['target_under_score'] > 0):
underMetric = self.under_fitting_metric()
else:
underMetric = 1.0
# Once we hit the target we want to ease-back on increasing the metric
# However, we don't want to ease-back to zero pressure; that would
# unconstrain the penalty term and cause the optimizer to run wild.
# So, use a "Leaky ReLU"
# metric' = threshold + (metric - threshold) * leakFactor
leakFactor = 0.01
if (self.optimization_params['target_over_score'] > 0 and
overMetric >= self.optimization_params['target_over_score']):
overMetric = (self.optimization_params['target_over_score'] +
leakFactor *
(overMetric -
self.optimization_params['target_over_score']))
if (self.optimization_params['target_under_score'] > 0 and
underMetric >= self.optimization_params['target_under_score']):
underMetric = (self.optimization_params['target_under_score'] +
leakFactor *
(underMetric -
self.optimization_params['target_under_score']))
penalty = -(overMetric + underMetric)
return penalty
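# Worked example of the "Leaky ReLU" easing above: with a target score of
# 0.8 and a raw metric of 0.9, the eased value is
# 0.8 + 0.01 * (0.9 - 0.8) = 0.801, so the optimizer gains little by
# pushing a metric past its target.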
def diagnose(self):
""" Returns diagnostic plots to assess the most recent correction.
If a correction has not yet been fitted, a ``ValueError`` will be raised.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
axs = self._diagnostic_plot()
plt.title('Alpha = {0:2.3e}'.format(self.alpha))
return axs
def goodness_metric_scan_plot(self, cbv_type=['SingleScale'],
cbv_indices=[np.arange(1,9)], alpha_range_log10=[-4, 4],
ext_dm=None, cadence_mask=None):
""" Returns a diagnostic plot of the over and under goodness metrics as a
function of the L2-Norm regularization term, alpha.
alpha is scanned by default over the range 10^-4 to 10^4 in logspace.
cbvCorrector.correct_gaussian_prior is used to make the correction for
each alpha. Then the over and under goodness metric are computed.
If a correction has already been performed (via one of the correct_*
methods) then the used alpha value is also plotted for reference.
Parameters
----------
cbv_type : str list
List of CBV types to use in correction {'ALL' => Use all}
cbv_indices : list of lists
List of CBV vectors to use in each of cbv_type passed. {'ALL' => Use all}
NOTE: 1-Based indexing!
alpha_range_log10 : [list of two] The start and end exponent for the logspace scan.
Default = [-4, 4]
ext_dm : `.DesignMatrix` or `.DesignMatrixCollection`
Optionally pass an extra design matrix to also be used in the fit
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
alphaArray = np.logspace(alpha_range_log10[0], alpha_range_log10[1], num=100)
# We need to make a copy of self so that the scan's final fit parameters
# do not over-write any stored fit parameters
cbvCorrectorCopy = self.copy()
# Compute both metrics vs. alpha
overMetric = []
underMetric = []
for thisAlpha in alphaArray:
cbvCorrectorCopy.correct_gaussian_prior(cbv_type=cbv_type, cbv_indices=cbv_indices,
alpha=thisAlpha, ext_dm=ext_dm,
cadence_mask=cadence_mask)
overMetric.append(cbvCorrectorCopy.over_fitting_metric(n_samples=1))
underMetric.append(cbvCorrectorCopy.under_fitting_metric())
# plot both
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.semilogx(alphaArray, underMetric, 'b.', label='UnderFit')
ax.semilogx(alphaArray, overMetric, 'r.', label='OverFit')
if (isinstance(self.alpha, float)):
ax.semilogx([self.alpha, self.alpha], [0, 1.0], 'k-',
label='corrected_lc Alpha = {0:2.3e}'.format(self.alpha))
plt.title('Goodness Metrics vs. L2-Norm Penalty (alpha)')
plt.xlabel('Regularization Factor Alpha')
plt.ylabel('Goodness Metric')
ax.grid(':', alpha=0.3)
ax.legend()
return ax
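# Hedged usage sketch (scans the default 10^-4..10^4 alpha range and plots
# both goodness metrics):
#
#     ax = corrector.goodness_metric_scan_plot(cbv_type=['SingleScale'],
#                                              cbv_indices=[np.arange(1, 9)])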
def copy(self):
"""Returns a copy of this `cbvCorrector` object.
This method uses Python's `copy.deepcopy` function to ensure that all
objects stored within the cbvCorrector instance are fully copied.
Returns
-------
cbvCorrector_copy : `cbvCorrector`
A new object which is a copy of the original.
"""
return copy.deepcopy(self)
def __repr__(self):
""" This will print all attributes of the class kinda like in
self.__dict__
"""
dictionary = self.__dict__.copy()
dictionary['lc'] = '<{} targetid={} length={}>'.format(type(self.lc),
self.lc.targetid, len(self.lc))
if self.corrected_lc is not None:
dictionary['corrected_lc'] = '<{} targetid={} length={}>'.format(
type(self.corrected_lc), self.corrected_lc.targetid,
len(self.corrected_lc))
dict_string = '\n'
for key in dictionary.keys():
dict_string += '\t{} = {}\n'.format(key, dictionary[key])
return dict_string
#*******************************************************************************
#*******************************************************************************
#*******************************************************************************
# Cotrending Basis Vectors Classes and Functions
#*******************************************************************************
#*******************************************************************************
#*******************************************************************************
class CotrendingBasisVectors(TimeSeries):
"""
Defines a CotrendingBasisVectors class, which is the Superclass for
KeplerCotrendingBasisVectors and TessCotrendingBasisVectors.
Normally, one would use these latter classes instead of instantiating
CotrendingBasisVectors directly. However, for generating custom CBVs one can
use this super class.
Stores Cotrending Basis Vectors for the Kepler/K2/TESS missions.
Each CotrendingBasisVectors object contains only ONE set of CBVs.
Instantiate multiple objects to store multiple sets of CBVs, for example, to
save each of the three multi-scale bands in TESS.
CotrendingBasisVectors calls the standard __init__ from
astropy.timeseries.TimeSeries
Parameters
----------
data : `~astropy.table.Table`
Data to initialize CotrendingBasisVectors. The
CBVs should be in columns called ``'CADENCENO'``, ``'GAP'``, ``'VECTOR_1'``,
``'VECTOR_2'``, ... ``'VECTOR_N'``
If 'GAP' is not given then it is filled with all False.
If 'CADENCENO' is not given then it is filled with np.arange(nCadences)
time : `~astropy.time.Time`
Time values.
**kwargs : dict
Additional keyword arguments are passed to `~astropy.table.QTable`.
Attributes
----------
cadenceno : int array-like
Cadence indices
time : float array-like
CBV cadence times
gap_indicators : bool array-like
True => cadence is gapped
cbv_indices : list int-like
List of CBV indices available
1-based indexing
['VECTOR_#'] : astropy.table.column.Column
CBV number #
"""
#***
def __init__(self, data=None, time=None, **kwargs):
# Add some columns if not present
if data is not None:
if 'GAP' not in data.colnames:
data['GAP'] = np.full(data[data.colnames[0]].size, False)
if 'CADENCENO' not in data.colnames:
data['CADENCENO'] = np.arange(data[data.colnames[0]].size)
# Initialize the astropy.timeseries.TimeSeries attributes
super().__init__(data=data, time=time, **kwargs)
# Ensure all columns are Quantity objects
for col in self.columns:
if not isinstance(self[col], (Quantity, Time)):
self.replace_column(col, Quantity(self[col], dtype=self[col].dtype))
# cbv_indices are always determined by the 'VECTOR_#' columns in the
# TimeSeries
@property
def cbv_indices(self):
cbv_indices = []
for name in self.colnames:
if name.find('VECTOR_') > -1:
cbv_indices.append(int(name[7:]))
return cbv_indices
@property
def time(self):
"""The time values."""
return self['time']
@time.setter
def time(self, time):
self['time'] = time
@property
def gap_indicators(self):
return self['GAP']
@gap_indicators.setter
def gap_indicators(self, gap_indicators):
self['GAP'] = gap_indicators
@property
def cadenceno(self):
return self['CADENCENO']
@cadenceno.setter
def cadenceno(self, cadenceno):
self['CADENCENO'] = cadenceno
def to_designmatrix(self, cbv_indices='all', name='CBVs'):
"""Returns a `DesignMatrix` where the columns are the
requested CBVs.
Parameters
----------
cbv_indices : list of ints
List of CBV vectors to use. 1-based indexing!
{'all' => Use all}
name : str
A Name for the DesignMatrix
Returns
-------
design_matrix : designmatrix.DesignMatrix
"""
if isinstance(cbv_indices, str) and not cbv_indices == 'all':
raise ValueError('cbv_indices must either be list of ints or "all"')
elif not isinstance(cbv_indices, str) and 0 in cbv_indices:
raise ValueError("CBVs use 1-based indexing. Do not request CBV index '0'")
if (isinstance(cbv_indices, str) and (cbv_indices == 'all')):
cbv_indices = self.cbv_indices
cbv_names = []
cbv_matrix = np.array([])
for idx in cbv_indices:
# Check that the CBV index is available
if idx in self.cbv_indices:
# If so, append it as a column to the matrix
if len(cbv_matrix) == 0:
cbv_matrix = np.array(self['VECTOR_{}'.format(idx)])[...,None]
else:
cbv_matrix = np.hstack((cbv_matrix,
np.array(self['VECTOR_{}'.format(idx)])[...,None]))
cbv_names.append('VECTOR_{}'.format(idx))
return DesignMatrix(cbv_matrix, columns=cbv_names, name=name)
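# Hedged usage sketch (assumes `cbvs` is a CotrendingBasisVectors instance;
# remember CBV indices are 1-based):
#
#     dm = cbvs.to_designmatrix(cbv_indices=[1, 2, 3], name='CBVs')
#     dm.shape  # (n_cadences, 3)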
def plot(self, cbv_indices='all', ax=None, **kwargs):
"""Plots the requested CBVs evenly spaced out vertically for legibility.
Does not plot gapped cadences
Parameters
----------
cbv_indices : list of ints
The list of cotrending basis vectors to plot. For example:
[1, 2] will fit the first two basis vectors. 'all' => plot all
NOTE: 1-based indexing
ax : matplotlib.pyplot.Axes.AxesSubplot
Matplotlib axis object. If `None`, one will be generated.
kwargs : dict
Dictionary of arguments to be passed to `matplotlib.pyplot.plot`.
Returns
-------
ax : matplotlib.pyplot.Axes.AxesSubplot
Matplotlib axis object
"""
if isinstance(cbv_indices, str) and not cbv_indices == 'all':
raise ValueError('cbv_indices must either be list of ints or "all"')
elif not isinstance(cbv_indices, str) and 0 in cbv_indices:
raise ValueError("CBVs use 1-based indexing. Do not request CBV index '0'")
with plt.style.context(MPLSTYLE):
if (isinstance(cbv_indices, str) and (cbv_indices == 'all')):
cbv_indices = []
for name in self.colnames:
if name.find('VECTOR_') > -1:
cbv_indices.append(int(name[7:]))
cbv_designmatrix = self.to_designmatrix(cbv_indices)
if ax is None:
_, ax = plt.subplots(1)
# Plot gaps as NaN
# The time array may be a Masked array, so fill masked values with NaNs
timeArray = self.time.copy().value
if isinstance(timeArray, (Masked, np.ma.MaskedArray)):
if np.issubdtype(timeArray.dtype, np.int_):
timeArray = timeArray.astype(float)
timeArray = timeArray.filled(np.nan)
timeArray[np.nonzero(self.gap_indicators)[0]] = np.nan
# Get the CBV arrays that were requested
for idx, cbv_name in enumerate(cbv_designmatrix.columns):
cbvIndex = cbv_name[7:]
cbv = cbv_designmatrix[cbv_name]
# Plot gaps as NaN
cbv[np.nonzero(self.gap_indicators)[0]] = np.nan
ax.plot(timeArray, cbv-idx/10., label='{}'.format(cbvIndex), **kwargs)
ax.set_yticks([])
ax.set_xlabel('Time [{}]'.format(self['time'].format))
if hasattr(self, 'mission'):
if self.mission == 'Kepler':
ax.set_title('Kepler CBVs (Quarter.Module.Output : {}.{}.{})'
''.format(self.quarter, self.module, self.output),
fontdict={'fontsize': 10})
elif self.mission == 'K2':
ax.set_title('K2 CBVs (Campaign.Module.Output : {}.{}.{})'
''.format( self.campaign, self.module, self.output),
fontdict={'fontsize': 10})
elif self.mission == 'TESS':
if (self.cbv_type == 'MultiScale'):
ax.set_title('TESS CBVs (Sector.Camera.CCD : {}.{}.{}, CBVType.Band : {}.{})'
''.format(self.sector, self.camera, self.ccd, self.cbv_type, self.band),
fontdict={'fontsize': 9})
else:
ax.set_title('TESS CBVs (Sector.Camera.CCD : {}.{}.{}, CBVType : {})'
''.format(self.sector, self.camera, self.ccd, self.cbv_type),
fontdict={'fontsize': 10})
else:
# This is a generic CotrendingBasisVectors object
ax.set_title('CBVs', fontdict={'fontsize': 10})
ax.grid(':', alpha=0.3)
ax.legend(fontsize='small', ncol=2)
return ax
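# Hedged usage sketch (gapped cadences are plotted as NaN; extra kwargs are
# passed through to matplotlib.pyplot.plot):
#
#     ax = cbvs.plot(cbv_indices=[1, 2], linewidth=1)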
def align(self, lc):
"""Aligns the CBVs to a light curve. The lightCurve object might not
have the same cadences as the CBVs. This will trim the CBVs to be
aligned with the light curve.
This method will use the cadence number (lc.cadenceno) to
perform the synchronization. Only cadence numbers that exist in both
the CBVs and the light curve will have values in the returned CBVs. All
cadence numbers that exist in the light curve but not in the CBVs will
have NaNs returned for the CBVs on those cadences and the GAP set to
True.
Any cadences in the CBVs not in the light curve will be removed from the CBVs.
The returned cbvs object is sorted by cadenceno.
If you wish to interpolate the CBVs to arbitrary light curve cadence
times then use the interpolate method.
Parameters
----------
lc : LightCurve object
The reference light curve to align to
Returns
-------
cbvs : CotrendingBasisVectors object
Aligned to the light curve
"""
# The fraction of cadences that do not align to throw a
# warning about the CBVs being poorly aligned to the light curve
poorly_aligned_threshold = 0.5
poorly_aligned_flag = False
if not isinstance(lc, LightCurve):
raise Exception('<lc> must be a LightCurve class')
if hasattr(lc, 'cadenceno'):
# Make a deepcopy so we do not just return a modified original
cbvs = copy.deepcopy(self)
# NaN any CBV cadences that are in the light curve and not in CBVs
# This requires us to add rows to the CBV table
lc_nan_mask = np.logical_not(np.in1d(lc.cadenceno, cbvs.cadenceno))
# Determine if the CBVs are poorly aligned to the light curve
if ((np.count_nonzero(lc_nan_mask) / len(lc_nan_mask)) >
poorly_aligned_threshold):
poorly_aligned_flag = True
lc_nan_indices = np.nonzero(lc_nan_mask)[0]
# Sadly, there is no TimeSeries.add_rows (plural), so we have to
# add each row in a for-loop
if len(lc_nan_indices) > 0:
for idx in lc_nan_indices:
dict_to_add = {}
dict_to_add['time'] = lc.time[idx]
dict_to_add['CADENCENO'] = lc.cadenceno[idx]
dict_to_add['GAP'] = True
for cbvIdx in cbvs.cbv_indices:
dict_to_add['VECTOR_{}'.format(cbvIdx)] = np.nan
cbvs.add_row(dict_to_add)
# There appears to be a bug in astropy.timeseries when using ts[x:y]
# in combination with ts.remove_row() or ts.remove_rows.
# See LightKurve Issue #836.
# To get around the error for now, we will attempt to use
# ts[x:y]. If it errors out then revert to remove_rows, which is
# REALLY slow.
try:
# This method is fast but might cause errors
keep_indices = np.nonzero(np.in1d(cbvs.cadenceno, lc.cadenceno))[0]
# Determine if the CBVs are poorly aligned to the light curve
if (len(keep_indices) / len(cbvs)) < poorly_aligned_threshold:
poorly_aligned_flag = True
cbvs = cbvs[keep_indices]
except:
# This method is slow but appears to be more robust
trim_indices = np.nonzero(np.logical_not(
np.in1d(cbvs.cadenceno, lc.cadenceno)))[0]
# Determine if the CBVs are poorly aligned to the light curve
if (len(trim_indices) / len(cbvs)) > poorly_aligned_threshold:
poorly_aligned_flag = True
cbvs.remove_rows(trim_indices)
# Now sort the CBVs by cadenceno
cbvs.sort('CADENCENO')
else:
raise Exception('align requires cadence numbers for the ' + \
'light curve. NO SYNCHRONIZATION OCCURRED')
# Only issue this warning once
if poorly_aligned_flag:
log.warning('The {} CBVs do not appear to be well aligned to the '
'light curve. Consider using "interpolate_cbvs=True"'.format(cbvs.cbv_type))
return cbvs
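# Hedged usage sketch (cadence-number based alignment; `lc` is any
# LightCurve that carries a cadenceno attribute):
#
#     aligned_cbvs = cbvs.align(lc)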
def interpolate(self, lc, extrapolate=False):
"""Interpolates the CBV to the cadence times in the given light curve
using Piecewise Cubic Hermite Interpolating Polynomial (PCHIP).
Uses scipy.interpolate.PchipInterpolator
Each CBV is interpolated independently. All gaps are set to False.
The cadence numbers are taken from the light curve.
Parameters
----------
lc : LightCurve object
The reference light curve cadence times to interpolate to
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
cbvs_interpolated: CotrendingBasisVectors object
interpolated to the light curve cadence times
"""
if not isinstance(lc, LightCurve):
raise Exception('<lc> must be a LightCurve class')
# If not extrapolating then check if extrapolation is necessary.
# If so, throw a warning
if extrapolate==False:
gapRemovedCBVtime = self.time.value[np.logical_not(self.gap_indicators.value)]
if (np.min(lc.time.value) < np.min(gapRemovedCBVtime) or
np.max(lc.time.value) > np.max(gapRemovedCBVtime) ):
log.warning('Extrapolation of CBVs appears to be necessary. '
'Extrapolated values will be filled with zeros. '
'Recommend setting extrapolate=True')
# Create the new cbv object with no basis vectors, yet...
cbvNewTime = lc.time.copy()
# Gaps are all false
gaps = np.full(len(lc.time), False)
dataTbl = Table([lc.cadenceno, gaps], names=('CADENCENO', 'GAP'))
# We are PCHIP interpolating each CBV independently.
# Do not include gaps when interpolating
warning_posted = False
for idx in self.cbv_indices:
fInterp = PchipInterpolator(
self.time.value[np.logical_not(self.gap_indicators.value)],
self['VECTOR_{}'.format(idx)][np.logical_not(self.gap_indicators.value)],
extrapolate=extrapolate)
dataTbl['VECTOR_{}'.format(idx)] = fInterp(lc.time.value)
# Replace NaNs with 0.0
if (np.any(np.isnan(dataTbl['VECTOR_{}'.format(idx)]))):
dataTbl['VECTOR_{}'.format(idx)][np.isnan(dataTbl['VECTOR_{}'.format(idx)])] = \
np.full(np.count_nonzero(np.isnan(dataTbl['VECTOR_{}'.format(idx)])), 0.0)
# Only post this warning once
if (not warning_posted):
log.warning('Some interpolated (or extrapolated) CBV values have been set to zero')
warning_posted = True
dataTbl.meta = self.meta.copy()
# We need to return a new CotrendingBasisVectors class. Make sure we
# instantiate the correct type.
if isinstance(self, KeplerCotrendingBasisVectors):
return KeplerCotrendingBasisVectors(data=dataTbl, time=cbvNewTime)
elif isinstance(self, TessCotrendingBasisVectors):
return TessCotrendingBasisVectors(data=dataTbl, time=cbvNewTime)
else:
return CotrendingBasisVectors(data=dataTbl, time=cbvNewTime)
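# Hedged usage sketch (PCHIP interpolation onto the light curve's cadence
# times; out-of-bounds values are filled with zeros, with a warning, unless
# extrapolate=True):
#
#     cbvs_interp = cbvs.interpolate(lc, extrapolate=False)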
class KeplerCotrendingBasisVectors(CotrendingBasisVectors):
"""Sub-class for Kepler/K2 cotrending basis vectors
See CotrendingBasisVectors for class details
Attributes
----------
CotrendingBasisVectors attributes
astropy.timeseries.TimeSeries attributes
mission : [str] ('Kepler', 'K2')
cbv_type : [str] always 'SingleScale'
quarter : [int] Kepler Quarter
campaign : [int] K2 Campaign
module : [int] Kepler instrument CCD module
output : [int] Kepler instrument CCD output
"""
#***
validMissionOptions = ('Kepler', 'K2')
validCBVTypes = ('SingleScale',)
#***
def __init__(self, data=None, time=None, **kwargs):
"""Initiates a KeplerCotrendingBasisVectors object.
Normally one would use KeplerCotrendingBasisVectors.from_hdu to
automatically set up the object. However, for certain functionality
one must instantiate the object directly.
"""
# Initialize attributes common to all CotrendingBasisVector classes
super(KeplerCotrendingBasisVectors, self).__init__(data=data,
time=time, **kwargs)
@classmethod
def from_hdu(self, hdu=None, module=None, output=None,
**kwargs):
"""Class method to instantiate a KeplerCotrendingBasisVectors object
from a CBV FITS HDU.
Kepler/K2 CBVs are all in the same FITS file for each quarter/campaign,
so, when instantiating the CBV object we must specify which module and
output we desire. Only Single-Scale CBVs are stored for Kepler.
Parameters
----------
hdu : astropy.io.fits.hdu.hdulist.HDUList
A pyfits opened FITS file containing the CBVs
module : int
Kepler CCD module 2 - 84
output : int
Kepler CCD output 1 - 4
**kwargs : Optional arguments
Passed to the TimeSeries superclass
"""
assert module > 1 and module < 85, 'Invalid module number'
assert output > 0 and output < 5, 'Invalid output number'
# Get the mission: Kepler or K2
# Sadly, the HDU does not explicitly say if this is Kepler or K2 CBVs.
if 'QUARTER' in hdu['PRIMARY'].header:
mission = 'Kepler'
elif 'CAMPAIGN' in hdu['PRIMARY'].header:
mission = 'K2'
else:
raise Exception('This does not appear to be a Kepler or K2 FITS HDU')
extName = 'MODOUT_{0}_{1}'.format(module, output)
try:
# Read the columns and meta data
with warnings.catch_warnings():
# By default, AstroPy emits noisy warnings about units commonly used
# in archived Kepler/TESS data products (e.g., "e-/s" and "pixels").
# We ignore them here because they don't affect Lightkurve's features.
# Inconsistencies between these data products and the FITS standard
# ought to be addressed at the archive level. (See issue #1216.)
warnings.simplefilter("ignore", category=UnitsWarning)
dataTbl = Table.read(hdu[extName], format="fits")
dataTbl.meta.update(hdu[0].header)
dataTbl.meta.update(hdu[extName].header)
# TimeSeries-based objects require a dedicated time column
# Replace NaNs with default time '2000-01-01', otherwise,
# astropy.time.Time complains
nanHere = np.nonzero(np.isnan(dataTbl['TIME_MJD'].data))[0]
timeData = dataTbl['TIME_MJD'].data
timeData[nanHere] = Time(['2000-01-01'], scale='utc').mjd
cbvTime = Time(timeData, format='mjd', scale='utc')
dataTbl.remove_column('TIME_MJD')
# Gaps are labelled as 'GAPFLAG' so rename!
dataTbl['GAP'] = dataTbl['GAPFLAG']
dataTbl.remove_column('GAPFLAG')
dataTbl.meta['MISSION'] = mission
dataTbl.meta['CBV_TYPE'] = 'SingleScale'
except:
dataTbl = None
cbvTime = None
# Here we instantiate the actual object
return self(data=dataTbl, time=cbvTime, **kwargs)
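# Hedged usage sketch (assumes `hdu` is an opened Kepler/K2 CBV FITS file):
#
#     cbvs = KeplerCotrendingBasisVectors.from_hdu(hdu=hdu, module=16, output=4)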
@property
def mission(self):
return self.meta.get('MISSION', None)
@mission.setter
def mission(self, mission):
self.meta['MISSION'] = mission
@property
def cbv_type(self):
return self.meta.get('CBV_TYPE', None)
@cbv_type.setter
def cbv_type(self, cbv_type):
self.meta['CBV_TYPE'] = cbv_type
@property
def quarter(self):
return self.meta.get('QUARTER', None)
@quarter.setter
def quarter(self, quarter):
if (self.mission == 'Kepler'):
self.meta['QUARTER'] = quarter
else:
pass
@property
def campaign(self):
return self.meta.get('CAMPAIGN', None)
@campaign.setter
def campaign(self, campaign):
if (self.mission == 'K2'):
self.meta['CAMPAIGN'] = campaign
else:
pass
@property
def module(self):
return self.meta.get('MODULE', None)
@module.setter
def module(self, module):
self.meta['MODULE'] = module
@property
def output(self):
return self.meta.get('OUTPUT', None)
@output.setter
def output(self, output):
self.meta['OUTPUT'] = output
def __repr__(self):
if self.mission == 'Kepler':
repr_string = 'Kepler CBVs, Quarter.Module.Output : {}.{}.{}, nCBVs : {}'\
''.format(self.quarter, self.module, self.output, len(self.cbv_indices))
elif self.mission == 'K2':
repr_string = 'K2 CBVs, Campaign.Module.Output : {}.{}.{}, nCBVs : {}'\
''.format( self.campaign, self.module, self.output, len(self.cbv_indices))
return repr_string
class TessCotrendingBasisVectors(CotrendingBasisVectors):
""" Sub-class for TESS cotrending basis vectors
See CotrendingBasisVectors for class details
Attributes
----------
CotrendingBasisVectors attributes
astropy.timeseries.TimeSeries attributes
mission : [str] ('TESS')
cbv_type : [str] ('SingleScale', 'MultiScale', 'Spike')
sector : [int] TESS Sector
camera : [int] TESS Camera Index
ccd : [int] TESS CCD Index
band : [int] MultiScale band number (invalid for other CBV types)
"""
validMissionOptions = ('TESS',)
validCBVTypes = ('SingleScale', 'MultiScale', 'Spike')
def __init__(self, data=None, time=None, **kwargs):
"""Initiates a TessCotrendingBasisVectors object.
Normally one would use TessCotrendingBasisVectors.from_hdu to
automatically set up the object. However, for certain functionality
one must instantiate the object directly.
"""
# Initialize attributes common to all CotrendingBasisVector classes
super(TessCotrendingBasisVectors, self).__init__(data=data,
time=time, **kwargs)
@classmethod
def from_hdu(self, hdu=None, cbv_type=None, band=None, **kwargs):
"""Class method to instantiate a TessCotrendingBasisVectors object
from a CBV FITS HDU.
TESS CBVs are in separate FITS files for each camera.CCD, so camera.CCD
is already specified in the HDU, here we need to specify
which CBV type and band is desired.
If the requested CBV type does not exist in the HDU then None is
returned
Parameters
----------
hdu : astropy.io.fits.hdu.hdulist.HDUList
A pyfits opened FITS file containing the CBVs
cbv_type : str
'SingleScale', 'MultiScale' or 'Spike'
band : int
Band number for 'MultiScale' CBVs
Ignored for 'SingleScale' or 'Spike'
**kwargs : Optional arguments
Passed to the TimeSeries superclass
"""
mission = hdu['PRIMARY'].header['TELESCOP']
assert mission == 'TESS', 'This does not appear to be a TESS FITS HDU'
# Check if a valid cbv_type and band was passed
if cbv_type not in self.validCBVTypes:
raise ValueError('Invalid cbv_type')
if band is not None and band < 1:
raise ValueError('Invalid band')
# Get the requested cbv_type
# Curiously, camera and CCD are not in the primary header!
camera = hdu[1].header['CAMERA']
ccd = hdu[1].header['CCD']
switcher = {
'SingleScale': 'CBV.single-scale.{}.{}'.format(camera, ccd),
'MultiScale': 'CBV.multiscale-band-{}.{}.{}'.format(band,
camera, ccd),
'Spike': 'CBV.spike.{}.{}'.format(camera, ccd),
'unknown': 'error'
}
extName = switcher.get(cbv_type, switcher['unknown'])
if (extName == 'error'):
raise Exception('Invalid cbv_type')
try:
# Read the columns and meta data
with warnings.catch_warnings():
# By default, AstroPy emits noisy warnings about units commonly used
# in archived TESS data products (e.g., "e-/s" and "pixels").
# We ignore them here because they don't affect Lightkurve's features.
# Inconsistencies between TESS data products and the FITS standard
# ought to be addressed at the archive level. (See issue #1216.)
warnings.simplefilter("ignore", category=UnitsWarning)
dataTbl = Table.read(hdu[extName], format="fits")
dataTbl.meta.update(hdu[0].header)
dataTbl.meta.update(hdu[extName].header)
# TimeSeries-based objects require a dedicated time column
# Replace NaNs with default time '2000-01-01', otherwise,
# astropy.time.Time complains
nanHere = np.nonzero(np.isnan(dataTbl['TIME'].data))[0]
timeData = dataTbl['TIME'].data
timeData[nanHere] = Time(['2000-01-01'], scale='tdb').mjd
cbvTime = Time(timeData, format='btjd', scale='tdb')
dataTbl.remove_column('TIME')
dataTbl.meta['MISSION'] = 'TESS'
dataTbl.meta['CBV_TYPE'] = cbv_type
dataTbl.meta['BAND'] = band
except:
dataTbl = None
cbvTime = None
# Here we instantiate the actual object
return self(data=dataTbl, time=cbvTime, **kwargs)
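# Hedged usage sketch (assumes `hdu` is an opened TESS CBV FITS file):
#
#     cbvs = TessCotrendingBasisVectors.from_hdu(hdu=hdu,
#                                                cbv_type='MultiScale', band=2)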
@property
def mission(self):
return self.meta.get('MISSION', None)
@mission.setter
def mission(self, mission):
self.meta['MISSION'] = mission
@property
def cbv_type(self):
return self.meta.get('CBV_TYPE', None)
@cbv_type.setter
def cbv_type(self, cbv_type):
self.meta['CBV_TYPE'] = cbv_type
@property
def band(self):
return self.meta.get('BAND', None)
@band.setter
def band(self, band):
self.meta['BAND'] = band
@property
def sector(self):
return self.meta.get('SECTOR', None)
@sector.setter
def sector(self, sector):
self.meta['SECTOR'] = sector
@property
def camera(self):
return self.meta.get('CAMERA', None)
@camera.setter
def camera(self, camera):
self.meta['CAMERA'] = camera
@property
def ccd(self):
return self.meta.get('CCD', None)
@ccd.setter
def ccd(self, ccd):
self.meta['CCD'] = ccd
def __repr__(self):
if (self.cbv_type == 'MultiScale'):
repr_string = 'TESS CBVs, Sector.Camera.CCD : {}.{}.{}, CBVType.Band: {}.{}, nCBVs : {}' \
''.format(self.sector, self.camera, self.ccd, self.cbv_type,
self.band, len(self.cbv_indices))
else:
repr_string = 'TESS CBVs, Sector.Camera.CCD : {}.{}.{}, CBVType : {}, nCBVS : {}'\
''.format(self.sector, self.camera, self.ccd, self.cbv_type, len(self.cbv_indices))
return repr_string
#*******************************************************************************
# Functions
@deprecated("2.1", alternative="load_kepler_cbvs", warning_type=LightkurveDeprecationWarning)
def download_kepler_cbvs(*args, **kwargs):
return load_kepler_cbvs(*args, **kwargs)
def load_kepler_cbvs(cbv_dir=None,mission=None, quarter=None, campaign=None,
channel=None, module=None, output=None):
"""Loads Kepler or K2 cotrending basis vectors, either from a local directory cbv_dir
or searches the public data archive at MAST <https://archive.stsci.edu>.
This function fetches the Cotrending Basis Vectors FITS HDU for the desired
mission, quarter/campaign and channel or module/output, etc...
and then extracts the requested basis vectors and returns a
KeplerCotrendingBasisVectors object
For Kepler/K2, the FITS files contain all channels in a single file per
quarter/campaign.
For Kepler this extracts the DR25 CBVs.
Parameters
----------
cbv_dir : str
Path to specific directory holding Kepler CBVs. If None, queries MAST.
mission : str, list of str
'Kepler' or 'K2'
quarter or campaign : int
Kepler Quarter or K2 Campaign.
channel or (module and output) : int
Kepler/K2 requested channel or module and output.
Must provide either channel, or module and output,
but not both.
Returns
-------
result : :class:`KeplerCotrendingBasisVectors` object
Examples
--------
This example will read in the CBVs for Kepler quarter 8,
and then extract the first 8 CBVs for module.output 16.4
>>> cbvs = load_kepler_cbvs(mission='Kepler', quarter=8, module=16, output=4) # doctest: +SKIP
"""
#***
# Validate inputs
# Make sure only the appropriate arguments are passed
if (mission == 'Kepler'):
assert isinstance(quarter, int), 'quarter must be passed for Kepler mission'
assert campaign is None, 'campaign must not be passed for Kepler mission'
elif (mission == 'K2'):
assert isinstance(campaign, int), 'campaign must be passed for K2 mission'
assert quarter is None, 'quarter must not be passed for K2 mission'
else:
raise ValueError('Unknown mission type')
# CBV FITS files use module/output, not channel
# So if channel is passed, convert to module/output
if (isinstance(channel, int)):
assert module is None, 'module must NOT be passed if channel is passed'
assert output is None, 'output must NOT be passed if channel is passed'
module, output = channel_to_module_output(channel)
channel = None
else:
assert module is not None, 'module must be passed'
assert output is not None, 'output must be passed'
if cbv_dir:
cbvBaseUrl = ""
elif (mission == 'Kepler'):
cbvBaseUrl = "http://archive.stsci.edu/missions/kepler/cbv/"
elif (mission == 'K2'):
cbvBaseUrl = "http://archive.stsci.edu/missions/k2/cbv/"
try:
kepler_cbv_fname = None
if cbv_dir:
cbv_files = glob.glob(os.path.join(cbv_dir,'*.fits'))
else:
soup = BeautifulSoup(requests.get(cbvBaseUrl).text, 'html.parser')
cbv_files = [fn['href'] for fn in soup.find_all('a') if fn['href'].endswith('fits')]
if mission == 'Kepler':
quarter = 'q{:02}'.format(quarter)
for cbv_file in cbv_files:
if quarter + '-d25' in cbv_file:
break
elif mission == 'K2':
campaign = 'c{:02}'.format(campaign)
for cbv_file in cbv_files:
if campaign in cbv_file:
break
kepler_cbv_fname = cbvBaseUrl + cbv_file
hdu = pyfits.open(kepler_cbv_fname)
return KeplerCotrendingBasisVectors.from_hdu(hdu=hdu, module=module, output=output)
except Exception as e:
raise Exception('CBVS were not found') from e
@deprecated("2.1", alternative="load_tess_cbvs", warning_type=LightkurveDeprecationWarning)
def download_tess_cbvs(*args, **kwargs):
return load_tess_cbvs(*args, **kwargs)
def load_tess_cbvs(cbv_dir=None,sector=None, camera=None,
ccd=None, cbv_type='SingleScale', band=None):
"""Loads TESS cotrending basis vectors, either from a directory of
CBV files already saved locally if cbv_dir is passed, or else
will retrieve the relevant files programmatically from MAST.
This function fetches the Cotrending Basis Vectors FITS HDU for the desired
cotrending basis vectors.
For TESS, each CCD's CBVs are stored in a separate FITS file.
For now, this function will only load 2-minute cadence CBVs. Once other
cadence CBVs become available this function will be updated to support
their downloads.
Parameters
----------
cbv_dir : str
Path to specific directory holding TESS CBVs. If None, queries MAST.
sector : int, list of ints
TESS Sector number.
camera and ccd : int
TESS camera and CCD
cbv_type : str
'SingleScale' or 'MultiScale' or 'Spike'
band : int
Multi-scale band number
Returns
-------
result : :class:`TessCotrendingBasisVectors` object
Examples
--------
This example will load presaved CBVs from directory '.' for TESS Sector 10 Camera.CCD 2.4
Multi-Scale band 2
>>> cbvs = load_tess_cbvs('.', sector=10, camera=2, ccd=4,  # doctest: +SKIP
...                       cbv_type='MultiScale', band=2)  # doctest: +SKIP
"""
# The easiest way to obtain a link to the CBV file for a TESS Sector and
# camera.CCD is
#
# 1. Download the bulk download curl script (with a predictable url) for the
# desired sector and search it for the camera.CCD needed
# 2. Download the CBV FITS file based on the link in the curl script
#
# The bulk download curl links have urls such as:
#
# https://archive.stsci.edu/missions/tess/download_scripts/sector/tesscurl_sector_17_cbv.sh
#
# Then the individual CBV files found in the curl file have urls such as:
#
# https://archive.stsci.edu/missions/tess/ffi/s0017/2019/279/1-1/tess2019279210107-s0017-1-1-0161-s_cbv.fits
#***
# Validate inputs
# Make sure only the appropriate arguments are passed
assert isinstance(sector, int), 'sector must be passed for TESS mission'
assert isinstance(camera, int), 'camera must be passed'
assert isinstance(ccd, int), 'CCD must be passed'
if cbv_type == 'MultiScale':
assert isinstance(band, int), 'band must be passed for multi-scale CBVs'
else:
assert band is None, 'band must NOT be passed for single-scale or spike CBVs'
# This is the string to search for in the curl script file
# Zero-pad the sector number to four digits (e.g., 's0010'); the '%04d'
# format specifier below handles this without a conditional
sector = int(sector)
try:
SearchString = 's%04d-%s-%s-' % (sector, str(camera),str(ccd))
except:
raise Exception('Error parsing sector string when getting TESS CBV FITS files')
try:
if cbv_dir is not None:
# Search the local directory for the CBV FITS file
# we are looking for
data = glob.glob(os.path.join(cbv_dir,'*.fits'))
fname = None
for line in data:
strLine = str(line)
if SearchString in strLine:
fname = strLine
break
if (fname is None):
raise Exception('CBV FITS file not found')
# Open the local FITS file
hdu = pyfits.open(fname)
else:
curlBaseUrl = 'https://archive.stsci.edu/missions/tess/download_scripts/sector/tesscurl_sector_'
curlEndUrl = '_cbv.sh'
curlUrl = curlBaseUrl + str(sector) + curlEndUrl
# This is the string to search for in the curl script file
# Read in the relevant curl script file and find the line for the CBV
# data we are looking for
data = urllib.request.urlopen(curlUrl)
foundIndex = None
for line in data:
strLine = str(line)
if SearchString in strLine:
foundIndex = strLine.index(SearchString)
break
if (foundIndex is None):
raise Exception('CBV FITS file not found')
# Extract url from strLine
htmlStartIndex = strLine.find('https:')
htmlEndIndex = strLine.rfind('fits')
# Add 4 for length of 'fits' string
tess_cbv_url = strLine[htmlStartIndex:htmlEndIndex+4]
hdu = pyfits.open(tess_cbv_url)
# Check that this is a TESS CBV FITS file
mission = hdu['Primary'].header['TELESCOP']
validate_method(mission, ['tess'])
return TessCotrendingBasisVectors.from_hdu(hdu=hdu, cbv_type=cbv_type, band=band)
except Exception as e:
raise Exception('CBVs were not found') from e
| 79,354
| 39.281726
| 112
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/correctors/metrics.py
|
"""Metrics to assess under- and over-fitting of systematic noise.
This module provides two metrics, `overfit_metric_lombscargle` and `underfit_metric_neighbors`,
which enable users to assess whether the noise in a systematics-corrected light curve has been
under- or over-fitted. These features were contributed by Jeff Smith (cf. https://github.com/lightkurve/lightkurve/pull/855)
and are in turn inspired by similar metrics in use by the PDC module of the official Kepler/TESS pipeline.
"""
import logging
import copy
import numpy as np
from scipy.interpolate import PchipInterpolator
from memoization import cached
from astropy import units as u
from .. import LightCurve
log = logging.getLogger(__name__)
def overfit_metric_lombscargle(
original_lc: LightCurve, corrected_lc: LightCurve, n_samples: int = 10
) -> float:
"""Uses a LombScarglePeriodogram to assess the change in broad-band
power in a corrected light curve to measure the degree of over-fitting.
The to_periodogram Lomb-Scargle method is used and the sampling band is
from one frequency separation to the Nyquist frequency
This over-fitting goodness metric is calibrated such that a metric
value of 0.5 means the introduced noise due to over-fitting is at the
same power level as the uncertainties in the light curve.
Parameters
----------
original_lc : LightCurve
Uncorrected light curve.
corrected_lc : LightCurve
Light curve from which systematics have been removed.
n_samples : int
The number of times to compute and average the metric
This can stabilize the value, default = 10
Returns
-------
overfit_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
# The fit can sometimes result in NaNs
# Also median normalize original and corrected LCs
orig_lc = original_lc.copy()
orig_lc = orig_lc.remove_nans().normalize()
orig_lc -= 1.0
corrected_lc = corrected_lc.copy()
corrected_lc = corrected_lc.remove_nans().normalize()
corrected_lc -= 1.0
if len(corrected_lc) == 0:
return 1.0
# Perform the measurement multiple times and average to stabilize the metric
metric_per_iter = []
for idx in np.arange(n_samples):
pgOrig = orig_lc.to_periodogram()
# Use the same periods in the corrected flux as just used in the
# original flux
pgCorrected = corrected_lc.to_periodogram(frequency=pgOrig.frequency)
# Get an estimate of the PSD at the uncertainties limit
# The raw and corrected uncertainties should be essentially identical so
# use the corrected
# TODO: the periodogram of WGN should be analytical to compute!
nNonGappedCadences = len(orig_lc)
meanCorrectedUncertainties = np.nanmean(corrected_lc.flux_err)
WGNCorrectedUncert = (
np.random.randn(nNonGappedCadences, 1) * meanCorrectedUncertainties
).T[0]
model_err = np.zeros(nNonGappedCadences)
noise_lc = LightCurve(
time=orig_lc.time, flux=WGNCorrectedUncert, flux_err=model_err
)
pgCorrectedUncert = noise_lc.to_periodogram()
meanCorrectedUncertPower = np.nanmean(np.array(pgCorrectedUncert.power))
# Compute the change in power
pgChange = np.array(pgCorrected.power) - np.array(pgOrig.power)
# Ignore nans
pgChange = pgChange[~np.isnan(pgChange)]
# If no increase in power in ANY bands then return a perfect loss
# function
if len(np.nonzero(pgChange > 0.0)[0]) == 0:
metric_per_iter.append(0.0)
else:
# We are only concerned with bands where the power increased so
# when(pgCorrected - pgOrig) > 0
# Normalize by the noise in the uncertainty
# We want the goodness to begin to degrade when the introduced
# noise is greater than the uncertainties.
# So, when Sigmoid > 0.5 (given twiceSigmoidInv defn.)
denominator = (
len(np.nonzero(pgChange > 0.0)[0])
) * meanCorrectedUncertPower
if denominator == 0:
# Suppress divide by zero warning
result = np.inf
else:
result = np.sum(pgChange[pgChange > 0.0]) / denominator
metric_per_iter.append(result)
metric = np.mean(metric_per_iter)
# We want the goodness to span (0,1]
# Use twice a reversed sigmoid to get a [0,1] range mapped from a [0,inf) range
def sigmoidInv(x):
return 2.0 / (1 + np.exp(x))
# Make sure maximum score is 1.0
metric = sigmoidInv(np.max([metric, 0.0]))
return metric
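# Hedged usage sketch with synthetic data (white-noise light curves; with no
# extra power introduced the metric should be 1.0):
#
#     import numpy as np
#     from lightkurve import LightCurve
#     t = np.arange(0, 10, 0.02)
#     flux = 1 + 0.001 * np.random.randn(len(t))
#     lc = LightCurve(time=t, flux=flux, flux_err=0.001 * np.ones(len(t)))
#     overfit_metric_lombscargle(lc, lc, n_samples=1)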
def underfit_metric_neighbors(
corrected_lc: LightCurve,
radius: float = 6000,
min_targets: int = 30,
max_targets: int = 50,
interpolate: bool = False,
extrapolate: bool = False,
):
"""This goodness metric measures the degree of under-fitting of the
CBVs to the light curve. It does so by measuring the mean residual target to
target Pearson correlation between the target under study and a selection of
neighboring SPOC SAP target light curves.
This function will search within the given radius in arcseconds and find the
min_targets nearest targets, up until max_targets is reached. If fewer than
min_targets are found, a MinTargetsError exception is raised.
The downloaded neighboring targets will normally be "aligned" to the
corrected_lc, meaning the cadence numbers are used to align the targets
to the corrected_lc. However, if interpolate=True then the targets will be
interpolated to the corrected_lc cadence times. extrapolate=True will
further extrapolate the targets to the corrected_lc cadence times.
The returned under-fitting goodness metric is calibrated such that a
value of 0.95 means the residual correlations in the target is
equivalent to chance correlations of White Gaussian Noise.
Parameters
----------
corrected_lc : LightCurve
Light curve from which systematics have been removed.
radius : float
Search radius to find neighboring targets in arcseconds
min_targets : int
Minimum number of targets to use in correlation metric
Using too few can cause unreliable results. Default = 30
max_targets : int
Maximum number of targets to use in correlation metric
Using too many can slow down the metric due to large data
download. Default = 50
interpolate : bool
If `True`, the flux values of the neighboring light curves will be
interpolated to match the times of the `corrected_lc`.
If `False`, the flux values will simply be aligned by time where possible.
Returns
-------
under_fitting_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
# Normalize and condition the corrected light curve
corrected_lc = corrected_lc.copy().remove_nans().normalize()
corrected_lc -= 1.0
corrected_lc_flux = corrected_lc.flux.value
# Download and pre-process neighboring light curves
lc_neighborhood, lc_neighborhood_flux = _download_and_preprocess_neighbors(
corrected_lc=corrected_lc,
radius=radius,
min_targets=min_targets,
max_targets=max_targets,
interpolate=interpolate,
extrapolate=extrapolate,
flux_column="sap_flux",
)
# Create fluxMatrix. The last entry is the target under study
# Check that all neighboring targets have similar shape
if not np.all([len(lc_neighborhood_flux[0]) == len(flux) for flux in lc_neighborhood_flux]):
raise Exception('Neighboring targets do not all have the same shape')
fluxMatrix = np.zeros((len(lc_neighborhood_flux[0]), len(lc_neighborhood_flux) + 1))
for idx in np.arange(len(fluxMatrix[0, :]) - 1):
fluxMatrix[:, idx] = lc_neighborhood_flux[idx]
# Add in the trimmed target under study
fluxMatrix[:, -1] = corrected_lc_flux
# Ignore NaNs
mask = ~np.isnan(corrected_lc_flux)
fluxMatrix = fluxMatrix[mask, :]
# Determine the target-target correlation between target and
# neighborhood
correlationMatrix = _compute_correlation(fluxMatrix)
# The selection basis for targets used for the PDC-MAP SVD uses median
# absolute correlation per star. However, here we wish to overemphasize
# any residual correlation between a handfull of targets and not the
# overall correlation (which should almost always be low).
# We want a residual correlation larger than random correlations of WGN
# to mean a meaningful correlation. The median Pearson correlation of
# WGN of nCadences is approximated by the equation:
# 0.0010288 + 0.80304 * nCadences^(-0.50128)
nCadences = len(fluxMatrix[:, 0])
beta = [0.0007, 0.8083, -0.5023]
WGNCorrelation = beta[0] + beta[1] * (nCadences ** (beta[2]))
# badLimit is the goodness value for WGN correlations
# I.e. anything above this goodness value is equivalent to random correlations
# I.e. 0.95 = sigmoidInv(WGNCorr * correlationScale)
badLimit = 0.95
correlationScale = 1 / (WGNCorrelation) * np.log((2.0 / badLimit) - 1.0)
# Over-emphasize any individual correlation groups. Note the power of
# three after taking the absolute value
# of the correlation. Also, the mean is used so that outliers are *not* ignored.
# Zero diagonal elements
correlationMatrix = np.tril(correlationMatrix, k=-1) + np.triu(
correlationMatrix, k=+1
)
# Add up the correlation over all targets ignoring NaNs (no corrected fit)
correlation = correlationScale * np.nanmean(np.abs(correlationMatrix) ** 3, axis=0)
# We only want the last entry, which is for the target under study
correlation = correlation[-1]
# We want the goodness to span (0,1]
# Use twice a reversed sigmoid to get a [0,1] range mapped from a [0,inf) range
def sigmoidInv(x):
return 2.0 / (1 + np.exp(x))
metric = sigmoidInv(correlation)
return metric
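# Hedged usage sketch (downloads neighboring SPOC SAP light curves, so
# network access is required):
#
#     metric = underfit_metric_neighbors(corrected_lc, radius=5000,
#                                        min_targets=30, max_targets=50)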
# Custom exception to track when minimum targets is not reached
class MinTargetsError(Exception):
pass
def _unique_key_for_processing_neighbors(
corrected_lc: LightCurve,
radius: float = 6000.0,
min_targets: int = 30,
max_targets: int = 50,
interpolate: bool = False,
extrapolate: bool = False,
author: tuple = ("Kepler", "K2", "SPOC"),
flux_column: str = "sap_flux",
):
"""Returns a unique key that will determine whether a cached version of a
call to `_download_and_preprocess_neighbors` can be re-used."""
return f"{corrected_lc.ra}{corrected_lc.dec}{corrected_lc.cadenceno}{radius}{min_targets}{max_targets}{author}{flux_column}{interpolate}"
@cached(custom_key_maker=_unique_key_for_processing_neighbors)
def _download_and_preprocess_neighbors(
corrected_lc: LightCurve,
radius: float = 6000.0,
min_targets: int = 30,
max_targets: int = 50,
interpolate: bool = False,
extrapolate: bool = False,
author: tuple = ("Kepler", "K2", "SPOC"),
flux_column: str = "sap_flux",
):
"""Returns a collection of neighboring light curves.
If fewer than min_targets are found, a MinTargetsError exception is raised.
Parameters
----------
corrected_lc : LightCurve
Light curve around which to look for neighbors.
radius : float
Conesearch radius in arcseconds.
min_targets : int
Minimum number of targets to return.
A `ValueError` will be raised if this number cannot be obtained.
max_targets : int
Maximum number of targets to return.
Using too many can slow down this function due to large data
download.
interpolate : bool
If `True`, the flux values of the neighboring light curves will be
interpolated to match the times of the `corrected_lc`.
If `False`, the flux values will simply be aligned by time where possible.
extrapolate : bool
If `True`, the flux values of the neighboring light curves will be
also be extrapolated. Note: extrapolated values can be unstable.
Returns
-------
lc_neighborhood : LightCurveCollection
Collection of all neighboring light curves used.
lc_neighborhood_flux : list
List containing the flux arrays of the neighboring light curves,
interpolated or aligned with `corrected_lc` if requested.
"""
if extrapolate and (extrapolate != interpolate):
raise Exception('interpolate must be True if extrapolate is True')
search = corrected_lc.search_neighbors(
limit=max_targets, radius=radius, author=author
)
if len(search) < min_targets:
raise MinTargetsError(
f"Unable to find at least {min_targets} neighbors within {radius} arcseconds radius."
)
log.info(
f"Downloading {len(search)} neighboring light curves. This might take a while."
)
lcfCol = search.download_all(flux_column=flux_column)
# Pre-process the neighboring light curves
# Align or interpolate to the corrected light curve
lc_neighborhood = []
lc_neighborhood_flux = []
# Extract SAP light curves
# We want zero-centered median normalized light curves
for lc in lcfCol:
lcSAP = lc.remove_nans().normalize()
lcSAP.flux -= 1.0
# Align or interpolate the neighboring target with the target under study
if interpolate:
# Interpolate to corrected_lc cadence times
fInterp = PchipInterpolator(
lcSAP.time.value,
lcSAP.flux.value,
extrapolate=extrapolate,
)
lc_neighborhood_flux.append(fInterp(corrected_lc.time.value))
else:
# The CBVs were aligned so also align the neighboring
# lightcurves
aligned_lcSAP = _align_to_lc(lcSAP, corrected_lc)
lc_neighborhood_flux.append(aligned_lcSAP.flux.value)
lc_neighborhood.append(lcSAP)
if len(lc_neighborhood) < min_targets:
raise MinTargetsError(
f"Unable to find at least {min_targets} neighbors within {radius} arcseconds radius."
)
# Store the unmodified light curve neighborhood but also save the
# aligned or interpolated neighborhood flux
from .. import LightCurveCollection  # local import to avoid circular import
lc_neighborhood = LightCurveCollection(lc_neighborhood)
return lc_neighborhood, lc_neighborhood_flux
def _align_to_lc(lc, ref_lc):
""" Aligns a light curve to a reference light curve.
This method will use the cadence number (lc.cadenceno) to
perform the synchronization. Only cadence numbers that exist in both
the lc and the ref_lc will have values in the returned lc. All
cadence numbers that exist in ref_lc but not in lc will
have NaNs returned for those cadences.
Any cadences in the lc not in ref_lc will be removed from the returned lc.
The returned lc is sorted by cadenceno.
Parameters
----------
lc : LightCurve object
The light curve to align
ref_lc : LightCurve object
The reference light curve to align to
Returns
-------
lc : LightCurve object
The light curve aligned to ref_lc
"""
if not isinstance(lc, LightCurve):
raise Exception('<lc> must be a LightCurve class')
if not isinstance(ref_lc, LightCurve):
raise Exception('<ref_lc> must be a LightCurve class')
if hasattr(lc, 'cadenceno'):
# Make a deepcopy so we do not just return a modified original
aligned_lc = copy.deepcopy(lc)
# NaN any cadences in ref_lc and not lc
# This requires us to add rows to the lc table
lc_nan_mask = np.logical_not(np.in1d(ref_lc.cadenceno, aligned_lc.cadenceno))
lc_nan_indices = np.nonzero(lc_nan_mask)[0]
if len(lc_nan_indices) > 0:
row_to_add = LightCurve(aligned_lc[0:len(lc_nan_indices)])
row_to_add['time'] = ref_lc.time[lc_nan_indices]
row_to_add['cadenceno'] = ref_lc.cadenceno[lc_nan_indices]
row_to_add['flux'] = np.nan
aligned_lc = aligned_lc.append(row_to_add)
# There appears to be a bug in astropy.timeseries when using ts[x:y]
# in combination with ts.remove_row() or ts.remove_rows.
# See LightKurve Issue #836.
# To get around the error for now, we will attempt to use
# ts[x:y]. If it errors out then revert to remove_rows, which is
# REALLY slow.
try:
# This method is fast but might cause errors
keep_indices = np.nonzero(np.in1d(aligned_lc.cadenceno, ref_lc.cadenceno))[0]
aligned_lc = aligned_lc[keep_indices]
        except Exception:
# This method is slow but appears to be more robust
trim_indices = np.nonzero(np.logical_not(
np.in1d(aligned_lc.cadenceno, ref_lc.cadenceno)))[0]
aligned_lc.remove_rows(trim_indices)
# Now sort the lc by cadenceno
aligned_lc.sort('cadenceno')
else:
        raise Exception('align requires cadence numbers for the ' + \
                        'light curve. NO ALIGNMENT OCCURRED')
return aligned_lc
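# --- Hedged illustration (not part of the library) ---
# The core of the alignment above is plain index matching on cadence
# numbers; a toy sketch of that logic with numpy, using invented arrays:
import numpy as np

_ref_cadenceno = np.array([10, 11, 12, 13, 14])
_lc_cadenceno = np.array([12, 10, 15])  # unsorted, partial overlap, one extra

# Cadences in the reference but missing from the light curve get NaN flux
# (cf. `lc_nan_mask` above):
_missing = _ref_cadenceno[~np.in1d(_ref_cadenceno, _lc_cadenceno)]  # [11 13 14]
# Cadences in the light curve but absent from the reference are dropped
# (cf. the trim step above):
_kept = _lc_cadenceno[np.in1d(_lc_cadenceno, _ref_cadenceno)]       # [12 10]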
def _compute_correlation(fluxMatrix):
"""Finds the empirical target to target flux time series Pearson correlation.
Parameters
----------
    fluxMatrix : float 2-d array[ncadences, ntargets]
        The matrix of target flux. There should be no gaps or NaNs.
Returns
-------
correlation_matrix : [float 2-d array] (nTargets x nTargets)
The target-target correlation
"""
nCadences = len(fluxMatrix[:, 0])
# Scale each flux value by the RMS flux for the given target.
rmsFlux = np.sqrt(np.sum(fluxMatrix ** 2.0, axis=0) / nCadences)
# If RMS is zero then set to Inf so that we don't get a divide by zero warning
rmsFlux[np.nonzero(rmsFlux == 0.0)[0]] = np.inf
unitNormFlux = fluxMatrix / np.tile(rmsFlux, (nCadences, 1))
correlation_matrix = unitNormFlux.T.dot(unitNormFlux) / nCadences
return correlation_matrix
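# --- Hedged illustration (not part of the library) ---
# Sanity check of the statistic above on toy data: for zero-mean columns,
# normalizing by the RMS and taking the scaled dot product reproduces the
# Pearson correlation, so the result matches `np.corrcoef`.
import numpy as np

_rng = np.random.default_rng(0)
_flux = _rng.normal(size=(100, 3))   # (nCadences, nTargets)
_flux -= _flux.mean(axis=0)          # zero-mean per target
assert np.allclose(_compute_correlation(_flux), np.corrcoef(_flux.T))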
| 18,295 | 38.010661 | 141 | py | lightkurve | lightkurve-main/src/lightkurve/correctors/__init__.py |
"""This sub-package defines classes which help remove instrument systematics
or variability from time series photometry data.
Classes provided by this package should inherit from an abstract `Corrector`
class, which provides three key methods::
Corrector(**data_required):
.correct(**options) -> Returns a systematics-corrected LightCurve.
.diagnose(**options) -> Returns figures which elucidate the correction.
.interact() -> Returns a widget to tune the options interactively (optional).
Classes currently provided are `KeplerCBVCorrector`, `SFFCorrector`, and
`PLDCorrector`.
"""
from .designmatrix import *
from .pldcorrector import *
from .sffcorrector import *
from .cbvcorrector import *
from .regressioncorrector import *
| 762 | 35.333333 | 85 | py | lightkurve | lightkurve-main/src/lightkurve/correctors/regressioncorrector.py |
"""Defines `RegressionCorrector` to solve large linear regression problems
with user-defined Gaussian priors in a fast, analytical way.
"""
import logging
import warnings
from astropy.stats import sigma_clip
from astropy import units as u
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.masked import Masked
import matplotlib.pyplot as plt
import numpy as np
from scipy.sparse import issparse, csr_matrix
from .corrector import Corrector
from .designmatrix import (
DesignMatrix,
DesignMatrixCollection,
SparseDesignMatrix,
SparseDesignMatrixCollection,
)
from ..lightcurve import LightCurve, MPLSTYLE
__all__ = ["RegressionCorrector"]
log = logging.getLogger(__name__)
class RegressionCorrector(Corrector):
r"""Remove noise using linear regression against a `.DesignMatrix`.
.. math::
\newcommand{\y}{\mathbf{y}}
\newcommand{\cov}{\boldsymbol\Sigma_\y}
\newcommand{\w}{\mathbf{w}}
\newcommand{\covw}{\boldsymbol\Sigma_\w}
\newcommand{\muw}{\boldsymbol\mu_\w}
\newcommand{\sigw}{\boldsymbol\sigma_\w}
\newcommand{\varw}{\boldsymbol\sigma^2_\w}
Given a column vector of data :math:`\y`
and a design matrix of regressors :math:`X`,
we will find the vector of coefficients :math:`\w`
such that:
.. math::
\mathbf{y} = X\mathbf{w} + \mathrm{noise}
We will assume that the model fits the data within Gaussian uncertainties:
.. math::
p(\y | \w) = \mathcal{N}(X\w, \cov)
We make the regression robust by placing Gaussian priors on :math:`\w`:
.. math::
p(\w) = \mathcal{N}(\muw, \sigw)
We can then find the maximum likelihood solution of the posterior
distribution :math:`p(\w | \y) \propto p(\y | \w) p(\w)` by solving
the matrix equation:
.. math::
        \w = \covw (X^\top \cov^{-1} \y + \boldsymbol\sigma^{-2}_\w I \muw)
    Where :math:`\covw` is the covariance matrix of the coefficients:
    .. math::
        \covw^{-1} = (X^\top \cov^{-1} X + \boldsymbol\sigma^{-2}_\w I)
Parameters
----------
lc : `.LightCurve`
The light curve that needs to be corrected.
"""
def __init__(self, lc):
# We don't accept NaN in time or flux.
if np.any([~np.isfinite(lc.time.value), ~np.isfinite(lc.flux)]):
raise ValueError(
"Input light curve has NaNs in time or flux. "
"Please remove NaNs before correction "
"(e.g. using `lc = lc.remove_nans()`)."
)
# We don't accept NaN in flux_err, unless all values are NaN.
if np.any(~np.isfinite(lc.flux_err)) and not np.all(~np.isfinite(lc.flux_err)):
raise ValueError(
"Input light curve has NaNs in `flux_err`. "
"Please remove NaNs before correction "
"(e.g. using `lc = lc.remove_nans()`)."
)
if np.any(lc.flux_err[np.isfinite(lc.flux_err)] <= 0):
raise ValueError(
"Input light curve contains flux uncertainties "
"smaller than or equal to zero. Please remove "
"these (e.g. using `lc = lc[lc.flux_err > 0]`)."
)
self.lc = lc
# The following properties will be set when correct() is called.
# We're setting them here so they do not throw value errors
self.design_matrix_collection = None
self.coefficients = None
self.corrected_lc = None
self.model_lc = None
self.diagnostic_lightcurves = None
def __repr__(self):
return "RegressionCorrector (ID: {})".format(self.lc.targetid)
@property
def dmc(self):
"""Shorthand for self.design_matrix_collection."""
return self.design_matrix_collection
def _fit_coefficients(
self, cadence_mask=None, prior_mu=None, prior_sigma=None, propagate_errors=False
):
"""Fit the linear regression coefficients.
This function will solve a linear regression with Gaussian priors
on the coefficients.
Parameters
----------
cadence_mask : np.ndarray of bool
Mask, where True indicates a cadence that should be used.
Returns
-------
coefficients : np.ndarray
The best fit model coefficients to the data.
"""
# If prior_mu is specified, prior_sigma must be specified
if not ((prior_mu is None) & (prior_sigma is None)) | (
(prior_mu is not None) & (prior_sigma is not None)
):
raise ValueError("Please specify both `prior_mu` and `prior_sigma`")
# Default cadence mask
if cadence_mask is None:
cadence_mask = np.ones(len(self.lc.flux.value), bool)
# If flux errors are not all finite numbers, then default to array of ones
if np.all(~np.isfinite(self.lc.flux_err.value)):
flux_err = np.ones(cadence_mask.sum())
else:
flux_err = self.lc.flux_err.value[cadence_mask]
# Retrieve the design matrix (X) as a numpy array
X = self.dmc.X[cadence_mask]
if isinstance(X, np.ndarray):
# Compute `X^T cov^-1 X + 1/prior_sigma^2`
sigma_w_inv = X.T.dot(X / flux_err[:, None] ** 2)
# Compute `X^T cov^-1 y + prior_mu/prior_sigma^2`
B = np.dot(X.T, self.lc.flux.value[cadence_mask] / flux_err ** 2)
elif issparse(X):
sigma_f_inv = csr_matrix(1 / flux_err[:, None] ** 2)
# Compute `X^T cov^-1 X + 1/prior_sigma^2`
sigma_w_inv = X.T.dot(X.multiply(sigma_f_inv))
# Compute `X^T cov^-1 y + prior_mu/prior_sigma^2`
B = X.T.dot((self.lc.flux[cadence_mask] / flux_err ** 2))
sigma_w_inv = sigma_w_inv.toarray()
if prior_sigma is not None:
sigma_w_inv = sigma_w_inv + np.diag(1.0 / prior_sigma ** 2)
if prior_sigma is not None:
B = B + (prior_mu / prior_sigma ** 2)
# Solve for weights w
w = np.linalg.solve(sigma_w_inv, B).T
if propagate_errors:
w_err = np.linalg.inv(sigma_w_inv)
else:
w_err = np.zeros(len(w)) * np.nan
return w, w_err
def correct(
self,
design_matrix_collection,
cadence_mask=None,
sigma=5,
niters=5,
propagate_errors=False,
):
"""Find the best fit correction for the light curve.
Parameters
----------
design_matrix_collection : `.DesignMatrix` or `.DesignMatrixCollection`
One or more design matrices. Each matrix must have a shape of
(time, regressors). The columns contained in each matrix must be
known to correlate with additive noise components we want to remove
from the light curve.
cadence_mask : np.ndarray of bools (optional)
Mask, where True indicates a cadence that should be used.
sigma : int (default 5)
Standard deviation at which to remove outliers from fitting
niters : int (default 5)
Number of iterations to fit and remove outliers
propagate_errors : bool (default False)
Whether to propagate the uncertainties from the regression. Default is False.
Setting to True will increase run time, but will sample from multivariate normal
distribution of weights.
Returns
-------
corrected_lc : `.LightCurve`
Corrected light curve, with noise removed.
"""
if not isinstance(design_matrix_collection, DesignMatrixCollection):
if isinstance(design_matrix_collection, SparseDesignMatrix):
design_matrix_collection = SparseDesignMatrixCollection(
[design_matrix_collection]
)
elif isinstance(design_matrix_collection, DesignMatrix):
design_matrix_collection = DesignMatrixCollection(
[design_matrix_collection]
)
# Validate the design matrix. Emits a warning if the matrix has low rank.
design_matrix_collection.validate()
self.design_matrix_collection = design_matrix_collection
if cadence_mask is None:
self.cadence_mask = np.ones(len(self.lc.time), bool)
else:
self.cadence_mask = cadence_mask
# Create an outlier mask using iterative sigma clipping
self.outlier_mask = np.zeros_like(self.cadence_mask)
for count in range(niters):
tmp_cadence_mask = self.cadence_mask & ~self.outlier_mask
coefficients, coefficients_err = self._fit_coefficients(
cadence_mask=tmp_cadence_mask,
prior_mu=self.dmc.prior_mu,
prior_sigma=self.dmc.prior_sigma,
propagate_errors=propagate_errors,
)
model = np.ma.masked_array(
data=self.dmc.X.dot(coefficients), mask=~tmp_cadence_mask
)
model = u.Quantity(model, unit=self.lc.flux.unit)
residuals = self.lc.flux - model
if isinstance(residuals, Masked):
# Workaround for https://github.com/astropy/astropy/issues/14360
# in passing MaskedQuantity to sigma_clip, by converting it to Quantity.
                # We explicitly fill masked values with `np.nan` here to ensure they are masked during sigma clipping.
                # To handle an unlikely edge case, convert int to float so that filling with `np.nan` works.
# The conversion is acceptable because only the mask of the sigma_clip() result is used.
if np.issubdtype(residuals.dtype, np.int_):
residuals = residuals.astype(float)
residuals = residuals.filled(np.nan)
with warnings.catch_warnings(): # Ignore warnings due to NaNs
warnings.simplefilter("ignore", AstropyUserWarning)
self.outlier_mask |= sigma_clip(residuals, sigma=sigma).mask
log.debug(
"correct(): iteration {}: clipped {} cadences"
"".format(count, self.outlier_mask.sum())
)
self.coefficients = coefficients
self.coefficients_err = coefficients_err
model_flux = self.dmc.X.dot(coefficients)
model_flux -= np.median(model_flux)
if propagate_errors:
with warnings.catch_warnings():
# ignore "RuntimeWarning: covariance is not symmetric positive-semidefinite."
warnings.simplefilter("ignore", RuntimeWarning)
samples = np.asarray(
[
self.dmc.X.dot(
np.random.multivariate_normal(
coefficients, coefficients_err
)
)
for idx in range(100)
]
).T
model_err = np.abs(
np.percentile(samples, [16, 84], axis=1)
- np.median(samples, axis=1)[:, None].T
).mean(axis=0)
else:
model_err = np.zeros(len(model_flux))
self.model_lc = LightCurve(
time=self.lc.time,
flux=u.Quantity(model_flux, unit=self.lc.flux.unit),
flux_err=u.Quantity(model_err, unit=self.lc.flux.unit),
)
self.corrected_lc = self.lc.copy()
self.corrected_lc.flux = self.lc.flux - self.model_lc.flux
self.corrected_lc.flux_err = (self.lc.flux_err ** 2 + model_err ** 2) ** 0.5
self.diagnostic_lightcurves = self._create_diagnostic_lightcurves()
return self.corrected_lc
def _create_diagnostic_lightcurves(self):
"""Returns a dictionary containing all diagnostic light curves.
The dictionary will provide a light curve for each matrix in the
design matrix collection.
"""
if self.coefficients is None:
raise ValueError("you need to call `correct()` first")
lcs = {}
for idx, submatrix in enumerate(self.dmc.matrices):
# What is the index of the first column for the submatrix?
firstcol_idx = sum([m.shape[1] for m in self.dmc.matrices[:idx]])
submatrix_coefficients = self.coefficients[
firstcol_idx : firstcol_idx + submatrix.shape[1]
]
# submatrix_coefficients_err = self.coefficients_err[firstcol_idx:firstcol_idx+submatrix.shape[1], firstcol_idx:firstcol_idx+submatrix.shape[1]]
# samples = np.asarray([np.dot(submatrix.values, np.random.multivariate_normal(submatrix_coefficients, submatrix_coefficients_err)) for idx in range(100)]).T
# model_err = np.abs(np.percentile(samples, [16, 84], axis=1) - np.median(samples, axis=1)[:, None].T).mean(axis=0)
model_flux = u.Quantity(
submatrix.X.dot(submatrix_coefficients), unit=self.lc.flux.unit
)
model_flux_err = u.Quantity(
np.zeros(len(model_flux)), unit=self.lc.flux.unit
)
lcs[submatrix.name] = LightCurve(
time=self.lc.time,
flux=model_flux,
flux_err=model_flux_err,
label=submatrix.name,
)
return lcs
def _diagnostic_plot(self):
"""Produce diagnostic plots to assess the effectiveness of the correction.
Note: We need a hidden function so that other correctors can alter the plot.
"""
if not hasattr(self, "corrected_lc"):
raise ValueError(
"Please call the `correct()` method before trying to diagnose."
)
with plt.style.context(MPLSTYLE):
_, axs = plt.subplots(2, figsize=(10, 6), sharex=True)
ax = axs[0]
self.lc.plot(ax=ax, normalize=False, label="original", alpha=0.4)
for key in self.diagnostic_lightcurves.keys():
(
self.diagnostic_lightcurves[key]
- np.median(self.diagnostic_lightcurves[key].flux)
+ np.median(self.lc.flux)
).plot(ax=ax)
ax.set_xlabel("")
ax = axs[1]
self.lc.plot(ax=ax, normalize=False, alpha=0.2, label="original")
self.corrected_lc[self.outlier_mask].scatter(
normalize=False, c="r", marker="x", s=10, label="outlier_mask", ax=ax
)
self.corrected_lc[~self.cadence_mask].scatter(
normalize=False,
c="dodgerblue",
marker="x",
s=10,
label="~cadence_mask",
ax=ax,
)
self.corrected_lc.plot(normalize=False, label="corrected", ax=ax, c="k")
return axs
def diagnose(self):
"""Returns diagnostic plots to assess the most recent call to `correct()`.
If `correct()` has not yet been called, a ``ValueError`` will be raised.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
return self._diagnostic_plot()
def diagnose_priors(self):
"""Returns a diagnostic plot visualizing how the best-fit coefficients
compare against the priors.
The method will show the results obtained during the most recent call
to `correct()`. If `correct()` has not yet been called, a
``ValueError`` will be raised.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if not hasattr(self, "corrected_lc"):
raise ValueError(
"Please call the `correct()` method before trying to diagnose."
)
names = [dm.name for dm in self.dmc]
with plt.style.context(MPLSTYLE):
_, axs = plt.subplots(
1, len(names), figsize=(len(names) * 4, 4), sharey=True
)
if not hasattr(axs, "__iter__"):
axs = [axs]
for idx, ax, X in zip(range(len(names)), axs, self.dmc):
X.plot_priors(ax=ax)
firstcol_idx = sum([m.shape[1] for m in self.dmc.matrices[:idx]])
submatrix_coefficients = self.coefficients[
firstcol_idx : firstcol_idx + X.shape[1]
]
[ax.axvline(s, color="red", zorder=-1) for s in submatrix_coefficients]
return axs
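# --- Hedged illustration (not part of the library) ---
# The core linear algebra of `_fit_coefficients` above, reduced to plain
# numpy on toy data: solve
#     (X^T C^-1 X + diag(1/sigma_w^2)) w = X^T C^-1 y + mu_w / sigma_w^2
# where C = diag(flux_err^2). All names here are invented for the sketch.
import numpy as np

_rng = np.random.default_rng(1)
_X = np.vander(np.linspace(0.0, 1.0, 200), 3)   # toy design matrix (200, 3)
_w_true = np.array([0.5, -1.0, 2.0])
_y = _X @ _w_true + _rng.normal(scale=0.1, size=200)
_flux_err = np.full(200, 0.1)
_prior_mu = np.zeros(3)
_prior_sigma = np.full(3, 10.0)                 # weak Gaussian priors

_sigma_w_inv = _X.T @ (_X / _flux_err[:, None] ** 2) + np.diag(1.0 / _prior_sigma ** 2)
_B = _X.T @ (_y / _flux_err ** 2) + _prior_mu / _prior_sigma ** 2
_w = np.linalg.solve(_sigma_w_inv, _B)          # recovers _w_true closely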
| 16,667 | 38.035129 | 169 | py | lightkurve | lightkurve-main/src/lightkurve/correctors/corrector.py |
"""Implements the abstract `Corrector` base class.
"""
from abc import ABC, abstractmethod
import matplotlib
import numpy as np
from .. import LightCurve
from .metrics import overfit_metric_lombscargle, underfit_metric_neighbors
class Corrector(ABC):
"""Abstract base class documenting the required structure of classes
designed to remove systematic noise from light curves.
Attributes
----------
original_lc : LightCurve
The uncorrected light curve. Must be passed into (or computed by) the
constructor method.
corrected_lc : LightCurve
Corrected light curve. Must be updated upon each call to the `correct()` method.
cadence_mask : np.array of dtype=bool
Boolean array with the same length as `original_lc`.
True indicates that a cadence should be used to fit the noise model.
By setting certain cadences to False, users can exclude those cadences
from informing the noise model, which will help prevent the overfitting
of those signals (e.g. exoplanet transits).
By default, the cadence mask is True across all cadences.
Methods
-------
__init__()
Accepts all the data required to execute the correction.
The constructor must set the `original_lc` attribute.
correct() -> LightCurve
Executes the correction, optionally accepting meaningful parameters that
can be used to modify the way the correction is applied.
This method must set or update the `corrected_lc` attribute on each run.
diagnose() -> matplotlib.axes.Axes
Creates plots to elucidate the user's most recent call to `correct()`.
"""
@property
def original_lc(self) -> LightCurve:
if hasattr(self, "_original_lc"):
return self._original_lc
else:
raise AttributeError("`original_lc` has not been instantiated yet.")
@original_lc.setter
def original_lc(self, original_lc):
self._original_lc = original_lc
@property
def corrected_lc(self) -> LightCurve:
if hasattr(self, "_corrected_lc"):
return self._corrected_lc
else:
raise AttributeError(
"You need to call the `correct()` method "
"before you can access `corrected_lc`."
)
@corrected_lc.setter
def corrected_lc(self, corrected_lc):
self._corrected_lc = corrected_lc
@property
def cadence_mask(self) -> np.array:
if not hasattr(self, "_cadence_mask"):
self._cadence_mask = np.ones(len(self.original_lc), dtype=bool)
return self._cadence_mask
@cadence_mask.setter
def cadence_mask(self, cadence_mask):
self._cadence_mask = cadence_mask
def __init__(self, original_lc: LightCurve) -> None:
"""Constructor method.
The constructor shall:
* accept all data required to run the correction (e.g. light curves,
target pixel files, engineering data).
* instantiate the `original_lc` property.
"""
self.original_lc = original_lc
@abstractmethod
def correct(
self, cadence_mask: np.array = None, optimize: bool = False
) -> LightCurve:
"""Returns a `LightCurve` from which systematic noise has been removed.
This method shall:
* accept meaningful parameters that can be used to tune the correction,
including:
* `optimize`: should an optimizer be used to tune the parameters?
* `cadence_mask`: flags cadences to be used to fit the noise model.
* store all parameters as object attributes (e.g. `self.optimize`, `self.cadence_mask`);
* store helpful diagnostic information as object attributes;
* store the result in the `self.corrected_lc` attribute;
* return `self.corrected_lc`.
"""
        if cadence_mask is not None:
self.cadence_mask = cadence_mask
# ... perform correction ...
# self.corrected_lc = corrected_lc
# return corrected_lc
@abstractmethod
def diagnose(self) -> matplotlib.axes.Axes:
"""Returns plots which elucidate the most recent call to `correct()`.
This method shall plot useful diagnostic information which have been
stored as object attributes during the most recent call to `correct()`.
"""
pass
def compute_overfit_metric(self, **kwargs) -> float:
"""Measures the degree of over-fitting in the correction.
See the docstring of `lightkurve.correctors.metrics.overfit_metric_lombscargle`
for details.
Returns
-------
overfit_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
return overfit_metric_lombscargle(
# Ignore masked cadences in the computation
self.original_lc[self.cadence_mask],
self.corrected_lc[self.cadence_mask],
**kwargs
)
def compute_underfit_metric(self, **kwargs) -> float:
"""Measures the degree of under-fitting the correction.
See the docstring of `lightkurve.correctors.metrics.underfit_metric_neighbors`
for details.
Returns
-------
underfit_metric : float
A float in the range [0,1] where 0 => Bad, 1 => Good
"""
return underfit_metric_neighbors(self.corrected_lc[self.cadence_mask], **kwargs)
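# --- Hedged illustration (not part of the library) ---
# A minimal concrete `Corrector` subclass sketching the contract documented
# above: `correct()` must set `corrected_lc` and return it, `diagnose()`
# must plot the most recent correction. The median-subtraction "model" is a
# placeholder, and the use of `LightCurve.copy()` / Quantity arithmetic is
# assumed to behave as elsewhere in lightkurve.
class _MedianCorrector(Corrector):
    def correct(self, cadence_mask=None, optimize=False):
        if cadence_mask is not None:
            self.cadence_mask = cadence_mask
        lc = self.original_lc.copy()
        # Fit the "noise model" (here: a constant) on unmasked cadences only.
        model = np.median(lc.flux[self.cadence_mask])
        lc.flux = lc.flux - model
        self.corrected_lc = lc
        return self.corrected_lc

    def diagnose(self):
        ax = self.original_lc.plot(label="original")
        self.corrected_lc.plot(ax=ax, label="corrected")
        return ax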
| 5,460 | 34.461039 | 96 | py | lightkurve | lightkurve-main/src/lightkurve/correctors/designmatrix.py |
"""Defines design matrix objects to aid linear regression problems.
Specifically, this module adds the `DesignMatrix`, `DesignMatrixCollection`,
`SparseDesignMatrix`, and `SparseDesignMatrixCollection` classes which
are design to work with the `RegressionCorrector` class.
"""
from copy import deepcopy
import warnings
from astropy import units as u
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.sparse import lil_matrix, csr_matrix, hstack, vstack, issparse, find
from .. import MPLSTYLE
from ..utils import LightkurveWarning, plot_image
__all__ = [
"DesignMatrix",
"SparseDesignMatrix",
"DesignMatrixCollection",
"SparseDesignMatrixCollection",
]
class DesignMatrix:
"""A matrix of column vectors for use in linear regression.
The purpose of this class is to provide a convenient method to interact
with a set of one or more regressors which are known to correlate with
trends or systematic noise signals which we want to remove from a light
curve. Specifically, this class is designed to provide the design matrix
for use by Lightkurve's `.RegressionCorrector` class.
Parameters
----------
df : dict, array, or `pandas.DataFrame` object
Columns to include in the design matrix. If this object is not a
`~pandas.DataFrame` then it will be passed to the DataFrame constructor.
columns : iterable of str (optional)
Column names, if not already provided via ``df``.
name : str
Name of the matrix.
prior_mu : array
Prior means of the coefficients associated with each column in a linear
regression problem.
prior_sigma : array
Prior standard deviations of the coefficients associated with each
column in a linear regression problem.
Examples
--------
>>> from lightkurve.correctors.designmatrix import DesignMatrix, create_spline_matrix
>>> DesignMatrix(np.arange(100), name='slope')
slope DesignMatrix (100, 1)
>>> create_spline_matrix(np.arange(100), n_knots=5, name='spline')
spline DesignMatrix (100, 5)
"""
def __init__(
self, df, columns=None, name="unnamed_matrix", prior_mu=None, prior_sigma=None
):
if not isinstance(df, pd.DataFrame):
df = pd.DataFrame(df)
self.df = df
if columns is not None:
df.columns = columns
self.columns = list(df.columns)
self.name = name
if isinstance(prior_mu, u.Quantity):
prior_mu = prior_mu.value
if prior_mu is None:
prior_mu = np.zeros(len(df.T))
self.prior_mu = np.atleast_1d(prior_mu)
if isinstance(prior_sigma, u.Quantity):
prior_sigma = prior_sigma.value
if prior_sigma is None:
prior_sigma = np.ones(len(df.T)) * np.inf
self.prior_sigma = np.atleast_1d(prior_sigma)
@property
def X(self):
"""Design matrix "X" to be used in RegressionCorrector objects"""
return self.df.values
def copy(self):
"""Returns a deepcopy of DesignMatrix"""
return deepcopy(self)
def plot(self, ax=None, **kwargs):
"""Visualize the design matrix values as an image.
Uses Matplotlib's `~lightkurve.utils.plot_image` to visualize the
matrix values.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
**kwargs : dict
Extra parameters to be passed to `.plot_image`.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
with plt.style.context(MPLSTYLE):
ax = plot_image(
self.values,
ax=ax,
xlabel="Component",
ylabel="X",
clabel="Component Value",
title=self.name,
interpolation="nearest",
**kwargs
)
ax.set_aspect(self.shape[1] / (1.6 * self.shape[0]))
if self.shape[1] <= 40:
ax.set_xticks(np.arange(self.shape[1]))
ax.set_xticklabels(
[r"${}$".format(i) for i in self.columns], rotation=90, fontsize=8
)
return ax
def plot_priors(self, ax=None):
"""Visualize the coefficient priors.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
def gauss(x, mu=0, sigma=1):
return np.exp(-((x - mu) ** 2) / (2 * sigma ** 2))
with plt.style.context(MPLSTYLE):
if ax is None:
_, ax = plt.subplots()
for m, s in zip(self.prior_mu, self.prior_sigma):
if ~np.isfinite(s):
ax.axhline(1, color="k")
else:
x = np.linspace(m - 5 * s, m + 5 * s, 1000)
ax.plot(x, gauss(x, m, s), c="k")
ax.set_xlabel("Value")
ax.set_title("{} Priors".format(self.name))
return ax
def _get_prior_sample(self):
"""Returns a random sample from the prior distribution."""
return np.random.normal(self.prior_mu, self.prior_sigma)
def split(self, row_indices, inplace=False):
"""Returns a new `.DesignMatrix` with regressors split into multiple
columns.
This method will return a new design matrix containing
n_columns * len(row_indices) regressors. This is useful in situations
where the linear regression can be improved by fitting separate
coefficients for different contiguous parts of the regressors.
Parameters
----------
row_indices : iterable of integers
Every regressor (i.e. column) in the design matrix will be split
up over multiple columns separated at the indices provided.
Returns
-------
`.DesignMatrix`
A new design matrix with shape (n_rows, len(row_indices)*n_columns).
"""
        if isinstance(row_indices, int):
            row_indices = [row_indices]
        if (row_indices is None) or (len(row_indices) == 0) or (row_indices == [0]):
            return self
# Where do the submatrices begin and end?
lower_idx = np.append(0, row_indices)
upper_idx = np.append(row_indices, len(self.df))
dfs = []
for idx, a, b in zip(range(len(lower_idx)), lower_idx, upper_idx):
new_columns = dict(
("{}".format(val), "{}".format(val) + " {}".format(idx + 1))
for val in list(self.df.columns)
)
dfs.append(self.df[a:b].rename(columns=new_columns))
prior_mu = np.hstack([self.prior_mu for idx in range(len(dfs))])
prior_sigma = np.hstack([self.prior_sigma for idx in range(len(dfs))])
if inplace:
dm = self
else:
dm = self.copy()
dm.df = pd.concat(dfs, axis=1).fillna(0)
dm.columns = dm.df.columns
dm.prior_mu = prior_mu
dm.prior_sigma = prior_sigma
return dm
def standardize(self, inplace=False):
"""Returns a new `.DesignMatrix` in which the columns have been
median-subtracted and sigma-divided.
For each column in the matrix, this method will subtract the median of
the column and divide by the column's standard deviation, i.e. it
will compute the column's so-called "standard scores" or "z-values".
This operation is useful because it will make the matrix easier to
visualize and makes fitted coefficients easier to interpret.
Notes:
* Standardizing a spline design matrix will break the splines.
* Columns with constant values (i.e. zero standard deviation) will be
left unchanged.
Returns
-------
`.DesignMatrix`
A new design matrix with median-subtracted & sigma-divided columns.
"""
ar = np.asarray(np.copy(self.df))
ar[ar == 0] = np.nan
# If a column has zero standard deviation, it will not change!
is_const = np.nanstd(ar, axis=0) == 0
median = np.atleast_2d(np.nanmedian(ar, axis=0)[~is_const])
std = np.atleast_2d(np.nanstd(ar, axis=0)[~is_const])
ar[:, ~is_const] = (ar[:, ~is_const] - median) / std
new_df = pd.DataFrame(ar, columns=self.columns).fillna(0)
if inplace:
dm = self
else:
dm = self.copy()
dm.df = new_df
return dm
def pca(self, nterms=6):
"""Returns a new `.DesignMatrix` with a smaller number of regressors.
This method will use Principal Components Analysis (PCA) to reduce
the number of columns in the matrix.
Parameters
----------
nterms : int
Number of columns in the new matrix.
Returns
-------
`.DesignMatrix`
A new design matrix with PCA applied.
"""
        # nterms cannot be larger than the number of columns in the matrix
if nterms > self.shape[1]:
nterms = self.shape[1]
# We use `fbpca.pca` instead of `np.linalg.svd` because it is faster.
# Note that fbpca is randomized, and has n_iter=2 as default,
# we find this to be too few, and that n_iter=10 is still fast but
# produces more stable results.
from fbpca import pca # local import because not used elsewhere
new_values, _, _ = pca(self.values, nterms, n_iter=10)
return DesignMatrix(new_values, name=self.name)
def append_constant(self, prior_mu=0, prior_sigma=np.inf, inplace=False):
"""Returns a new `.DesignMatrix` with a column of ones appended.
Returns
-------
`.DesignMatrix`
New design matrix with a column of ones appended. This column is
named "offset".
"""
if inplace:
dm = self
else:
dm = self.copy()
extra_df = pd.DataFrame(
np.atleast_2d(np.ones(self.shape[0])).T, columns=["offset"]
)
dm.df = pd.concat([self.df, extra_df], axis=1)
dm.columns = list(dm.df.columns)
dm.prior_mu = np.append(self.prior_mu, prior_mu)
dm.prior_sigma = np.append(self.prior_sigma, prior_sigma)
return dm
def _validate(self, rank=True):
"""Helper function for validating."""
# Matrix rank shouldn't be significantly smaller than the # of columns
if rank:
if self.rank < (0.5 * self.shape[1]):
warnings.warn(
"The design matrix has low rank ({}) compared to the "
"number of columns ({}), which suggests that the "
"matrix contains duplicate or correlated columns. "
"This may prevent the regression from succeeding. "
"Consider reducing the dimensionality by calling the "
"`pca()` method.".format(self.rank, self.shape[1]),
LightkurveWarning,
)
if self.prior_mu is not None:
if len(self.prior_mu) != self.shape[1]:
raise ValueError(
"`prior_mu` must have shape {}" "".format(self.shape[1])
)
if self.prior_sigma is not None:
if len(self.prior_sigma) != self.shape[1]:
raise ValueError(
"`prior_sigma` must have shape {}" "".format(self.shape[1])
)
if np.any(np.asarray(self.prior_sigma) <= 0):
raise ValueError(
"`prior_sigma` values cannot be smaller than " "or equal to zero"
)
def validate(self, rank=True):
"""Emits `LightkurveWarning` if matrix has low rank or priors have incorrect shape.
Note that for `SparseDesignMatrix` objects, calculating the rank will
force the design matrix to be evaluated and stored in memory, reducing
the speed and memory savings of SparseDesignMatrix.
For `SparseDesignMatrix`, rank checks will be turned off by default.
"""
self._validate()
@property
def rank(self):
"""Matrix rank computed using `numpy.linalg.matrix_rank`."""
return np.linalg.matrix_rank(self.values)
@property
def shape(self):
"""Tuple specifying the shape of the matrix as (n_rows, n_columns)."""
return self.X.shape
@property
def values(self):
"""2D numpy array containing the matrix values."""
return self.df.values
def __getitem__(self, key):
return self.df[key].values
def __repr__(self):
return "{} DesignMatrix {}".format(self.name, self.shape)
def to_sparse(self):
"""Convert this dense matrix object to a `SparseDesignMatrix`.
The values of this design matrix will be converted to a
`scipy.sparse.csr_matrix`, which stores the values in a
lower memory matrix. This is not recommended for dense matrices.
"""
return SparseDesignMatrix(
csr_matrix(self.values),
name=self.name,
columns=self.columns,
prior_mu=self.prior_mu,
prior_sigma=self.prior_sigma,
)
def collect(self, matrix):
""" Join two designmatrices, return a design matrix collection """
return DesignMatrixCollection([self, matrix])
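# --- Hedged illustration (not part of the library) ---
# Typical `DesignMatrix` round trip on toy data: build from a dict,
# standardize, append an intercept column, then split into two epochs.
import numpy as np

_x = np.arange(100, dtype=float)
_dm = DesignMatrix({"slope": _x, "quad": _x ** 2}, name="toy")
_dm = _dm.standardize()        # z-scored columns
_dm = _dm.append_constant()    # adds an "offset" column of ones
_dm_split = _dm.split([50])    # separate coefficients for each half
# _dm_split.shape -> (100, 6): each of the 3 columns duplicated per segment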
class DesignMatrixCollection:
"""Object which stores multiple design matrices.
DesignMatrixCollection objects are useful when users want to regress against
multiple different systematics, but still keep the different systematics distinct.
Examples
--------
>>> from lightkurve.correctors.designmatrix import create_spline_matrix, DesignMatrix, DesignMatrixCollection
>>> dm1 = create_spline_matrix(np.arange(100), n_knots=5, name='spline')
>>> dm2 = DesignMatrix(np.arange(100), name='slope')
>>> dmc = DesignMatrixCollection([dm1, dm2])
>>> dmc
DesignMatrixCollection:
spline DesignMatrix (100, 5)
slope DesignMatrix (100, 1)
>>> dmc.matrices
[spline DesignMatrix (100, 5), slope DesignMatrix (100, 1)]
"""
def __init__(self, matrices):
if np.any([issparse(m.X) for m in matrices]):
# This collection is designed for dense matrices, so we warn if a
# SparseDesignMatrix is passed
warnings.warn(
(
"Some matrices are `SparseDesignMatrix` objects. "
"Sparse matrices will be converted to dense matrices."
),
LightkurveWarning,
)
dense_matrices = []
for m in matrices:
if isinstance(m, SparseDesignMatrix):
dense_matrices.append(m.copy().to_dense())
else:
dense_matrices.append(m)
self.matrices = dense_matrices
else:
self.matrices = matrices
self.X = np.hstack(tuple(m.X for m in self.matrices))
self._child_class = DesignMatrix
self.validate()
@property
def values(self):
"""2D numpy array containing the matrix values."""
return np.hstack(tuple(m.values for m in self.matrices))
@property
def prior_mu(self):
"""Coefficient prior means."""
return np.hstack([m.prior_mu for m in self])
@property
def prior_sigma(self):
"""Coefficient prior standard deviations."""
return np.hstack([m.prior_sigma for m in self])
def plot(self, ax=None, **kwargs):
"""Visualize the design matrix values as an image.
Uses Matplotlib's `~lightkurve.utils.plot_image` to visualize the
matrix values.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
**kwargs : dict
Extra parameters to be passed to `.plot_image`.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
temp_dm = DesignMatrix(pd.concat([d.df for d in self], axis=1))
ax = temp_dm.plot(**kwargs)
ax.set_title("Design Matrix Collection")
return ax
def plot_priors(self, ax=None):
"""Visualize the `prior_mu` and `prior_sigma` attributes.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
[dm.plot_priors(ax=ax) for dm in self]
return ax
def _get_prior_sample(self):
"""Returns a random sample from the prior distribution."""
        return np.hstack([dm._get_prior_sample() for dm in self])
def split(self, row_indices):
"""Returns a new `.DesignMatrixCollection` with regressors split into
multiple columns.
This method will return a new design matrix collection by calling
`DesignMatrix.split` on each matrix in the collection.
Parameters
----------
row_indices : iterable of integers
Every regressor (i.e. column) in the design matrix will be split
up over multiple columns separated at the indices provided.
Returns
-------
`.DesignMatrixCollection`
A new design matrix collection.
"""
return self.__class__([d.split(row_indices) for d in self])
def standardize(self):
"""Returns a new `.DesignMatrixCollection` in which all the
matrices have been standardized using the `DesignMatrix.standardize`
method.
Returns
-------
`.DesignMatrixCollection`
The new design matrix collection.
"""
return self.__class__([d.standardize() for d in self])
@property
def columns(self):
"""List of column names."""
return np.hstack([d.columns for d in self])
def __getitem__(self, key):
try:
return self.matrices[key]
except Exception:
arg = np.argwhere([m.name == key for m in self.matrices])
return self.matrices[arg[0][0]]
def validate(self):
[d.validate() for d in self]
def __repr__(self):
return "DesignMatrixCollection:\n" + "".join(
["\t{}\n".format(i.__repr__()) for i in self]
)
def to_designmatrix(self, name=None):
"""Flatten a `DesignMatrixCollection` into a `DesignMatrix`."""
if name is None:
name = self.matrices[0].name
return self._child_class(
self.X,
columns=self.columns,
prior_mu=self.prior_mu,
prior_sigma=self.prior_sigma,
name=name,
)
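# --- Hedged illustration (not part of the library) ---
# Keeping systematics distinct, as in the class docstring above: two toy
# matrices combined into one collection, still addressable by name.
# Names prefixed with "_" are invented for this sketch.
_dmc = DesignMatrixCollection(
    [DesignMatrix(np.arange(100), name="slope"),
     DesignMatrix(np.ones(100), name="offset")]
)
# _dmc.X.shape -> (100, 2); _dmc["slope"] looks the matrix up by name.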
class SparseDesignMatrix(DesignMatrix):
"""A matrix of column vectors for use in linear regression.
This class is similar to the `DesignMatrix` class, but uses the
`scipy.sparse` library to improve speed in the case of sparse matrices.
The purpose of this class is to provide a convenient method to interact
with a set of one or more regressors which are known to correlate with
trends or systematic noise signals which we want to remove from a light
curve. Specifically, this class is designed to provide the design matrix
for use by Lightkurve's `.RegressionCorrector` class.
Parameters
----------
X : `scipy.sparse` matrix
The values to build the design matrix with
columns : iterable of str (optional)
Column names
name : str
Name of the matrix.
prior_mu : array
Prior means of the coefficients associated with each column in a linear
regression problem.
prior_sigma : array
Prior standard deviations of the coefficients associated with each
column in a linear regression problem.
"""
def __init__(
self, X, columns=None, name="unnamed_matrix", prior_mu=None, prior_sigma=None
):
if not issparse(X):
raise ValueError(
"Must pass a `scipy.sparse` matrix (e.g. `scipy.sparse.csr_matrix`)"
)
if columns is None:
columns = np.arange(X.shape[1])
self.columns = columns
self.name = name
if prior_mu is None:
prior_mu = np.zeros(X.shape[1])
if prior_sigma is None:
prior_sigma = np.ones(X.shape[1]) * np.inf
self._X = X
self.prior_mu = prior_mu
self.prior_sigma = prior_sigma
self._child_class = SparseDesignMatrix
self.validate()
@property
def X(self):
"""Design matrix "X" to be used in RegressionCorrector objects"""
return self._X
@property
def values(self):
"""2D numpy array containing the matrix values."""
return self.X.toarray()
def validate(self, rank=False):
"""Checks if the matrix has the right shapes. Set rank to True to test matrix rank."""
# For sparse matrices, calculating the rank is expensive, and negates
# the benefits of using sparse. Validate will ignore rank by default.
self._validate(rank=rank)
def split(self, row_indices, inplace=False):
"""Returns a new `.SparseDesignMatrix` with regressors split into multiple
columns.
This method will return a new design matrix containing
n_columns * len(row_indices) regressors. This is useful in situations
where the linear regression can be improved by fitting separate
coefficients for different contiguous parts of the regressors.
Parameters
----------
row_indices : iterable of integers
Every regressor (i.e. column) in the design matrix will be split
up over multiple columns separated at the indices provided.
Returns
-------
`.SparseDesignMatrix`
A new design matrix with shape (n_rows, len(row_indices)*n_columns).
"""
if not hasattr(row_indices, "__iter__"):
row_indices = [row_indices]
# You can't split on the first or last index
row_indices = list(
np.asarray(row_indices)[~np.in1d(row_indices, [0, self.shape[0]])]
)
if len(row_indices) == 0:
return self
if inplace:
dm = self
else:
dm = self.copy()
x = np.arange(dm.shape[0])
dm.prior_mu = np.concatenate([list(self.prior_mu) * (len(row_indices) + 1)])
dm.prior_sigma = np.concatenate(
[list(self.prior_sigma) * (len(row_indices) + 1)]
)
dm._X = hstack(
[
dm.X.multiply(lil_matrix(np.in1d(x, idx).astype(int)).T)
for idx in np.array_split(x, row_indices)
],
format="lil",
)
non_zero = dm.X.sum(axis=0) != 0
non_zero = np.asarray(non_zero).ravel()
dm._X = dm.X[:, non_zero]
if dm.columns is not None:
dm.columns = list(
np.asarray(
[
["{}_{}".format(c, idx) for c in dm.columns]
for idx in range(len(row_indices) + 1)
]
).ravel()
)
dm.prior_mu = dm.prior_mu[non_zero]
dm.prior_sigma = dm.prior_sigma[non_zero]
return dm
def standardize(self, inplace=False):
"""Returns a new `.SparseDesignMatrix` in which the columns have been
mean-subtracted and sigma-divided.
For each column in the matrix, this method will subtract the mean of
the column and divide by the column's standard deviation, i.e. it
will compute the column's so-called "standard scores" or "z-values".
This operation is useful because it will make the matrix easier to
visualize and makes fitted coefficients easier to interpret.
Notes:
* Standardizing a spline design matrix will break the splines.
* Columns with constant values (i.e. zero standard deviation) will be
left unchanged.
Returns
-------
`.SparseDesignMatrix`
A new design matrix with mean-subtracted & sigma-divided columns.
"""
if inplace:
dm = self
else:
dm = self.copy()
idx, jdx, v = find(dm.X)
weights = dm.X.copy()
weights[dm.X != 0] = 1
mean = np.bincount(jdx, weights=v) / np.bincount(jdx)
std = np.asarray(
[
((np.sum((v[jdx == i] - mean[i]) ** 2) * (1 / ((jdx == i).sum() - 1))))
** 0.5
for i in np.unique(jdx)
]
)
mean[std == 0] = 0
std[std == 0] = 1
white = (dm.X - vstack([lil_matrix(mean)] * dm.shape[0])).multiply(
vstack([lil_matrix(1 / std)] * dm.shape[0])
)
dm._X = white.multiply(weights)
return dm
def pca(self, nterms=6, **kwargs):
"""Returns a new `.SparseDesignMatrix` with a smaller number of regressors.
This method will use Principal Components Analysis (PCA) to reduce
the number of columns in the matrix.
Parameters
----------
nterms : int
Number of columns in the new matrix.
Returns
-------
`.SparseDesignMatrix`
A new design matrix with PCA applied.
"""
return super().pca(nterms, **kwargs).to_sparse()
def append_constant(self, prior_mu=0, prior_sigma=np.inf, inplace=False):
"""Returns a new `.SparseDesignMatrix` with a column of ones appended.
Returns
-------
`.SparseDesignMatrix`
New design matrix with a column of ones appended. This column is
named "offset".
"""
if inplace:
dm = self
else:
dm = self.copy()
dm._X = hstack([dm.X, lil_matrix(np.ones(dm.shape[0])).T], format="lil")
dm.prior_mu = np.append(dm.prior_mu, prior_mu)
dm.prior_sigma = np.append(dm.prior_sigma, prior_sigma)
return dm
def __getitem__(self, key):
loc = np.where(np.asarray(self.columns) == key)[0]
if len(loc) == 0:
raise ValueError("No such column as `{}`.".format(key))
return self.X[:, loc].toarray()
def __repr__(self):
return "{} SparseDesignMatrix {}".format(self.name, self.shape)
def collect(self, matrix):
""" Join two designmatrices, return a design matrix collection """
return SparseDesignMatrixCollection([self, matrix])
def to_dense(self):
"""Convert a SparseDesignMatrix object to a dense DesignMatrix
The values of this design matrix will be converted to a
`numpy.ndarray`. This is not recommended for sparse matrices containing
mostly zeros.
"""
return DesignMatrix(
self.values,
name=self.name,
columns=self.columns,
prior_mu=self.prior_mu,
prior_sigma=self.prior_sigma,
)
class SparseDesignMatrixCollection(DesignMatrixCollection):
"""A set of design matrices."""
def __init__(self, matrices):
if not np.all([issparse(m.X) for m in matrices]):
# This collection is designed for sparse matrices, so we raise a warning if a dense DesignMatrix is passed
warnings.warn(
(
"Not all matrices are `SparseDesignMatrix` objects. "
"Dense matrices will be converted to sparse matrices."
),
LightkurveWarning,
)
sparse_matrices = []
for m in matrices:
if isinstance(m, DesignMatrix):
sparse_matrices.append(m.copy().to_sparse())
else:
sparse_matrices.append(m)
self.matrices = sparse_matrices
else:
self.matrices = matrices
self.X = hstack([m.X for m in self.matrices], format="csr")
self._child_class = SparseDesignMatrix
self.validate()
def plot(self, ax=None, **kwargs):
"""Visualize the design matrix values as an image.
Uses Matplotlib's `~lightkurve.utils.plot_image` to visualize the
matrix values.
Parameters
----------
ax : `~matplotlib.axes.Axes`
A matplotlib axes object to plot into. If no axes is provided,
a new one will be created.
**kwargs : dict
Extra parameters to be passed to `.plot_image`.
Returns
-------
`~matplotlib.axes.Axes`
The matplotlib axes object.
"""
temp_dm = SparseDesignMatrix(hstack([d.X for d in self]))
ax = temp_dm.plot(**kwargs)
ax.set_title("Design Matrix Collection")
return ax
def __repr__(self):
return "SparseDesignMatrixCollection:\n" + "".join(
["\t{}\n".format(i.__repr__()) for i in self]
)
####################################################
# Functions to create commonly-used design matrices.
####################################################
def _spline_basis_vector(x, degree, i, knots):
"""Recursive function to create a single spline basis vector for an input x,
for the ith knot.
See https://en.wikipedia.org/wiki/B-spline for a definition of B-spline
basis vectors
Parameters
----------
x : np.ndarray
Input x
degree : int
Degree of spline to calculate basis for
i : int
The index of the knot to calculate the basis for
knots : np.ndarray
Array of all knots
Returns
-------
B : np.ndarray
A vector of same length as x containing the spline basis for the ith knot
"""
if degree == 0:
B = np.zeros(len(x))
B[(x >= knots[i]) & (x <= knots[i + 1])] = 1
else:
da = knots[degree + i] - knots[i]
db = knots[i + degree + 1] - knots[i + 1]
if (knots[degree + i] - knots[i]) != 0:
alpha1 = (x - knots[i]) / da
else:
alpha1 = np.zeros(len(x))
if (knots[i + degree + 1] - knots[i + 1]) != 0:
alpha2 = (knots[i + degree + 1] - x) / db
else:
alpha2 = np.zeros(len(x))
B = (_spline_basis_vector(x, (degree - 1), i, knots)) * (alpha1) + (
_spline_basis_vector(x, (degree - 1), (i + 1), knots)
) * (alpha2)
return B
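# --- Hedged illustration (not part of the library) ---
# The recursion above, evaluated on toy data with a conventionally padded
# ("clamped") knot vector: degree+1 repeated knots at each boundary. With
# that convention the basis vectors form a partition of unity on the
# interior of the domain. The knot layout here is this sketch's own choice,
# not the one used by `create_sparse_spline_matrix` below.
import numpy as np

_x = np.linspace(0.0, 1.0, 200)
_degree = 3
_knots = np.linspace(0.0, 1.0, 8)  # includes both endpoints
_knots_clamped = np.append(np.append([_x.min()] * _degree, _knots), [_x.max()] * _degree)
_basis = np.asarray(
    [
        _spline_basis_vector(_x, _degree, _i, _knots_clamped)
        for _i in range(len(_knots_clamped) - _degree - 1)
    ]
)
# _basis.sum(axis=0) is ~1.0 everywhere away from the exact endpoints.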
def create_sparse_spline_matrix(x, n_knots=20, knots=None, degree=3, name="spline"):
"""Creates a piecewise polynomial function, creating a continuous, smooth function in x
See https://en.wikipedia.org/wiki/B-spline for the definitions of Basis Splines
B-spline vectors of degree higher than 0 are created using recursion, using the
`_spline_basis_vector` function to evaluate the basis vectors for x, for each knot.
Parameters
----------
x : np.ndarray
vector to spline
n_knots: int
Number of knots (default: 20).
knots : np.ndarray [optional]
Optional array containing knots
degree: int
Polynomial degree.
    name: string
        Name to pass to `.SparseDesignMatrix` (default: 'spline').
Returns
-------
dm: `.SparseDesignMatrix`
Design matrix object with shape (len(x), n_knots*degree).
"""
# To use jit we have to use float64
x = np.asarray(x, np.float64)
if not isinstance(n_knots, int):
raise ValueError("`n_knots` must be an integer.")
if n_knots - degree <= 0:
raise ValueError("n_knots must be greater than degree.")
if (knots is None) and (n_knots is not None):
knots = np.asarray(
[s[-1] for s in np.array_split(np.argsort(x), n_knots - degree)[:-1]]
)
knots = [np.mean([x[k], x[k + 1]]) for k in knots]
elif (knots is None) and (n_knots is None):
raise ValueError("Pass either `n_knots` or `knots`.")
knots = np.append(np.append(x.min(), knots), x.max())
knots = np.unique(knots)
knots_wbounds = np.append(
np.append([x.min()] * (degree - 1), knots), [x.max()] * (degree)
)
matrices = [
csr_matrix(_spline_basis_vector(x, degree, idx, knots_wbounds))
for idx in np.arange(-1, len(knots_wbounds) - degree - 1)
]
spline_dm = vstack([m for m in matrices if (m.sum() != 0)], format="csr").T
return SparseDesignMatrix(spline_dm, name=name)
def create_spline_matrix(
x, n_knots=20, knots=None, degree=3, name="spline", include_intercept=True
):
"""Returns a `.DesignMatrix` which models splines using `patsy.dmatrix`.
Parameters
----------
x : np.ndarray
vector to spline
n_knots: int
Number of knots (default: 20).
knots: list [optional]
The interior knots to use for the spline. If unspecified, then equally
spaced quantiles of the input data are used such that there are `n_knots` knots.
degree: int
Polynomial degree.
name: string
Name to pass to `.DesignMatrix` (default: 'spline').
    include_intercept: bool
        Whether to include a column of ones to fit the intercept. Default True.
Returns
-------
dm: `.DesignMatrix`
Design matrix object with shape (len(x), n_knots*degree).
"""
from patsy import dmatrix # local import because it's rarely-used
if knots is not None:
dm_formula = "bs(x, knots={}, degree={}, include_intercept={}) - 1" "".format(
knots, degree, include_intercept
)
spline_dm = np.asarray(dmatrix(dm_formula, {"x": x}))
df = pd.DataFrame(
spline_dm,
columns=["knot{}".format(idx + 1) for idx in range(spline_dm.shape[1])],
)
else:
dm_formula = "bs(x, df={}, degree={}, include_intercept={}) - 1" "".format(
n_knots, degree, include_intercept
)
spline_dm = np.asarray(dmatrix(dm_formula, {"x": x}))
df = pd.DataFrame(
spline_dm, columns=["knot{}".format(idx + 1) for idx in range(n_knots)]
)
return DesignMatrix(df, name=name)
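# --- Hedged illustration (not part of the library) ---
# Usage sketch matching the doctest in `DesignMatrix` above; requires the
# optional `patsy` dependency to be installed.
import numpy as np

_spline_dm = create_spline_matrix(np.arange(100), n_knots=5, name="spline")
# repr(_spline_dm) -> "spline DesignMatrix (100, 5)"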
| 34,995 | 34.207243 | 118 | py | lightkurve | lightkurve-main/src/lightkurve/prf/tpfmodel.py |
"""Provides tools to model a Kepler image for PRF photometry fitting.
Examples
--------
%matplotlib inline
import numpy as np
from lightkurve import KeplerTargetPixelFile, LightCurve
from lightkurve.prf import StarPrior, BackgroundPrior, FocusPrior, MotionPrior, TPFModel, PRFPhotometry
from oktopus import GaussianPrior, UniformPrior
tpf = KeplerTargetPixelFile("https://archive.stsci.edu/missions/kepler/target_pixel_files/0084/008462852/"
"kplr008462852-2013098041711_lpd-targ.fits.gz", quality_mask='hardest')
# First, compute a few values from our TPF which will inform the priors
bgflux = np.nanpercentile(tpf.flux[0], 10)
maxflux = np.nansum(tpf.flux, axis=(1, 2)).max()
col, row = np.nanmedian(tpf.centroids(), axis=1)
# Set up the model
model = TPFModel(star_priors=[StarPrior(col=GaussianPrior(mean=col, var=2**2),
row=GaussianPrior(mean=row, var=2**2),
flux=UniformPrior(lb=0, ub=maxflux),
targetid=tpf.keplerid)],
background_prior=BackgroundPrior(flux=GaussianPrior(mean=bgflux, var=bgflux)),
focus_prior=FocusPrior(scale_col=GaussianPrior(mean=1, var=0.0001),
scale_row=GaussianPrior(mean=1, var=0.0001),
rotation_angle=UniformPrior(lb=-3.1415, ub=3.1415)),
motion_prior=MotionPrior(shift_col=GaussianPrior(mean=0., var=0.01),
shift_row=GaussianPrior(mean=0., var=0.01)),
prfmodel=tpf.get_prf_model(),
fit_background=True,
fit_focus=False,
fit_motion=False)
pp = PRFPhotometry(model)
pp.run(tpf.flux, pos_corr1=tpf.pos_corr1, pos_corr2=tpf.pos_corr2, cadences=range(1650, 1850))
pp.plot_results()
print('The star flux in the first cadence is {}'.format(pp.results[0].stars[0].flux))
"""
from __future__ import division, print_function
import logging
from matplotlib import pyplot as plt
import numpy as np
from tqdm import tqdm
import warnings
from oktopus import Prior, GaussianPrior, UniformPrior, PoissonPosterior
from .prfmodel import KeplerPRF
from ..utils import plot_image
__all__ = [
"GaussianPrior",
"UniformPrior",
"FixedValuePrior",
"StarPrior",
"BackgroundPrior",
"FocusPrior",
"MotionPrior",
"StarParameters",
"BackgroundParameters",
"FocusParameters",
"MotionParameters",
"TPFModelParameters",
"TPFModel",
"PRFPhotometry",
]
log = logging.getLogger(__name__)
class FixedValuePrior(Prior):
"""An improper prior with a negative log probability of 0 at a single fixed
value and inf elsewhere. This is similar to a Dirac Delta function,
except this function does not peak at infinity so that it can be used
in numerical optimization functions. It does not integrate to one as a
result and is therefore an "improper distribution".
Attributes
----------
value : int or array-like of ints
The fixed value.
Examples
--------
>>> fp = FixedValuePrior(1)
>>> fp(1)
-0.0
>>> fp(0.5)
inf
"""
def __init__(self, value, name=None):
self.value = np.asarray([value]).reshape(-1)
self.name = name
def __repr__(self):
return "<FixedValuePrior(value={})>".format(self.value)
@property
def mean(self):
"""Returns the fixed value."""
return self.value
@property
def variance(self):
"""Returns zero."""
return 0
def evaluate(self, params):
"""Returns the negative log pdf."""
if self.value == params:
return -0.0
return np.inf
def gradient(self, params):
raise NotImplementedError()
class PriorContainer(object):
"""Container object to hold parameter priors for PRF photometry."""
def _parse_prior(self, prior):
if isinstance(prior, Prior):
return prior
return FixedValuePrior(value=prior)
def __call__(self, *params):
"""Calls :func:`evaluate`"""
return self.evaluate(*params)
class StarPrior(PriorContainer):
"""Container class to capture a user's beliefs about a star's position and flux.
Examples
--------
StarPrior(col=GaussianPrior(mean=col, var=err_col**2),
row=GaussianPrior(mean=row, var=err_row**2),
flux=GaussianPrior(mean=flux, var=err_flux**2))
"""
def __init__(self, col, row, flux=UniformPrior(lb=0, ub=1e10), targetid=None):
self.col = self._parse_prior(col)
self.row = self._parse_prior(row)
self.flux = self._parse_prior(flux)
self.targetid = targetid
def __repr__(self):
return "<StarPrior(\n col={}\n row={}\n flux={}\n targetid={})>" "".format(
self.col, self.row, self.flux, self.targetid
)
def evaluate(self, col, row, flux):
"""Evaluate the prior probability of a star of a given flux being at
a given row and col.
"""
logp = (
self.col.evaluate(col) + self.row.evaluate(row) + self.flux.evaluate(flux)
)
return logp
class BackgroundPrior(PriorContainer):
"""Container class to capture a user's beliefs about the background flux.
Parameters
----------
flux : oktopus ``Prior`` object
Prior on the background flux in electrons/second per pixel.
"""
def __init__(self, flux=FixedValuePrior(value=0)):
self.flux = self._parse_prior(flux)
def __repr__(self):
return "<BackgroundPrior(\n flux={})>".format(self.flux)
def evaluate(self, flux):
"""Returns the prior probability for a given background flux value."""
return self.flux.evaluate(flux)
class FocusPrior(PriorContainer):
"""Container class to capture a user's beliefs about the telescope focus.
Parameters
----------
scale_col, scale_row : oktopus ``Prior`` object
Pixel scale in the column and row directions. Typically close to one.
rotation_angle : oktopus ``Prior`` object
Rotation angle in radians. Typically zero.
"""
def __init__(
self,
scale_col=GaussianPrior(mean=1, var=0.0001),
scale_row=GaussianPrior(mean=1, var=0.0001),
rotation_angle=UniformPrior(lb=-3.1415, ub=3.1415),
):
self.scale_col = self._parse_prior(scale_col)
self.scale_row = self._parse_prior(scale_row)
self.rotation_angle = self._parse_prior(rotation_angle)
def __repr__(self):
return (
"<FocusPrior(\n scale_col={}\n scale_row={}\n rotation_angle={})>"
"".format(self.scale_col, self.scale_row, self.rotation_angle)
)
def evaluate(self, scale_col, scale_row, rotation_angle):
"""Returns the prior probability for a gien set of focus parameters."""
logp = (
self.scale_col.evaluate(scale_col)
+ self.scale_row.evaluate(scale_row)
+ self.rotation_angle.evaluate(rotation_angle)
)
return logp
class MotionPrior(PriorContainer):
"""Container class to capture a user's beliefs about the telescope motion."""
def __init__(
self,
shift_col=GaussianPrior(mean=0, var=1.0 ** 2),
shift_row=GaussianPrior(mean=0, var=1.0 ** 2),
):
self.shift_col = self._parse_prior(shift_col)
self.shift_row = self._parse_prior(shift_row)
def __repr__(self):
return "<MotionPrior(\n shift_col={}\n shift_row={})>" "".format(
self.shift_col, self.shift_row
)
def evaluate(self, shift_col, shift_row):
"""Returns the prior probability for a gien set of motion parameters."""
logp = self.shift_col.evaluate(shift_col) + self.shift_row.evaluate(shift_row)
return logp
class StarParameters(object):
"""Container class to hold the parameters of a star in a ``TPFModel``."""
def __init__(
self, col, row, flux, err_col=None, err_row=None, err_flux=None, targetid=None
):
self.col = col
self.row = row
self.flux = flux
self.targetid = targetid
def __repr__(self):
r = "<StarParameters(\n col={}\n row={}\n flux={}\n targetid={})>".format(
self.col, self.row, self.flux, self.targetid
)
return r
class BackgroundParameters(object):
"""Container class to hold the parameters of the background in a ``TPFModel``."""
def __init__(self, flux=0.0, err_flux=None, fitted=True):
self.flux = flux
self.err_flux = err_flux
self.fitted = fitted
def __repr__(self):
r = "<BackgroundParameters(\n flux={}\n fitted={})>".format(
self.flux, self.fitted
)
return r
class FocusParameters(object):
"""Container class to hold the parameters of the telescope focus in a ``TPFModel``."""
def __init__(self, scale_col=1.0, scale_row=1.0, rotation_angle=0.0, fitted=False):
self.scale_col = scale_col
self.scale_row = scale_row
self.rotation_angle = rotation_angle
self.fitted = fitted
def __repr__(self):
return (
"<FocusParameters(\n scale_col={}\n scale_row={}\n "
"rotation_angle={}\n fitted={})>"
"".format(self.scale_col, self.scale_row, self.rotation_angle, self.fitted)
)
class MotionParameters(object):
"""Container class to hold the parameters of the telescope motion in a ``TPFModel``."""
def __init__(self, shift_col=0.0, shift_row=0.0, fitted=False):
self.shift_col = shift_col
self.shift_row = shift_row
self.fitted = fitted
def __repr__(self):
return (
"<MotionParameters(\n shift_col={}\n shift_row={}\n fitted={})>".format(
self.shift_col, self.shift_row, self.fitted
)
)
class TPFModelParameters(object):
"""Container class to combine all parameters that parameterize a ``TPFModel``.
Attributes
----------
stars : list of ``StarParameters`` objects
Parameters related to the stars in the model.
background : ``BackgroundParameters`` object
Parameters related to the background flux.
focus : ``FocusParameters`` object
Parameters related to the telescope focus.
motion : ``MotionParameters`` object
Parameters related to the telescope motion.
"""
def __init__(
self,
stars=None,
background=BackgroundParameters(),
focus=FocusParameters(),
motion=MotionParameters(),
):
if stars is None:
stars = []
self.stars = stars
self.background = background
self.focus = focus
self.motion = motion
def __repr__(self):
out = super(TPFModelParameters, self).__repr__() + "\n"
out += "".join(
[" {}\n".format(str(star).replace("\n", "\n ")) for star in self.stars]
)
out += " " + str(self.background).replace("\n", "\n ") + "\n"
out += " " + str(self.focus).replace("\n", "\n ") + "\n"
out += " " + str(self.motion).replace("\n", "\n ") + "\n"
if "residual_image" in vars(self):
out += " residual_image:\n {}".format(self.residual_image[0][0:4])[:-1]
out += "...\n"
if "predicted_image" in vars(self):
out += " predicted_image:\n {}".format(self.predicted_image[0][0:4])[
:-1
]
out += "...\n"
return out
def to_array(self):
"""Converts the free parameters held by this class to an array of size (n,),
where n is the number of free parameters.
This method exists because `scipy.optimize` can only optimize arrays of
        real numbers, yet we like to store the parameters in human-friendly
container classes to make the fitted parameters accessible without
confusion.
Returns
-------
array : array-like
Array containing all the free parameters.
"""
array = []
for star in self.stars:
array.append(star.col)
array.append(star.row)
array.append(star.flux)
if self.background.fitted:
array.append(self.background.flux)
if self.focus.fitted:
array.append(self.focus.scale_col)
array.append(self.focus.scale_row)
array.append(self.focus.rotation_angle)
if self.motion.fitted:
array.append(self.motion.shift_col)
array.append(self.motion.shift_row)
return np.array(array).ravel()
def from_array(self, array):
"""Inverse of ``to_array()``."""
array = np.asarray(array).ravel()
next_idx = 0
stars = []
for staridx in range(len(self.stars)):
star = StarParameters(
col=array[next_idx], row=array[next_idx + 1], flux=array[next_idx + 2]
)
stars.append(star)
next_idx += 3
if not self.background.fitted:
background = self.background
else:
background = BackgroundParameters(flux=array[next_idx], fitted=True)
next_idx += 1
if not self.focus.fitted:
focus = self.focus
else:
focus = FocusParameters(
scale_col=array[next_idx],
scale_row=array[next_idx + 1],
rotation_angle=array[next_idx + 2],
fitted=True,
)
next_idx += 3
if not self.motion.fitted:
motion = self.motion
else:
motion = MotionParameters(
shift_col=array[next_idx], shift_row=array[next_idx + 1], fitted=True
)
return TPFModelParameters(
stars=stars, background=background, focus=focus, motion=motion
)
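# Hedged usage sketch (illustrative values, not defaults): the free
# parameters round-trip through the flat array consumed by scipy.optimize.
# With one star and a fitted background, ``to_array`` packs
# [col, row, flux, background_flux] and ``from_array`` unpacks it again.
def _example_parameter_roundtrip():
    params = TPFModelParameters(
        stars=[StarParameters(col=5.0, row=5.0, flux=1e4)],
        background=BackgroundParameters(flux=100.0, fitted=True),
    )
    arr = params.to_array()  # -> array([5., 5., 10000., 100.])
    return params.from_array(arr)  # reconstructs an equivalent object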
class TPFModel(object):
"""A model which describes a single-cadence Kepler image.
Attributes
----------
star_priors : list of ``StarPrior`` objects.
List of stars believed to be in the image.
background_prior : ``BackgroundPrior`` object.
Beliefs about the background flux.
prfmodel : ``KeplerPRF`` object.
        The callable Pixel Response Function (PRF) model to use.
focus_prior : ``FocusPrior`` object.
Beliefs about the telescope focus.
motion_prior : ``MotionPrior`` object.
Beliefs about the telescope motion.
fit_background : bool
If False, the background parameters will be kept fixed.
fit_focus : bool
If False, the telescope focus parameters will be kept fixed.
fit_motion : bool
If False, the telescope motion parameters will be kept fixed.
"""
def __init__(
self,
star_priors=None,
background_prior=BackgroundPrior(),
focus_prior=FocusPrior(),
motion_prior=MotionPrior(),
prfmodel=None,
fit_background=True,
fit_focus=False,
fit_motion=False,
):
if star_priors is None:
star_priors = []
if prfmodel is None:
prfmodel = KeplerPRF(1, shape=(10, 10), column=0, row=0)
self.star_priors = star_priors
self.background_prior = background_prior
self.focus_prior = focus_prior
self.motion_prior = motion_prior
self.prfmodel = prfmodel
self.fit_background = fit_background
self.fit_focus = fit_focus
self.fit_motion = fit_motion
self._params = self.get_initial_guesses()
def __repr__(self):
out = super(TPFModel, self).__repr__() + "\n"
out += "".join(
[
" {}\n".format(str(star).replace("\n", "\n "))
for star in self.star_priors
]
)
out += " " + str(self.background_prior).replace("\n", "\n ") + "\n"
out += " " + str(self.focus_prior).replace("\n", "\n ") + "\n"
out += " " + str(self.motion_prior).replace("\n", "\n ") + "\n"
out += " " + str(self.prfmodel).replace("\n", "\n ") + "\n"
out += " fit_background={}\n fit_focus={}\n fit_motion={}\n".format(
self.fit_background, self.fit_focus, self.fit_motion
)
return out
def get_initial_guesses(self):
"""Returns the prior means which can be used to initialize the model.
The guesses are obtained by taking the means of the priors.
"""
initial_star_guesses = []
for star in self.star_priors:
initial_star_guesses.append(
StarParameters(
col=star.col.mean, row=star.row.mean, flux=star.flux.mean
)
)
background = BackgroundParameters(
flux=self.background_prior.flux.mean, fitted=self.fit_background
)
focus = FocusParameters(
scale_col=self.focus_prior.scale_col.mean,
scale_row=self.focus_prior.scale_row.mean,
rotation_angle=self.focus_prior.rotation_angle.mean,
fitted=self.fit_focus,
)
motion = MotionParameters(
shift_col=self.motion_prior.shift_col.mean,
shift_row=self.motion_prior.shift_row.mean,
fitted=self.fit_motion,
)
initial_params = TPFModelParameters(
stars=initial_star_guesses,
background=background,
focus=focus,
motion=motion,
)
return initial_params
def predict(self, params=None):
"""Returns a synthetic Kepler image given a set of model parameters.
        Parameters
----------
params : ```TPFModelParameters``` object
Parameters which define the model.
Returns
-------
synthetic_image : 2D ndarray
Predicted image given the parameters.
"""
if params is None:
params = self.get_initial_guesses()
star_images = []
for star in params.stars:
star_images.append(
self.prfmodel(
center_col=star.col + params.motion.shift_col,
center_row=star.row + params.motion.shift_row,
flux=star.flux,
scale_col=params.focus.scale_col,
scale_row=params.focus.scale_row,
rotation_angle=params.focus.rotation_angle,
)
)
synthetic_image = np.sum(star_images, axis=0) + params.background.flux
return synthetic_image
def _predict(self, *params_array):
"""Wrapper around ``predict()`` which takes an array of shape (n,)
where n is the number of free parameters.
        Unlike ``predict()``, this function can be called by scipy.optimize.
"""
params = self._params.from_array(params_array)
return self.predict(params)
def __call__(self, *params_array):
return self._predict(*params_array)
def gradient(self, *params_array):
"""UNFINISHED WORK!"""
params = self._params.from_array(params_array)
grad = []
for star in params.stars:
grad.append(
self.prfmodel.gradient(
center_col=star.col, center_row=star.row, flux=star.flux
)
)
        # We assume the gradient with respect to the background flux is one
        grad.append([np.ones(self.prfmodel.shape)])
        # We assume the gradient of the remaining free parameters is one
        for i in range(len(params_array) - 3 * len(params.stars) - 1):
            grad.append([np.ones(self.prfmodel.shape)])
grad = sum(grad, [])
return grad
def logp_prior(self, params):
"""Evaluates the prior at a point in the parameter space.
        Parameters
----------
params : TPFModelParameters object
"""
logp = 0
for star, star_prior in zip(params.stars, self.star_priors):
logp += star_prior.evaluate(col=star.col, row=star.row, flux=star.flux)
if self.fit_background:
logp += self.background_prior.evaluate(params.background.flux)
if self.fit_focus:
logp += self.focus_prior.evaluate(
params.focus.scale_col,
params.focus.scale_row,
params.focus.rotation_angle,
)
if self.fit_motion:
logp += self.motion_prior.evaluate(
params.motion.shift_col, params.motion.shift_row
)
return logp
def _logp_prior(self, params_array):
"""Wrapper around ``logp_prior()`` which takes an array of shape (n,)
where n is the number of free parameters.
        Unlike ``predict()``, this function can be called by scipy.optimize.
"""
params = self._params.from_array(params_array)
return self.logp_prior(params)
def fit(
self,
data,
loss_function=PoissonPosterior,
method="powell",
pos_corr1=None,
pos_corr2=None,
**kwargs
):
"""Fits the model to the data.
Parameters
----------
data : array-like
The pixel data for a single cadence, i.e. the data obtained using
``KeplerTargetPixelFile.flux[cadenceno]``.
loss_function : subclass of oktopus.LossFunction
Noise distribution associated with each random measurement
kwargs : dict
Dictionary of additional parameters to be passed to
`scipy.optimize.minimize`.
Returns
-------
result : ``TPFModelParameters`` object
Fitted parameters plus fitting diagnostics.
"""
if pos_corr1 is not None and np.abs(pos_corr1) < 50:
self.motion_prior.shift_col.mean = pos_corr1
if pos_corr2 is not None and np.abs(pos_corr2) < 50:
self.motion_prior.shift_row.mean = pos_corr2
self._params = self.get_initial_guesses() # Update _params for model changes!
loss = loss_function(data, self, prior=self._logp_prior)
with warnings.catch_warnings():
            # Ignore RuntimeWarnings triggered by invalid values
warnings.simplefilter("ignore", RuntimeWarning)
fit = loss.fit(
x0=self.get_initial_guesses().to_array(), method=method, **kwargs
)
result = self._params.from_array(fit.x)
# NOTE: uncertainties are not available for now because `self.gradient` is unfinished;
# hence, the line below is commented out for now:
# result.uncertainties = loss.loglikelihood.uncertainties(fit.x)
result.predicted_image = self._predict(fit.x)
result.residual_image = data - result.predicted_image
result.loss_value = fit.fun
result.opt_result = fit
return result
def plot(self, *params, **kwargs):
"""Plots an image of the model for a given point in the parameter space."""
img = self.predict(*params)
plot_image(
img,
title="TPF Model",
extent=(
self.prfmodel.column,
self.prfmodel.column + self.prfmodel.shape[1],
self.prfmodel.row,
self.prfmodel.row + self.prfmodel.shape[0],
),
**kwargs
)
def plot_diagnostics(self, data, figsize=(12, 4), *params, **kwargs):
"""Plots an image of the model for a given point in the parameter space."""
fig, ax = plt.subplots(nrows=1, ncols=3, figsize=figsize)
fit = self.fit(data)
extent = (
self.prfmodel.column,
self.prfmodel.column + self.prfmodel.shape[1],
self.prfmodel.row,
self.prfmodel.row + self.prfmodel.shape[0],
)
plot_image(
data,
ax=ax[0],
title="Observed Data, Channel: {}".format(self.prfmodel.channel),
extent=extent,
**kwargs
)
plot_image(
fit.predicted_image,
ax=ax[1],
title="Predicted Image, Channel: {}".format(self.prfmodel.channel),
extent=extent,
**kwargs
)
plot_image(
fit.residual_image,
ax=ax[2],
title="Residual Image, Channel: {}".format(self.prfmodel.channel),
extent=extent,
**kwargs
)
return fit
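# Hedged usage sketch: build a model with a single star prior, synthesize an
# image from the prior means, and fit it back. The StarPrior arguments shown
# here are assumptions for illustration, and constructing a KeplerPRF
# downloads a calibration file from MAST.
def _example_tpfmodel_fit():
    prf = KeplerPRF(channel=44, shape=(10, 10), column=5, row=5)
    star = StarPrior(col=GaussianPrior(mean=10, var=2.0),
                     row=GaussianPrior(mean=10, var=2.0),
                     flux=UniformPrior(lb=0, ub=2e4))
    model = TPFModel(star_priors=[star], prfmodel=prf)
    data = model.predict()  # synthetic "observed" image from the prior means
    result = model.fit(data)  # TPFModelParameters plus fitting diagnostics
    return result.stars[0].flux  # should roughly recover the injected flux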
class PRFPhotometry(object):
"""This class performs PRF Photometry on TPF-like data given a ``TPFModel``.
This class exists because a ``TPFModel`` object is designed to fit only
one cadence at a time. This class makes it easy to fit a large number
of cadences and obtain the resulting LightCurve.
Attributes
----------
model : instance of TPFModel
Model which will be fit to the data
"""
def __init__(self, model):
self.model = model
self.results = []
def run(
self, tpf_flux, cadences=None, pos_corr1=None, pos_corr2=None, parallel=True
):
"""Fits the model to the flux data.
Parameters
----------
tpf_flux : array-like
A pixel flux time-series, i.e., the pixel data, e.g,
KeplerTargetPixelFile.flux, such that (time, row, column) represents
the shape of ``tpf_flux``.
cadences : array-like
Cadences to fit. If `None` (default) then all cadences will be fit.
pos_corr1, pos_corr2 : array-like, array-like
If set, use these values to update the prior means for
`model.motion_prior.shift_col` and `model.motion_prior.shift_row`
for each cadence.
parallel : boolean
If `True`, cadences will be fit in parallel using Python's `multiprocessing` module.
"""
if cadences is None: # By default, fit all cadences.
cadences = np.arange(len(tpf_flux))
# Prepare an iterable of arguments, such that each item contains all information
# needed to fit a single cadence. This will enable parallel processing below.
tpf_flux = np.asarray(tpf_flux) # Ensure the flux data can be indexed
if pos_corr1 is None or pos_corr2 is None:
args = zip([self.model] * len(cadences), tpf_flux[cadences])
else:
args = zip(
[self.model] * len(cadences),
tpf_flux[cadences],
pos_corr1[cadences],
pos_corr2[cadences],
)
# Set up a mapping function
if parallel:
import multiprocessing
pool = multiprocessing.Pool()
mymap = pool.imap
        else:
            # ``itertools.imap`` no longer exists in Python 3; the built-in
            # ``map`` is lazy and serves the same purpose here.
            mymap = map
# Now fit all cadences using the mapping function and the list of arguments
self.results = []
for result in tqdm(
mymap(fit_one_cadence, args), desc="Fitting cadences", total=len(cadences)
):
self.results.append(result)
if parallel:
pool.close()
# Parse results
self.lightcurves = [
self._parse_lightcurve(star_idx)
for star_idx in range(len(self.model.star_priors))
]
def _parse_lightcurve(self, star_idx):
# Create a lightcurve
from .. import LightCurve
flux = []
for cadence in range(len(self.results)):
flux.append(self.results[cadence].stars[star_idx].flux)
return LightCurve(flux=flux, targetid=self.model.star_priors[star_idx].targetid)
def _parse_background(self):
# Create a lightcurve
from .. import LightCurve
bgflux = []
for cadence in range(len(self.results)):
bgflux.append(self.results[cadence].background.flux)
return LightCurve(flux=bgflux)
def plot_results(self, star_idx=0):
"""Plot all the TPF model parameters over time."""
fig, ax = plt.subplots(10, sharex=True, figsize=(6, 12))
x = range(len(self.results))
ax[0].plot(x, [r.stars[star_idx].flux for r in self.results])
ax[0].set_ylabel("Flux")
ax[1].plot(x, [r.stars[star_idx].col for r in self.results])
ax[1].set_ylabel("Col")
ax[2].plot(x, [r.stars[star_idx].row for r in self.results])
ax[2].set_ylabel("Row")
ax[3].plot(x, [r.motion.shift_col for r in self.results])
ax[3].set_ylabel("Shift col")
ax[4].plot(x, [r.motion.shift_row for r in self.results])
ax[4].set_ylabel("Shift row")
ax[5].plot(x, [r.background.flux for r in self.results])
ax[5].set_ylabel("Background")
ax[6].plot(x, [r.focus.scale_col for r in self.results])
ax[6].set_ylabel("Focus col")
ax[7].plot(x, [r.focus.scale_row for r in self.results])
ax[7].set_ylabel("Focus row")
ax[8].plot(x, [r.focus.rotation_angle for r in self.results])
ax[8].set_ylabel("Focus angle")
ax[9].plot(x, [r.loss_value for r in self.results])
ax[9].set_ylabel("Loss")
def fit_one_cadence(arg):
"""Helper function to enable parallelism.
This function is used by PRFPhotometry.run().
"""
model = arg[0]
data = arg[1]
if len(arg) == 4:
pos_corr1, pos_corr2 = arg[2], arg[3]
else:
pos_corr1, pos_corr2 = None, None
return model.fit(data, pos_corr1=pos_corr1, pos_corr2=pos_corr2)
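# Hedged usage sketch: PRFPhotometry fits the model to every cadence of a
# target pixel file and exposes one LightCurve per star. Here ``tpf`` is
# assumed to be a KeplerTargetPixelFile and ``model`` a configured TPFModel.
def _example_prf_photometry(tpf, model):
    phot = PRFPhotometry(model)
    phot.run(tpf.flux, parallel=False)  # serial; parallel=True uses multiprocessing
    return phot.lightcurves[0]  # light curve of the first star prior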
| 29,717
| 33.880282
| 106
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/prf/prfmodel.py
|
"""Provides callable models of the Kepler Pixel Response Function (PRF)."""
from __future__ import division, print_function
import math
from astropy.io import fits as pyfits
import numpy as np
import scipy
import scipy.interpolate
from ..utils import channel_to_module_output, plot_image
__all__ = ["KeplerPRF", "SimpleKeplerPRF"]
class KeplerPRF(object):
"""
Kepler's Pixel Response Function as designed by [1]_.
This class provides the necessary interface to load Kepler PRF
calibration files and to create a model that can be fit as a function
of flux, center positions, width, and rotation angle.
Attributes
----------
channel : int
KeplerTargetPixelFile.channel
shape : (int, int)
KeplerTargetPixelFile.shape[1:]
column : int
KeplerTargetPixelFile.column
row : int
KeplerTargetPixelFile.row
Examples
--------
Objects from the KeplerPRF class are defined by a channel number, a pair of
dimensions (the size of the image), and a reference coordinate (bottom left
    corner). In this example, we create a KeplerPRF object located at channel
    #44 with dimensions 10 x 10 and reference row and column coordinates
    (5, 5). After the object has been created, we may translate it to a
given center coordinate. Additionally, we can specify total flux, pixel
scales, and rotation around the object's center.
>>> import math
>>> import matplotlib.pyplot as plt
>>> from lightkurve.prf import KeplerPRF
>>> kepprf = KeplerPRF(channel=44, shape=(10, 10), column=5, row=5) # doctest: +SKIP
Downloading http://archive.stsci.edu/missions/kepler/fpc/prf/kplr13.4_2011265_prf.fits [Done]
>>> prf = kepprf(flux=1000, center_col=10, center_row=10,
... scale_row=0.7, scale_col=0.7, rotation_angle=math.pi/2) # doctest: +SKIP
>>> plt.imshow(prf, origin='lower') # doctest: +SKIP
References
----------
.. [1] S. T. Bryson. The Kepler Pixel Response Function, 2010.
<https://arxiv.org/abs/1001.0331>.
"""
def __init__(self, channel, shape, column, row):
self.channel = channel
self.shape = shape
self.column = column
self.row = row
(
self.col_coord,
self.row_coord,
self.interpolate,
self.supersampled_prf,
) = self._prepare_prf()
def __call__(
self, center_col, center_row, flux, scale_col, scale_row, rotation_angle
):
return self.evaluate(
center_col, center_row, flux, scale_col, scale_row, rotation_angle
)
def evaluate(
self,
center_col,
center_row,
flux=1.0,
scale_col=1.0,
scale_row=1.0,
rotation_angle=0.0,
):
"""
Interpolates the PRF model onto detector coordinates.
Parameters
----------
center_col, center_row : float
Column and row coordinates of the center
flux : float
Total integrated flux of the PRF
scale_col, scale_row : float
Pixel scale stretch parameter, i.e. the numbers by which the PRF
model needs to be multiplied in the column and row directions to
account for focus changes
rotation_angle : float
Rotation angle in radians
Returns
-------
prf_model : 2D array
Two dimensional array representing the PRF values parametrized
by flux, centroids, widths, and rotation.
"""
cosa = math.cos(rotation_angle)
sina = math.sin(rotation_angle)
delta_col = self.col_coord - center_col
delta_row = self.row_coord - center_row
delta_col, delta_row = np.meshgrid(delta_col, delta_row)
rot_row = delta_row * cosa - delta_col * sina
rot_col = delta_row * sina + delta_col * cosa
self.prf_model = flux * self.interpolate(
rot_row.flatten() * scale_row, rot_col.flatten() * scale_col, grid=False
).reshape(self.shape)
return self.prf_model
def gradient(
self,
center_col,
center_row,
flux=1.0,
scale_col=1.0,
scale_row=1.0,
rotation_angle=0.0,
):
"""
This function returns the gradient of the KeplerPRF model with
respect to center_col, center_row, flux, scale_col, scale_row,
and rotation_angle.
Parameters
----------
center_col, center_row : float
Column and row coordinates of the center
flux : float
Total integrated flux of the PRF
scale_col, scale_row : float
Pixel scale stretch parameter, i.e. the numbers by which the PRF
model needs to be multiplied in the column and row directions to
account for focus changes
rotation_angle : float
Rotation angle in radians
Returns
-------
grad_prf : list
Returns a list of arrays where the elements are the partial derivatives
of the KeplerPRF model with respect to center_col, center_row, flux, scale_col,
scale_row, and rotation_angle, respectively.
"""
cosa = math.cos(rotation_angle)
sina = math.sin(rotation_angle)
delta_col = self.col_coord - center_col
delta_row = self.row_coord - center_row
delta_col, delta_row = np.meshgrid(delta_col, delta_row)
rot_row = delta_row * cosa - delta_col * sina
rot_col = delta_row * sina + delta_col * cosa
# for a proof of the maths that follow, see the pdf attached
# on pull request #198 in lightkurve GitHub repo.
deriv_flux = self.interpolate(
rot_row.flatten() * scale_row, rot_col.flatten() * scale_col, grid=False
).reshape(self.shape)
interp_dy = self.interpolate(
rot_row.flatten() * scale_row,
rot_col.flatten() * scale_col,
grid=False,
dy=1,
).reshape(self.shape)
interp_dx = self.interpolate(
rot_row.flatten() * scale_row,
rot_col.flatten() * scale_col,
grid=False,
dx=1,
).reshape(self.shape)
scale_row_times_interp_dx = scale_row * interp_dx
scale_col_times_interp_dy = scale_col * interp_dy
deriv_center_col = -flux * (
cosa * scale_col_times_interp_dy - sina * scale_row_times_interp_dx
)
deriv_center_row = -flux * (
sina * scale_col_times_interp_dy + cosa * scale_row_times_interp_dx
)
deriv_scale_row = flux * interp_dx * rot_row
deriv_scale_col = flux * interp_dy * rot_col
deriv_rotation_angle = flux * (
interp_dy * scale_col * (delta_row * cosa - delta_col * sina)
- interp_dx * scale_row * (delta_row * sina + delta_col * cosa)
)
return [
deriv_center_col,
deriv_center_row,
deriv_flux,
deriv_scale_col,
deriv_scale_row,
deriv_rotation_angle,
]
def _read_prf_calibration_file(self, path, ext):
prf_cal_file = pyfits.open(path)
data = prf_cal_file[ext].data
# looks like these data below are the same for all prf calibration files
crval1p = prf_cal_file[ext].header["CRVAL1P"]
crval2p = prf_cal_file[ext].header["CRVAL2P"]
cdelt1p = prf_cal_file[ext].header["CDELT1P"]
cdelt2p = prf_cal_file[ext].header["CDELT2P"]
prf_cal_file.close()
return data, crval1p, crval2p, cdelt1p, cdelt2p
def _prepare_prf(self):
n_hdu = 5
min_prf_weight = 1e-6
module, output = channel_to_module_output(self.channel)
# determine suitable PRF calibration file
if module < 10:
prefix = "kplr0"
else:
prefix = "kplr"
prfs_url_path = "http://archive.stsci.edu/missions/kepler/fpc/prf/"
prffile = (
prfs_url_path
+ prefix
+ str(module)
+ "."
+ str(output)
+ "_2011265_prf.fits"
)
# read PRF images
prfn = [0] * n_hdu
crval1p = np.zeros(n_hdu, dtype="float32")
crval2p = np.zeros(n_hdu, dtype="float32")
cdelt1p = np.zeros(n_hdu, dtype="float32")
cdelt2p = np.zeros(n_hdu, dtype="float32")
for i in range(n_hdu):
(
prfn[i],
crval1p[i],
crval2p[i],
cdelt1p[i],
cdelt2p[i],
) = self._read_prf_calibration_file(prffile, i + 1)
prfn = np.array(prfn)
PRFcol = np.arange(0.5, np.shape(prfn[0])[1] + 0.5)
PRFrow = np.arange(0.5, np.shape(prfn[0])[0] + 0.5)
PRFcol = (PRFcol - np.size(PRFcol) / 2) * cdelt1p[0]
PRFrow = (PRFrow - np.size(PRFrow) / 2) * cdelt2p[0]
# interpolate the calibrated PRF shape to the target position
rowdim, coldim = self.shape[0], self.shape[1]
prf = np.zeros(np.shape(prfn[0]), dtype="float32")
ref_column = self.column + 0.5 * coldim
ref_row = self.row + 0.5 * rowdim
for i in range(n_hdu):
prf_weight = math.sqrt(
(ref_column - crval1p[i]) ** 2 + (ref_row - crval2p[i]) ** 2
)
if prf_weight < min_prf_weight:
prf_weight = min_prf_weight
prf += prfn[i] / prf_weight
prf /= np.nansum(prf) * cdelt1p[0] * cdelt2p[0]
# location of the data image centered on the PRF image (in PRF pixel units)
col_coord = np.arange(self.column + 0.5, self.column + coldim + 0.5)
row_coord = np.arange(self.row + 0.5, self.row + rowdim + 0.5)
        # the x-axis corresponds to the row-axis in scipy.RectBivariateSpline,
        # not to be confused with our convention, in which the
        # x-axis corresponds to the column-axis
interpolate = scipy.interpolate.RectBivariateSpline(PRFrow, PRFcol, prf)
return col_coord, row_coord, interpolate, prf
def plot(self, *params, **kwargs):
pflux = self.evaluate(*params)
plot_image(
pflux,
title="Kepler PRF Model, Channel: {}".format(self.channel),
extent=(
self.column,
self.column + self.shape[1],
self.row,
self.row + self.shape[0],
),
**kwargs
)
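# Hedged sanity check (illustrative values; requires a network download):
# the supersampled PRF is normalized to integrate to one, so evaluating the
# model well inside the image and summing over pixels should approximately
# recover the requested flux, up to interpolation and edge losses.
def _example_prf_flux_conservation():
    prf = KeplerPRF(channel=44, shape=(10, 10), column=5, row=5)
    img = prf.evaluate(center_col=10, center_row=10, flux=1000.0)
    return img.sum()  # ~1000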
class SimpleKeplerPRF(KeplerPRF):
"""
Simple model of KeplerPRF.
This class provides identical functionality as in KeplerPRF, except that
it is parametrized only by flux and center positions. The width scales
    and angle are fixed to 1.0 and 0, respectively.
"""
def __call__(self, center_col, center_row, flux=1.0):
return self.evaluate(center_col, center_row, flux)
def evaluate(self, center_col, center_row, flux=1.0):
"""
Interpolates the PRF model onto detector coordinates.
Parameters
----------
flux : float
Total integrated flux of the PRF
center_col, center_row : float
Column and row coordinates of the center
Returns
-------
prf_model : 2D array
Two dimensional array representing the PRF values parametrized
by flux and centroids.
"""
delta_col = self.col_coord - center_col
delta_row = self.row_coord - center_row
self.prf_model = flux * self.interpolate(delta_row, delta_col)
return self.prf_model
def gradient(self, center_col, center_row, flux):
"""
This function returns the gradient of the SimpleKeplerPRF model with
respect to flux, center_col, and center_row.
Parameters
----------
center_col, center_row : float
Column and row coordinates of the center
flux : float
Total integrated flux of the PRF
Returns
-------
grad_prf : list
Returns a list of arrays where the elements are the derivative
of the KeplerPRF model with respect to center_col, center_row,
and flux, respectively.
"""
delta_col = self.col_coord - center_col
delta_row = self.row_coord - center_row
deriv_flux = self.interpolate(delta_row, delta_col)
deriv_center_col = -flux * self.interpolate(delta_row, delta_col, dy=1)
deriv_center_row = -flux * self.interpolate(delta_row, delta_col, dx=1)
return [deriv_center_col, deriv_center_row, deriv_flux]
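# Hedged numerical check (illustrative values): because the model is linear
# in flux, the analytic flux derivative returned by ``gradient`` equals the
# model evaluated at unit flux.
def _example_simple_prf_gradient_check():
    sprf = SimpleKeplerPRF(channel=44, shape=(10, 10), column=5, row=5)
    base = sprf.evaluate(center_col=10, center_row=10, flux=1.0)
    _, _, dflux = sprf.gradient(center_col=10, center_row=10, flux=1.0)
    return np.allclose(dflux, base)  # True: d(model)/d(flux) == model at flux=1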
| 12,737
| 33.334232
| 97
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/prf/__init__.py
|
from .prfmodel import *
from .tpfmodel import *
| 48
| 15.333333
| 23
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/seismology/stellar_estimators.py
|
"""Functions to estimate stellar parameters (radius, mass, logg) using
asteroseismic scaling relations.
"""
from uncertainties import ufloat, umath
from astropy import units as u
from astropy import constants as const
from .utils import SeismologyQuantity
__all__ = ["estimate_radius", "estimate_mass", "estimate_logg"]
"""Global parameters for the sun"""
NUMAX_SOL = ufloat(3090, 30) # microhertz | Huber et al. 2011
DELTANU_SOL = ufloat(135.1, 0.1) # microhertz | Huber et al. 2011
TEFF_SOL = ufloat(5772.0, 0.8) # Kelvin | Prsa et al. 2016
G_SOL = ((const.G * const.M_sun) / (const.R_sun) ** 2).to(u.cm / u.second ** 2) # cms^2
def estimate_radius(
numax, deltanu, teff, numax_err=None, deltanu_err=None, teff_err=None
):
"""Returns a stellar radius estimate based on the scaling relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar radius as
R = Rsol * (numax/numax_sol)(deltanu/deltanusol)^-2(Teff/Teffsol)^0.5
where R is the radius and Teff is the effective temperature, and the suffix
'sol' indicates a solar value. In this method we use the solar values for
numax and deltanu as given in Huber et al. (2011) and for Teff as given in
Prsa et al. (2016).
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
    These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
    (roughly 200 K, see KIC). For better results, spectroscopic measurements of
    temperature, which are often more precise, are preferable.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
deltanu : float
The frequency spacing between two consecutive overtones of equal radial
degree. If not given an astropy unit, assumed to be in units of
microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
deltanu_err : float
Error on deltanu. Assumed to be same units as deltanu
teff_err : float
Error on Teff. Assumed to be same units as Teff.
Returns
-------
radius : SeismologyQuantity
An estimate of the stellar radius in solar radii.
"""
numax = u.Quantity(numax, u.microhertz).value
deltanu = u.Quantity(deltanu, u.microhertz).value
teff = u.Quantity(teff, u.Kelvin).value
if all(b is not None for b in [numax_err, deltanu_err, teff_err]):
numax_err = u.Quantity(numax_err, u.microhertz).value
deltanu_err = u.Quantity(deltanu_err, u.microhertz).value
teff_err = u.Quantity(teff_err, u.Kelvin).value
unumax = ufloat(numax, numax_err)
udeltanu = ufloat(deltanu, deltanu_err)
uteff = ufloat(teff, teff_err)
else:
unumax = ufloat(numax, 0)
udeltanu = ufloat(deltanu, 0)
uteff = ufloat(teff, 0)
uR = (
(unumax / NUMAX_SOL)
* (udeltanu / DELTANU_SOL) ** (-2.0)
* (uteff / TEFF_SOL) ** (0.5)
)
result = SeismologyQuantity(
uR.n * u.solRad,
error=uR.s * u.solRad,
name="radius",
method="Uncorrected Scaling Relations",
)
return result
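# Hedged sanity check: feeding the solar reference values back into the
# relation makes every ratio equal to one, so the estimate is ~1 solar radius.
def _example_radius_sun():
    return estimate_radius(numax=3090.0, deltanu=135.1, teff=5772.0)  # ~1 solRad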
def estimate_mass(
numax, deltanu, teff, numax_err=None, deltanu_err=None, teff_err=None
):
"""Calculates mass using the asteroseismic scaling relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar mass as
M = Msol * (numax/numax_sol)^3(deltanu/deltanusol)^-4(Teff/Teffsol)^1.5
where M is the mass and Teff is the effective temperature, and the suffix
'sol' indicates a solar value. In this method we use the solar values for
numax and deltanu as given in Huber et al. (2011) and for Teff as given in
Prsa et al. (2016).
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
    These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
    (roughly 200 K, see KIC). For better results, spectroscopic measurements of
    temperature, which are often more precise, are preferable.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
deltanu : float
The frequency spacing between two consecutive overtones of equal radial
degree. If not given an astropy unit, assumed to be in units of
microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
deltanu_err : float
Error on deltanu. Assumed to be same units as deltanu
teff_err : float
Error on Teff. Assumed to be same units as Teff.
Returns
-------
mass : SeismologyQuantity
An estimate of the stellar mass in solar masses.
"""
numax = u.Quantity(numax, u.microhertz).value
deltanu = u.Quantity(deltanu, u.microhertz).value
teff = u.Quantity(teff, u.Kelvin).value
if all(b is not None for b in [numax_err, deltanu_err, teff_err]):
numax_err = u.Quantity(numax_err, u.microhertz).value
deltanu_err = u.Quantity(deltanu_err, u.microhertz).value
teff_err = u.Quantity(teff_err, u.Kelvin).value
unumax = ufloat(numax, numax_err)
udeltanu = ufloat(deltanu, deltanu_err)
uteff = ufloat(teff, teff_err)
else:
unumax = ufloat(numax, 0)
udeltanu = ufloat(deltanu, 0)
uteff = ufloat(teff, 0)
uM = (
(unumax / NUMAX_SOL) ** 3.0
* (udeltanu / DELTANU_SOL) ** (-4.0)
* (uteff / TEFF_SOL) ** (1.5)
)
result = SeismologyQuantity(
uM.n * u.solMass,
error=uM.s * u.solMass,
name="mass",
method="Uncorrected Scaling Relations",
)
return result
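# Hedged sanity check: with solar inputs every ratio equals one, so the
# relation returns ~1 solar mass.
def _example_mass_sun():
    return estimate_mass(numax=3090.0, deltanu=135.1, teff=5772.0)  # ~1 solMass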
def estimate_logg(numax, teff, numax_err=None, teff_err=None):
"""Calculates the log of the surface gravity using the asteroseismic scaling
relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar surface gravity as
g = gsol * (numax/numax_sol)(Teff/Teffsol)^0.5
where g is the surface gravity and Teff is the effective temperature,
and the suffix 'sol' indicates a solar value. In this method we use the
solar values for numax as given in Huber et al. (2011) and for Teff as given
    in Prsa et al. (2016). The solar surface gravity is calculated from the
astropy constants for solar mass and radius and does not have an error.
The solar surface gravity is returned as log10(g) with units in dex, as is
common in the astrophysics literature.
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
    These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
    (roughly 200 K, see KIC). For better results, spectroscopic measurements of
    temperature, which are often more precise, are preferable.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
teff_err : float
Error on teff. Assumed to be same units as teff.
Returns
-------
logg : `.SeismologyQuantity`
The log10 of the surface gravity of the star.
"""
numax = u.Quantity(numax, u.microhertz).value
teff = u.Quantity(teff, u.Kelvin).value
if all(b is not None for b in [numax_err, teff_err]):
numax_err = u.Quantity(numax_err, u.microhertz).value
teff_err = u.Quantity(teff_err, u.Kelvin).value
unumax = ufloat(numax, numax_err)
uteff = ufloat(teff, teff_err)
else:
unumax = ufloat(numax, 0)
uteff = ufloat(teff, 0)
ug = G_SOL.value * (unumax / NUMAX_SOL) * (uteff / TEFF_SOL) ** 0.5
ulogg = umath.log(ug, 10)
result = SeismologyQuantity(
ulogg.n * u.dex,
error=ulogg.s * u.dex,
name="logg",
method="Uncorrected Scaling Relations",
)
return result
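# Hedged sanity check: solar inputs reduce the relation to g = g_sol, so the
# estimate is log10(g_sol) ~ 4.44 dex, the canonical solar surface gravity.
def _example_logg_sun():
    return estimate_logg(numax=3090.0, teff=5772.0)  # ~4.44 dex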
| 10,895
| 39.962406
| 89
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/seismology/deltanu_estimators.py
|
"""Helper functions for estimating deltanu from periodograms."""
from __future__ import division, print_function
import numpy as np
from scipy.signal import find_peaks
from matplotlib import pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from astropy import units as u
from .utils import SeismologyQuantity
from . import utils
from .. import MPLSTYLE
__all__ = ["estimate_deltanu_acf2d", "diagnose_deltanu_acf2d"]
def estimate_deltanu_acf2d(periodogram, numax):
"""Returns the average value of the large frequency spacing, DeltaNu,
of the seismic oscillations of the target, using an autocorrelation
function.
There are many papers on the topic of autocorrelation functions for
estimating seismic parameters, including but not limited to:
Roxburgh & Vorontsov (2006), Roxburgh (2009), Mosser & Appourchaux (2009),
Huber et al. (2009), Verner & Roxburgh (2011) & Viani et al. (2019).
We base this approach first and foremost off the approach taken in
Mosser & Appourchaux (2009). Given a known numax, a window around this
numax is taken of one estimated full-width-half-maximum (FWHM) of the
seismic mode envelope either side of numax. This width is chosen so that
the autocorrelation includes all of the visible mode peaks.
The autocorrelation (numpy.correlate) is given as::
C = sum(s * s)
where s is a window of the signal-to-noise spectrum. When shifting
the spectrum over itself, C will increase when two mode peaks are
overlapping. Because of the method of this calculation, we need to first
rescale the power by subtracting its mean, placing its mean around 0. This
decreases the noise levels in the ACF, as the autocorrelation of the noise
with itself will be close to zero.
As is done in Mosser & Appourchaux, we rescale the value of C in terms
of the noise level in the ACF spectrum as::
        A = (|C^2| / |C[0]^2|) * (2 * len(C) / 3).
The method will autocorrelate the region around the estimated numax
expected to contain seismic oscillation modes. Repeating peaks in the
autocorrelation implies an evenly spaced structure of modes.
The peak closest to an empirical estimate of deltanu is taken as the true
value. The peak finding algorithm is limited by a minimum spacing
between peaks of 0.5 times the empirical value for deltanu.
    Our empirical estimate for deltanu is taken from Stello et al. (2009) as::
deltanu = 0.294 * numax^0.772
If `numax` is None, a numax is calculated using the estimate_numax()
function with default settings.
NOTE: This function is intended for use with solar like Main Sequence
and Red Giant Branch oscillators only.
    Parameters
    ----------
    periodogram : `~lightkurve.periodogram.Periodogram`
        The periodogram in which to search for the deltanu spacing.
    numax : float
An estimated numax value of the mode envelope in the periodogram. If
not given units it is assumed to be in units of the periodogram
frequency attribute.
Returns
-------
deltanu : `.SeismologyQuantity`
The average large frequency spacing of the seismic oscillation modes.
In units of the periodogram frequency attribute.
"""
# The frequency grid must be evenly spaced
if not periodogram._is_evenly_spaced():
raise ValueError(
"the ACF 2D method requires that the periodogram "
"has a grid of uniformly spaced frequencies."
)
# Run some checks on the passed in numaxs
# Ensure input numax is in the correct units
numax = u.Quantity(numax, periodogram.frequency.unit)
fs = np.median(np.diff(periodogram.frequency.value))
if numax.value < fs:
raise ValueError(
"The input numax can not be lower than" " a single frequency bin."
)
if numax.value > np.nanmax(periodogram.frequency.value):
raise ValueError(
"The input numax can not be higher than"
"the highest frequency value in the periodogram."
)
# Calculate deltanu using the method by Stello et al. 2009
# Make sure that this relation only ever happens in microhertz space
deltanu_emp = u.Quantity(
(0.294 * u.Quantity(numax, u.microhertz).value ** 0.772) * u.microhertz,
periodogram.frequency.unit,
).value
window_width = 2 * int(np.floor(utils.get_fwhm(periodogram, numax.value)))
aacf = utils.autocorrelate(
periodogram, numax=numax.value, window_width=window_width
)
acf = (np.abs(aacf ** 2) / np.abs(aacf[0] ** 2)) / (3 / (2 * len(aacf)))
fs = np.median(np.diff(periodogram.frequency.value))
lags = np.linspace(0.0, len(acf) * fs, len(acf))
    # Select a 25% region around the empirical deltanu
sel = (lags > deltanu_emp - 0.25 * deltanu_emp) & (
lags < deltanu_emp + 0.25 * deltanu_emp
)
# Run a peak finder on this region
peaks, _ = find_peaks(acf[sel], distance=np.floor(deltanu_emp / 2.0 / fs))
# Select the peak closest to the empirical value
best_deltanu_value = lags[sel][peaks][
np.argmin(np.abs(lags[sel][peaks] - deltanu_emp))
]
best_deltanu = u.Quantity(best_deltanu_value, periodogram.frequency.unit)
diagnostics = {
"lags": lags,
"acf": acf,
"peaks": peaks,
"sel": sel,
"numax": numax,
"deltanu_emp": deltanu_emp,
}
result = SeismologyQuantity(
best_deltanu,
name="deltanu",
method="ACF2D",
diagnostics=diagnostics,
diagnostics_plot_method=diagnose_deltanu_acf2d,
)
return result
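# Hedged worked example of the empirical seed used above: for a solar-like
# numax of 3090 uHz, deltanu_emp = 0.294 * 3090**0.772 ~ 145 uHz, within
# roughly 8% of the true solar value of 135.1 uHz; the ACF peak search is
# then confined to +/-25% around this seed.
def _example_deltanu_seed(numax_uhz=3090.0):
    return 0.294 * numax_uhz ** 0.772  # ~145 (microhertz)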
def diagnose_deltanu_acf2d(deltanu, periodogram):
"""Returns a diagnostic plot which elucidates how deltanu was estimated.
    [1] Scaled correlation metric vs frequency lag of the autocorrelation
window, with inset close up on the determined deltanu and a line
indicating the determined deltanu.
For details on the deltanu estimation, see the `estimate_deltanu()`
function. The calculation performed is identical.
NOTE: When plotting, we exclude the first two frequency lag bins, to
make the relevant features on the plot clearer, as these bins are close to
the spectrum correlated with itself and therefore much higher than the rest
of the bins.
Parameters
----------
    deltanu : `.SeismologyResult` object
        The object returned by `estimate_deltanu_acf2d()`.
    periodogram : `~lightkurve.periodogram.Periodogram`
        The periodogram from which ``deltanu`` was estimated.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
with plt.style.context(MPLSTYLE):
fig, axs = plt.subplots(2, figsize=(8.485, 8))
ax = axs[0]
periodogram.plot(ax=ax, label="")
ax.axvline(
deltanu.diagnostics["numax"].value, c="r", linewidth=1, alpha=0.4, ls=":"
)
ax.text(
deltanu.diagnostics["numax"].value,
periodogram.power.value.max() * 0.45,
"{} ({:.1f} {})".format(
r"$\nu_{\rm max}$",
deltanu.diagnostics["numax"].value,
deltanu.diagnostics["numax"].unit.to_string("latex"),
),
rotation=90,
ha="right",
color="r",
alpha=0.5,
fontsize=8,
)
ax.text(
0.025,
0.9,
"Input Power Spectrum",
horizontalalignment="left",
transform=ax.transAxes,
fontsize=11,
)
window_width = 2 * int(
np.floor(utils.get_fwhm(periodogram, deltanu.diagnostics["numax"].value))
)
frequency_spacing = np.median(np.diff(periodogram.frequency.value))
spread = int(window_width / 2 / frequency_spacing) # spread in indices
a = (
np.argmin(
np.abs(periodogram.frequency.value - deltanu.diagnostics["numax"].value)
)
+ spread
)
b = (
np.argmin(
np.abs(periodogram.frequency.value - deltanu.diagnostics["numax"].value)
)
- spread
)
a = [
periodogram.frequency.value[a]
if a < len(periodogram.frequency)
else periodogram.frequency.value[-1]
][0]
b = [
periodogram.frequency.value[b] if b > 0 else periodogram.frequency.value[0]
][0]
ax.axvline(a, c="r", linewidth=2, alpha=0.4, ls="--")
ax.axvline(b, c="r", linewidth=2, alpha=0.4, ls="--")
h = periodogram.power.value.max() * 0.9
ax.annotate(
"",
xy=(a, h),
xytext=(a - (a - b), h),
arrowprops=dict(arrowstyle="<->", color="r", alpha=0.5),
va="bottom",
)
ax.text(
a - (a - b) / 2,
h,
r"2 $\times$ FWHM",
color="r",
alpha=0.7,
fontsize=10,
va="bottom",
ha="center",
)
ax.set_xlim(b - ((a - b) * 0.2), a + ((a - b) * 0.2))
ax = axs[1]
ax.plot(deltanu.diagnostics["lags"][2:], deltanu.diagnostics["acf"][2:])
ax.set_xlabel(
"Frequency Lag [{}]".format(periodogram.frequency.unit.to_string("latex"))
)
ax.set_ylabel(r"Scaled Auto Correlation", fontsize=11)
axin = inset_axes(ax, width="50%", height="50%", loc="upper right")
axin.set_yticks([])
axin.plot(
deltanu.diagnostics["lags"][deltanu.diagnostics["sel"]],
deltanu.diagnostics["acf"][deltanu.diagnostics["sel"]],
)
axin.scatter(
deltanu.diagnostics["lags"][deltanu.diagnostics["sel"]][
deltanu.diagnostics["peaks"]
],
deltanu.diagnostics["acf"][deltanu.diagnostics["sel"]][
deltanu.diagnostics["peaks"]
],
c="r",
s=5,
)
mea_label = r"Measured {} {:.1f} {}".format(
r"$\Delta\nu$", deltanu.value, periodogram.frequency.unit.to_string("latex")
)
ax.axvline(deltanu.value, c="r", linewidth=2, alpha=0.4, label=mea_label)
emp_label = r"Empirical {} {:.1f} {}".format(
r"$\Delta\nu$",
deltanu.diagnostics["deltanu_emp"],
periodogram.frequency.unit.to_string("latex"),
)
ax.axvline(
deltanu.diagnostics["deltanu_emp"],
c="b",
linewidth=2,
alpha=0.4,
ls="--",
label=emp_label,
)
axin.axvline(deltanu.value, c="r", linewidth=2, alpha=0.4)
axin.axvline(
deltanu.diagnostics["deltanu_emp"], c="b", linewidth=2, alpha=0.4, ls="--"
)
ax.text(
0.025,
0.9,
"Scaled Auto Correlation Within 2 FWHM",
horizontalalignment="left",
transform=ax.transAxes,
fontsize=11,
)
ax.legend(loc="lower right", fontsize=10)
return ax
| 10,981
| 34.086262
| 88
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/seismology/core.py
|
"""Defines the Seismology class."""
import logging
import warnings
import numpy as np
from matplotlib import pyplot as plt
from scipy.signal import find_peaks
from astropy import units as u
from astropy.units import cds
from .. import MPLSTYLE
from . import utils, stellar_estimators
from ..periodogram import SNRPeriodogram
from ..utils import LightkurveWarning, validate_method
from .utils import SeismologyQuantity
# Import the optional Bokeh dependency required by ``interact_echelle``,
# or print a friendly error otherwise.
try:
import bokeh # Import bokeh first so we get an ImportError we can catch
from bokeh.io import show, output_notebook
from bokeh.plotting import figure
from bokeh.models import LogColorMapper, Slider, RangeSlider, Button
from bokeh.layouts import layout, Spacer
except:
    # Nice error will be raised when ``interact_echelle`` is called.
pass
log = logging.getLogger(__name__)
__all__ = ["Seismology"]
class Seismology(object):
"""Enables astroseismic quantities to be estimated from periodograms.
This class provides easy access to methods to estimate numax, deltanu, radius,
mass, and logg, and stores them on its tray for easy diagnostic plotting.
Examples
--------
Download the TESS light curve for HIP 116158:
>>> import lightkurve as lk
>>> lc = lk.search_lightcurve("HIP 116158", sector=2).download() # doctest: +SKIP
>>> lc = lc.normalize().remove_nans().remove_outliers() # doctest: +SKIP
Create a Lomb-Scargle periodogram:
>>> pg = lc.to_periodogram(normalization='psd', minimum_frequency=100, maximum_frequency=800) # doctest: +SKIP
Create a Seismology object and use it to estimate parameters:
>>> seismology = pg.flatten().to_seismology() # doctest: +SKIP
>>> seismology.estimate_numax() # doctest: +SKIP
numax: 415.00 uHz (method: ACF2D)
>>> seismology.estimate_deltanu() # doctest: +SKIP
deltanu: 28.78 uHz (method: ACF2D)
>>> seismology.estimate_radius(teff=5080) # doctest: +SKIP
radius: 2.78 solRad (method: Uncorrected Scaling Relations)
Parameters
----------
periodogram : `~lightkurve.periodogram.Periodogram` object
Periodogram to be analyzed. Must be background-corrected,
e.g. using `periodogram.flatten()`.
"""
periodogram = None
"""The periodogram from which seismological parameters are being extracted."""
def __init__(self, periodogram):
if not isinstance(periodogram, SNRPeriodogram):
warnings.warn(
"Seismology received a periodogram which does not appear "
"to have been background-corrected. Please consider calling "
"`periodogram.flatten()` prior to extracting seismological parameters.",
LightkurveWarning,
)
self.periodogram = periodogram
def __repr__(self):
attrs = np.asarray(["numax", "deltanu", "mass", "radius", "logg"])
tray = np.asarray([hasattr(self, attr) for attr in attrs])
if tray.sum() == 0:
tray_str = " - no values have been computed so far."
else:
tray_str = " - computed values:\n * " + "\n * ".join(
[getattr(self, attr).__repr__() for attr in attrs[tray]]
)
return "Seismology(ID: {}){}".format(self.periodogram.label, tray_str)
@staticmethod
def from_lightcurve(lc, **kwargs):
"""Returns a `Seismology` object given a `LightCurve`."""
log.info(
"Building a Seismology object directly from a light curve "
"uses default periodogram parameters. For further tuneability, "
"create a periodogram object first, using `to_periodogram`."
)
return Seismology(
periodogram=lc.normalize()
.remove_nans()
.fill_gaps()
.to_periodogram(**kwargs)
.flatten()
)
def _validate_numax(self, numax):
"""Raises exception if `numax` is None and `self.numax` is not set."""
if numax is None:
try:
return self.numax
except AttributeError:
raise AttributeError(
"You need to call `Seismology.estimate_numax()` first."
)
return numax
def _validate_deltanu(self, deltanu):
"""Raises exception if `deltanu` is None and `self.deltanu` is not set."""
if deltanu is None:
try:
return self.deltanu
except AttributeError:
raise AttributeError(
"You need to call `Seismology.estimate_deltanu()` first."
)
return deltanu
def _clean_echelle(
self,
deltanu=None,
numax=None,
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
):
"""Takes input seismology object and creates the necessary arrays for an echelle
diagram. Validates all the inputs.
Parameters
----------
deltanu : float
Value for the large frequency separation of the seismic mode
frequencies in the periodogram. Assumed to have the same units as
the frequencies, unless given an Astropy unit.
Is assumed to be in the same units as frequency if not given a unit.
numax : float
Value for the frequency of maximum oscillation. If a numax is
passed, a suitable range one FWHM of the mode envelope either side
            of it will be shown. This is overwritten by custom frequency ranges.
Is assumed to be in the same units as frequency if not given a unit.
minimum_frequency : float
The minimum frequency at which to display the echelle
Is assumed to be in the same units as frequency if not given a unit.
maximum_frequency : float
The maximum frequency at which to display the echelle.
Is assumed to be in the same units as frequency if not given a unit.
smooth_filter_width : float
If given a value, will smooth periodogram used to plot the echelle
diagram using the periodogram.smooth(method='boxkernel') method with
a filter width of `smooth_filter_width`. This helps visualise the
echelle diagram. Is assumed to be in the same units as the
periodogram frequency.
scale: str
Set z axis to be "linear" or "log". Default is linear.
Returns
-------
ep : np.ndarray
Echelle diagram power
x_f : np.ndarray
frequencies for X axis
y_f : np.ndarray
frequencies for Y axis
"""
if (minimum_frequency is None) & (maximum_frequency is None):
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if (not hasattr(numax, "unit")) & (numax is not None):
numax = numax * self.periodogram.frequency.unit
if (not hasattr(deltanu, "unit")) & (deltanu is not None):
deltanu = deltanu * self.periodogram.frequency.unit
if smooth_filter_width:
pgsmooth = self.periodogram.smooth(filter_width=smooth_filter_width)
            freq = pgsmooth.frequency  # Makes code below more readable
            power = pgsmooth.power  # Makes code below more readable
else:
freq = self.periodogram.frequency # Makes code below more readable
power = self.periodogram.power # Makes code below more readable
fmin = freq[0]
fmax = freq[-1]
# Check for any superfluous input
if (numax is not None) & (
any([a is not None for a in [minimum_frequency, maximum_frequency]])
):
warnings.warn(
"You have passed both a numax and a frequency limit. "
"The frequency limit will override the numax input.",
LightkurveWarning,
)
# Ensure input numax is in the correct units (if there is one)
if numax is not None:
numax = u.Quantity(numax, freq.unit).value
if numax > freq[-1].value:
raise ValueError(
"You can't pass in a numax outside the"
"frequency range of the periodogram."
)
fwhm = utils.get_fwhm(self.periodogram, numax)
fmin = numax - 2 * fwhm
if fmin < freq[0].value:
fmin = freq[0].value
fmax = numax + 2 * fwhm
if fmax > freq[-1].value:
fmax = freq[-1].value
# Set limits and set them in the right units
if minimum_frequency is not None:
fmin = u.Quantity(minimum_frequency, freq.unit).value
if fmin > freq[-1].value:
raise ValueError(
"You can't pass in a limit outside the "
"frequency range of the periodogram."
)
if maximum_frequency is not None:
fmax = u.Quantity(maximum_frequency, freq.unit).value
if fmax > freq[-1].value:
raise ValueError(
"You can't pass in a limit outside the "
"frequency range of the periodogram."
)
# Make sure fmin and fmax are Quantities or code below will break
fmin = u.Quantity(fmin, freq.unit)
fmax = u.Quantity(fmax, freq.unit)
# Add on 1x deltanu so we don't miss off any important range due to rounding
if fmax < freq[-1] - 1.5 * deltanu:
fmax += deltanu
fs = np.median(np.diff(freq))
x0 = int(freq[0] / fs)
ff = freq[int(fmin / fs) - x0 : int(fmax / fs) - x0] # Selected frequency range
pp = power[int(fmin / fs) - x0 : int(fmax / fs) - x0] # Power range
# Reshape the power into n_rows of n_columns
# When modulus ~ zero, deltanu divides into frequency without remainder
mod_zeros = find_peaks(-1.0 * (ff % deltanu))[0]
# The bottom left corner of the plot is the lowest frequency that
# divides into deltanu with almost zero remainder
start = mod_zeros[0]
# The top left corner of the plot is the highest frequency that
# divides into deltanu with almost zero remainder. This index is the
# approximate end, because we fix an integer number of rows and columns
approx_end = mod_zeros[-1]
# The number of rows is the number of times you can partition your
# frequency range into chunks of size deltanu, start and ending at
# frequencies that divide nearly evenly into deltanu
n_rows = len(mod_zeros) - 1
# The number of columns is the total number of frequency points divided
# by the number of rows, floor divided to the nearest integer value
n_columns = int((approx_end - start) / n_rows)
# The exact end point is therefore the ncolumns*nrows away from the start
end = start + n_columns * n_rows
ep = np.reshape(pp[start:end], (n_rows, n_columns))
if scale == "log":
ep = np.log10(ep)
        # Reshape the freq into n_rows of n_columns & create arrays
ef = np.reshape(ff[start:end], (n_rows, n_columns))
x_f = (ef[0, :] - ef[0, 0]) % deltanu
y_f = ef[:, 0]
return ep, x_f, y_f
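    # Hedged worked example of the folding above (illustrative numbers): with
    # fs = 0.1 and deltanu = 10, rows starting near 100, 110, 120 and 130
    # each hold int(10 / 0.1) = 100 bins; x_f runs 0.0..9.9 within a row
    # ((f - row_start) % deltanu) and y_f lists the row-start frequencies, so
    # modes of equal radial degree line up vertically when deltanu is correct.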
def plot_echelle(
self,
deltanu=None,
numax=None,
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
ax=None,
cmap="Blues",
):
"""Plots an echelle diagram of the periodogram by stacking the
periodogram in slices of deltanu.
Modes of equal radial degree should appear approximately vertically aligned.
If no structure is present, you are likely dealing with a faulty deltanu
value or a low signal to noise case.
This method is adapted from work by Daniel Hey & Guy Davies.
Parameters
----------
deltanu : float
Value for the large frequency separation of the seismic mode
frequencies in the periodogram. Assumed to have the same units as
the frequencies, unless given an Astropy unit.
Is assumed to be in the same units as frequency if not given a unit.
numax : float
Value for the frequency of maximum oscillation. If a numax is
passed, a suitable range one FWHM of the mode envelope either side
            of it will be shown. This is overwritten by custom frequency ranges.
Is assumed to be in the same units as frequency if not given a unit.
minimum_frequency : float
The minimum frequency at which to display the echelle
Is assumed to be in the same units as frequency if not given a unit.
maximum_frequency : float
The maximum frequency at which to display the echelle.
Is assumed to be in the same units as frequency if not given a unit.
smooth_filter_width : float
If given a value, will smooth periodogram used to plot the echelle
diagram using the periodogram.smooth(method='boxkernel') method with
a filter width of `smooth_filter_width`. This helps visualise the
echelle diagram. Is assumed to be in the same units as the
periodogram frequency.
scale: str
Set z axis to be "linear" or "log". Default is linear.
cmap : str
The name of the matplotlib colourmap to use in the echelle diagram.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
if (minimum_frequency is None) & (maximum_frequency is None):
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if (not hasattr(numax, "unit")) & (numax is not None):
numax = numax * self.periodogram.frequency.unit
if (not hasattr(deltanu, "unit")) & (deltanu is not None):
deltanu = deltanu * self.periodogram.frequency.unit
ep, x_f, y_f = self._clean_echelle(
numax=numax,
deltanu=deltanu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
smooth_filter_width=smooth_filter_width,
)
# Plot the echelle diagram
with plt.style.context(MPLSTYLE):
if ax is None:
_, ax = plt.subplots()
extent = (x_f[0].value, x_f[-1].value, y_f[0].value, y_f[-1].value)
figsize = plt.rcParams["figure.figsize"]
a = figsize[1] / figsize[0]
b = (extent[3] - extent[2]) / (extent[1] - extent[0])
vmin = np.nanpercentile(ep.value, 1)
vmax = np.nanpercentile(ep.value, 99)
im = ax.imshow(
ep.value,
cmap=cmap,
aspect=a / b,
origin="lower",
extent=extent,
vmin=vmin,
vmax=vmax,
)
cbar = plt.colorbar(im, ax=ax, extend="both", pad=0.01)
if isinstance(self.periodogram, SNRPeriodogram):
ylabel = "Signal to Noise Ratio (SNR)"
elif self.periodogram.power.unit == cds.ppm:
ylabel = "Amplitude [{}]".format(
self.periodogram.power.unit.to_string("latex")
)
else:
ylabel = "Power Spectral Density [{}]".format(
self.periodogram.power.unit.to_string("latex")
)
if scale == "log":
ylabel = "log10(" + ylabel + ")"
cbar.set_label(ylabel)
ax.set_xlabel(r"Frequency mod. {:.2f}".format(deltanu))
ax.set_ylabel(
r"Frequency [{}]".format(
self.periodogram.frequency.unit.to_string("latex")
)
)
ax.set_title("Echelle diagram for {}".format(self.periodogram.label))
return ax
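# --- Editor's sketch (hypothetical usage; assumes a `Seismology` object
# `seis`, e.g. built via `periodogram.to_seismology()`): plotting the echelle
# diagram around a known numax with a known deltanu. Values are illustrative.
#
#     import astropy.units as u
#     ax = seis.plot_echelle(deltanu=4.66 * u.microhertz,
#                            numax=41.2 * u.microhertz,
#                            scale="log", cmap="Blues")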
def _make_echelle_elements(
self,
deltanu,
cmap="viridis",
minimum_frequency=None,
maximum_frequency=None,
smooth_filter_width=0.1,
scale="linear",
width=490,
height=340,
title="Echelle",
):
"""Helper function to make the elements of the echelle diagram for bokeh plotting."""
if not hasattr(deltanu, "unit"):
deltanu = deltanu * self.periodogram.frequency.unit
if smooth_filter_width:
pgsmooth = self.periodogram.smooth(filter_width=smooth_filter_width)
freq = pgsmooth.frequency  # Makes code below more readable
else:
freq = self.periodogram.frequency # Makes code below more readable
ep, x_f, y_f = self._clean_echelle(
deltanu=deltanu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
smooth_filter_width=smooth_filter_width,
scale=scale,
)
fig = figure(
width=width,
height=height,
x_range=(0, 1),
y_range=(y_f[0].value, y_f[-1].value),
title=title,
tools="pan,box_zoom,reset",
toolbar_location="above",
border_fill_color="white",
)
fig.yaxis.axis_label = r"Frequency [{}]".format(freq.unit.to_string())
fig.xaxis.axis_label = r"Frequency / {:.3f} Mod. 1".format(deltanu)
lo, hi = np.nanpercentile(ep.value, [0.1, 99.9])
vlo, vhi = 0.3 * lo, 1.7 * hi
vstep = (hi - lo) / 500  # positive step size for the stretch slider
color_mapper = LogColorMapper(palette="RdYlGn10", low=lo, high=hi)
fig.image(
image=[ep.value],
x=0,
y=y_f[0].value,
dw=1,
dh=y_f[-1].value,
color_mapper=color_mapper,
name="img",
)
stretch_slider = RangeSlider(
start=vlo,
end=vhi,
step=vstep,
title="",
value=(lo, hi),
orientation="vertical",
width=10,
height=230,
direction="rtl",
show_value=False,
sizing_mode="fixed",
name="stretch",
)
def stretch_change_callback(attr, old, new):
"""TPF stretch slider callback."""
fig.select("img")[0].glyph.color_mapper.high = new[1]
fig.select("img")[0].glyph.color_mapper.low = new[0]
stretch_slider.on_change("value", stretch_change_callback)
return fig, stretch_slider
def interact_echelle(self, notebook_url="localhost:8888", **kwargs):
"""Display an interactive Jupyter notebook widget showing an Echelle diagram.
This feature only works inside an active Jupyter Notebook, and
requires an optional dependency, ``bokeh`` (v1.0 or later).
This dependency can be installed using e.g. `conda install bokeh`.
Parameters
----------
notebook_url : str
Location of the Jupyter notebook page (default: "localhost:8888")
When showing Bokeh applications, the Bokeh server must be
explicitly configured to allow connections originating from
different URLs. This parameter defaults to the standard notebook
host and port. If you are running on a different location, you
will need to supply this value for the application to display
properly. If no protocol is supplied in the URL, e.g. if it is
of the form "localhost:8888", then "http" will be used.
"""
try:
import bokeh
if bokeh.__version__[0] == "0":
warnings.warn(
"interact() requires Bokeh version 1.0 or later", LightkurveWarning
)
except ImportError:
log.error(
"The interact() tool requires the `bokeh` Python package; "
"you can install bokeh using e.g. `conda install bokeh`."
)
return None
maximum_frequency = kwargs.pop(
"maximum_frequency", self.periodogram.frequency.max().value
)
minimum_frequency = kwargs.pop(
"minimum_frequency", self.periodogram.frequency.min().value
)
if not hasattr(self, "deltanu"):
dnu = SeismologyQuantity(
quantity=self.periodogram.frequency.max() / 30,
name="deltanu",
method="echelle",
)
else:
dnu = self.deltanu
def create_interact_ui(doc):
fig_tpf, stretch_slider = self._make_echelle_elements(
dnu,
maximum_frequency=maximum_frequency,
minimum_frequency=minimum_frequency,
**kwargs
)
maxdnu = self.periodogram.frequency.max().value / 5
# Interactive slider widgets
dnu_slider = Slider(
start=0.01,
end=maxdnu,
value=dnu.value,
step=0.01,
title="Delta Nu",
width=290,
)
r_button = Button(label=">", button_type="default", width=30)
l_button = Button(label="<", button_type="default", width=30)
rr_button = Button(label=">>", button_type="default", width=30)
ll_button = Button(label="<<", button_type="default", width=30)
def update(attr, old, new):
"""Callback to take action when dnu slider changes"""
dnu = SeismologyQuantity(
quantity=dnu_slider.value * u.microhertz,
name="deltanu",
method="echelle",
)
ep, _, _ = self._clean_echelle(
deltanu=dnu,
minimum_frequency=minimum_frequency,
maximum_frequency=maximum_frequency,
**kwargs
)
fig_tpf.select("img")[0].data_source.data["image"] = [ep.value]
fig_tpf.xaxis.axis_label = r"Frequency / {:.3f} Mod. 1".format(dnu)
def go_right_by_one_small():
"""Step forward in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value < maxdnu:
dnu_slider.value = existing_value + 0.002
def go_left_by_one_small():
"""Step back in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value > 0:
dnu_slider.value = existing_value - 0.002
def go_right_by_one():
"""Step forward in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value < maxdnu:
dnu_slider.value = existing_value + 0.01
def go_left_by_one():
"""Step back in time by a single cadence"""
existing_value = dnu_slider.value
if existing_value > 0:
dnu_slider.value = existing_value - 0.01
dnu_slider.on_change("value", update)
r_button.on_click(go_right_by_one_small)
l_button.on_click(go_left_by_one_small)
rr_button.on_click(go_right_by_one)
ll_button.on_click(go_left_by_one)
widgets_and_figures = layout(
[fig_tpf, [Spacer(height=20), stretch_slider]],
[
ll_button,
Spacer(width=30),
l_button,
Spacer(width=25),
dnu_slider,
Spacer(width=30),
r_button,
Spacer(width=23),
rr_button,
],
)
doc.add_root(widgets_and_figures)
output_notebook(verbose=False, hide_banner=True)
return show(create_interact_ui, notebook_url=notebook_url)
def estimate_numax(self, method="acf2d", **kwargs):
"""Returns the frequency of the peak of the seismic oscillation modes envelope.
At present, the only method supported is based on using a
2D autocorrelation function (ACF2D). This method is implemented by the
`~lightkurve.seismology.estimate_numax_acf2d` function which accepts
the parameters `numaxs`, `window_width`, and `spacing`.
For details and literature references, please read the detailed
docstring of this function by typing ``lightkurve.seismology.estimate_numax_acf2d?``
in a Python terminal or notebook.
Parameters
----------
method : str
Method to use. Only ``"acf2d"`` is supported at this time.
Returns
-------
numax : `~lightkurve.seismology.SeismologyQuantity`
Numax of the periodogram, including details on the units and method.
"""
method = validate_method(method, supported_methods=["acf2d"])
if method == "acf2d":
from .numax_estimators import estimate_numax_acf2d
result = estimate_numax_acf2d(self.periodogram, **kwargs)
self.numax = result
return result
def diagnose_numax(self, numax=None):
"""Create diagnostic plots showing how numax was estimated."""
numax = self._validate_numax(numax)
return numax.diagnostics_plot_method(numax, self.periodogram)
def estimate_deltanu(self, method="acf2d", numax=None):
"""Returns the average value of the large frequency spacing, DeltaNu,
of the seismic oscillations of the target.
At present, the only method supported is based on using an
autocorrelation function (ACF2D). This method is implemented by the
`~lightkurve.seismology.estimate_deltanu_acf2d` function which requires
the parameter `numax`. For details and literature references, please
read the detailed docstring of this function by typing
``lightkurve.seismology.estimate_deltanu_acf2d?`` in a Python terminal or notebook.
Parameters
----------
method : str
Method to use. Only ``"acf2d"`` is supported at this time.
Returns
-------
deltanu : `~lightkurve.seismology.SeismologyQuantity`
DeltaNu of the periodogram, including details on the units and method.
"""
method = validate_method(method, supported_methods=["acf2d"])
numax = self._validate_numax(numax)
if method == "acf2d":
from .deltanu_estimators import estimate_deltanu_acf2d
result = estimate_deltanu_acf2d(self.periodogram, numax=numax)
self.deltanu = result
return result
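# --- Editor's sketch (hypothetical usage; assumes a signal-to-noise
# periodogram `snr_pg`, e.g. from `pg.flatten()`): the quick-look workflow
# chains the two estimators, since the deltanu estimate windows the power
# spectrum around the numax found first.
#
#     seis = snr_pg.to_seismology()
#     numax = seis.estimate_numax()      # ACF2D estimate, stored on `seis`
#     deltanu = seis.estimate_deltanu()  # reuses `seis.numax` by default
#     seis.diagnose_numax()              # optional diagnostic plots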
def diagnose_deltanu(self, deltanu=None):
"""Create diagnostic plots showing how numax was estimated."""
deltanu = self._validate_deltanu(deltanu)
return deltanu.diagnostics_plot_method(deltanu, self.periodogram)
def estimate_radius(self, teff=None, numax=None, deltanu=None):
"""Returns a stellar radius estimate based on the scaling relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar radius as
R = Rsol * (numax/numax_sol) * (deltanu/deltanu_sol)^-2 * (Teff/Teff_sol)^0.5
where R is the radius and Teff is the effective temperature, and the suffix
'sol' indicates a solar value. In this method we use the solar values for
numax and deltanu as given in Huber et al. (2011) and for Teff as given in
Prsa et al. (2016).
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
(roughly 200 K, see KIC). For better results, use spectroscopic measurements of
temperature, which are often more precise.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
deltanu : float
The frequency spacing between two consecutive overtones of equal radial
degree. If not given an astropy unit, assumed to be in units of
microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
deltanu_err : float
Error on deltanu. Assumed to be same units as deltanu
teff_err : float
Error on Teff. Assumed to be same units as Teff.
Returns
-------
radius : `~lightkurve.seismology.SeismologyQuantity`
Stellar radius estimate.
"""
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if teff is None:
teff = self.periodogram.meta.get("TEFF")
if teff is None:
raise ValueError(
"You must provide an effective temperature argument (`teff`) to `estimate_radius`,"
"because the Periodogram object does not contain it in its meta data (i.e. `pg.meta['TEFF']` is missing"
)
else:
log.info(
"Using value for effective temperature from the Kepler Input Catalogue."
"These temperatue values may sometimes differ significantly from modern estimates."
)
pass
else:
pass
result = stellar_estimators.estimate_radius(numax, deltanu, teff)
self.radius = result
return result
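# --- Editor's sketch (not lightkurve's implementation): the radius scaling
# relation from the docstring above, written out with plain floats in solar
# units. The solar reference values (numax_sol = 3090 uHz and
# deltanu_sol = 135.1 uHz from Huber et al. 2011; Teff_sol = 5772 K from
# Prsa et al. 2016) are literature values assumed here, not read from lightkurve.
def _example_scaling_radius(numax, deltanu, teff,
                            numax_sol=3090.0, deltanu_sol=135.1,
                            teff_sol=5772.0):
    """R/Rsol = (numax/numax_sol) * (deltanu/deltanu_sol)**-2 * (teff/teff_sol)**0.5"""
    return ((numax / numax_sol)
            * (deltanu / deltanu_sol) ** -2
            * (teff / teff_sol) ** 0.5)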
def estimate_mass(self, teff=None, numax=None, deltanu=None):
"""Calculates mass using the asteroseismic scaling relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar mass as
M = Msol * (numax/numax_sol)^3 * (deltanu/deltanu_sol)^-4 * (Teff/Teff_sol)^1.5
where M is the mass and Teff is the effective temperature, and the suffix
'sol' indicates a solar value. In this method we use the solar values for
numax and deltanu as given in Huber et al. (2011) and for Teff as given in
Prsa et al. (2016).
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
(roughly 200 K, see KIC). For better results, use spectroscopic measurements of
temperature, which are often more precise.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
deltanu : float
The frequency spacing between two consecutive overtones of equal radial
degree. If not given an astropy unit, assumed to be in units of
microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
deltanu_err : float
Error on deltanu. Assumed to be same units as deltanu
teff_err : float
Error on Teff. Assumed to be same units as Teff.
Returns
-------
mass : `~lightkurve.seismology.SeismologyQuantity`
Stellar mass estimate.
"""
numax = self._validate_numax(numax)
deltanu = self._validate_deltanu(deltanu)
if teff is None:
teff = self.periodogram.meta.get("TEFF")
if teff is None:
raise ValueError(
"You must provide an effective temperature argument (`teff`) to `estimate_radius`,"
"because the Periodogram object does not contain it in its meta data (i.e. `pg.meta['TEFF']` is missing"
)
else:
log.info(
"Using value for effective temperature from the Kepler Input Catalogue."
"These temperatue values may sometimes differ significantly from modern estimates."
)
pass
else:
pass
result = stellar_estimators.estimate_mass(numax, deltanu, teff)
self.mass = result
return result
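# --- Editor's sketch (not lightkurve's implementation): the mass scaling
# relation from the docstring above, using the same assumed solar reference
# values as the radius sketch.
def _example_scaling_mass(numax, deltanu, teff,
                          numax_sol=3090.0, deltanu_sol=135.1,
                          teff_sol=5772.0):
    """M/Msol = (numax/numax_sol)**3 * (deltanu/deltanu_sol)**-4 * (teff/teff_sol)**1.5"""
    return ((numax / numax_sol) ** 3
            * (deltanu / deltanu_sol) ** -4
            * (teff / teff_sol) ** 1.5)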
def estimate_logg(self, teff=None, numax=None):
"""Calculates the log of the surface gravity using the asteroseismic scaling
relations.
The two global observable seismic parameters, numax and deltanu, along with
temperature, scale with fundamental stellar properties (Brown et al. 1991;
Kjeldsen & Bedding 1995). These scaling relations can be rearranged to
calculate a stellar surface gravity as
g = gsol * (numax/numax_sol) * (Teff/Teff_sol)^0.5
where g is the surface gravity and Teff is the effective temperature,
and the suffix 'sol' indicates a solar value. In this method we use the
solar values for numax as given in Huber et al. (2011) and for Teff as given
in Prsa et al. (2016). The solar surface gravity is calculated from the
astropy constants for solar mass and radius and does not have an error.
The solar surface gravity is returned as log10(g) with units in dex, as is
common in the astrophysics literature.
This code structure borrows from work done in Bellinger et al. (2019), which
also functions as an accessible explanation of seismic scaling relations.
If no value of effective temperature is given, this function will check the
meta data of the `Periodogram` object used to create the `Seismology` object.
These data will often contain an effective temperature from the Kepler Input
Catalogue (KIC, https://ui.adsabs.harvard.edu/abs/2011AJ....142..112B/abstract),
or from the EPIC or TIC for K2 and TESS respectively. The temperature values in these
catalogues are estimated using photometry, and so have large associated uncertainties
(roughly 200 K, see KIC). For better results, use spectroscopic measurements of
temperature, which are often more precise.
NOTE: These scaling relations are scaled to the Sun, and therefore do not
always produce an entirely accurate result for more evolved stars.
Parameters
----------
numax : float
The frequency of maximum power of the seismic mode envelope. If not
given an astropy unit, assumed to be in units of microhertz.
teff : float
The effective temperature of the star. In units of Kelvin.
numax_err : float
Error on numax. Assumed to be same units as numax
teff_err : float
Error on teff. Assumed to be same units as teff.
Returns
-------
logg : `~lightkurve.seismology.SeismologyQuantity`
Stellar surface gravity estimate.
"""
numax = self._validate_numax(numax)
if teff is None:
teff = self.periodogram.meta.get("TEFF")
if teff is None:
raise ValueError(
"You must provide an effective temperature argument (`teff`) to `estimate_radius`,"
"because the Periodogram object does not contain it in its meta data (i.e. `pg.meta['TEFF']` is missing"
)
else:
log.info(
"Using value for effective temperature from the Kepler Input Catalogue."
"These temperatue values may sometimes differ significantly from modern estimates."
)
pass
else:
pass
result = stellar_estimators.estimate_logg(numax, teff)
self.logg = result
return result
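# --- Editor's sketch (not lightkurve's implementation): the surface-gravity
# scaling relation from the docstring above. log10(g_sol) ~ 4.438 dex (cgs)
# is an assumed literature value, consistent with the astropy solar constants.
def _example_scaling_logg(numax, teff, numax_sol=3090.0, teff_sol=5772.0,
                          logg_sol=4.438):
    """log g = log g_sol + log10((numax/numax_sol) * (teff/teff_sol)**0.5), in dex."""
    import math
    return logg_sol + math.log10((numax / numax_sol) * (teff / teff_sol) ** 0.5)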
| 38,459
| 40.533477
| 124
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/seismology/utils.py
|
"""Generic classes and functions which aid the asteroseismology features."""
import numpy as np
import copy
from astropy import units as u
from astropy.units import Quantity
__all__ = ["SeismologyQuantity"]
class SeismologyQuantity(Quantity):
"""Holds an asteroseismic value including its unit, error, and estimation method.
Compared to a traditional AstroPy `~astropy.units.Quantity` object, this
class has the following extra attributes:
* name (e.g. 'deltanu' or 'radius');
* error (i.e. the uncertainty);
* method (e.g. specifying the asteroseismic scaling relation);
* diagnostics;
* diagnostics_plot_method.
"""
def __new__(
cls,
quantity,
name=None,
error=None,
method=None,
diagnostics=None,
diagnostics_plot_method=None,
):
# Note: Quantity is peculiar to sub-class because it inherits from numpy ndarray;
# see https://docs.astropy.org/en/stable/units/quantity.html#subclassing-quantity.
self = Quantity.__new__(cls, quantity.value)
self.__dict__ = quantity.__dict__
self.name = name
self.error = error
self.method = method
self.diagnostics = diagnostics
self.diagnostics_plot_method = diagnostics_plot_method
return self
def __repr__(self):
try:
return "{}: {} {} (method: {})".format(
self.name, "{:.2f}".format(self.value), self.unit.__str__(), self.method
)
except AttributeError: # Math operations appear to remove Seismic attributes for now
return super().__repr__()
def _repr_latex_(self):
try:
return "{}: {} {} (method: {})".format(
self.name,
"${:.2f}$".format(self.value),
self.unit._repr_latex_(),
self.method,
)
except AttributeError: # Math operations appear to remove Seismic attributes for now
return super()._repr_latex_()
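# --- Editor's sketch: constructing a SeismologyQuantity by hand. The numbers
# are made up for illustration; in practice these objects are returned by the
# estimators in this sub-package.
#
#     import astropy.units as u
#     nu = SeismologyQuantity(220.0 * u.microhertz, name="numax",
#                             error=5.0 * u.microhertz, method="ACF2D")
#     print(nu)   # -> "numax: 220.00 uHz (method: ACF2D)"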
def get_fwhm(periodogram, numax):
"""In a power spectrum of a solar-like oscillator, the power of the
modes of oscillation will appear in a shape that looks approximately
Gaussian for all basic purposes, also referred to as the
'mode envelope'. For a given numax (the central frequency of the mode
envelope), the expected Full Width Half Maximum of the envelope is known
as a function of numax for evolved Red Giant Branch stars as follows
(see Mosser et al 2010):
fwhm = 0.66 * numax^0.88 .
If the maximum frequency in the periodogram is less than 500 microhertz,
this function will default to the above equation under the assumption it
is dealing with an RGB star, which oscillates at lower frequencies.
If the maximum frequency is above 500 microhertz, the envelope is given
as a different function of numax (see Lund et al. 2017), as
fwhm = 0.25 * numax,
in which case the function assumes it is dealing with a main sequence
star, which oscillates at higher frequencies.
Parameters
----------
numax : float
The estimated position of the numax of the power spectrum, used to
estimate the width of the mode envelope.
Returns
-------
fwhm: float
The estimated full-width-half-maximum of the seismic mode envelope
"""
# Calculate the index FWHM for a given numax
if u.Quantity(periodogram.frequency[-1], u.microhertz) > u.Quantity(
500.0, u.microhertz
):
fwhm = 0.25 * numax
else:
fwhm = 0.66 * numax ** 0.88
return fwhm
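# --- Editor's worked example of the two regimes above (numax assumed to be
# in microhertz): an RGB-like star at numax = 100 uHz gives
# fwhm = 0.66 * 100**0.88 ~= 38 uHz, while a main-sequence-like star at
# numax = 2000 uHz gives fwhm = 0.25 * 2000 = 500 uHz.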
def autocorrelate(periodogram, numax, window_width=25.0, frequency_spacing=None):
"""An autocorrelation function (ACF) for seismic mode envelopes.
We autocorrelate a region with a width of `window_width` (in microhertz)
around a central frequency `numax` (in microhertz). The window size is
determined based on the location of the nyquist frequency when
estimating numax, and based on the expected width of the mode envelope
of the asteroseismic oscillations when calculating deltanu. The section of
power being autocorrelated is first rescaled by subtracting its mean, so
that its noise is centered around zero. If this is not done, noise will
appear in the ACF as a function of 1/lag.
Parameters
----------
numax : float
The estimated position of the numax of the power spectrum. This
is used to calculate the region autocorrelated with itself.
window_width : int or float
The width of the autocorrelation window around the central
frequency numax.
frequency_spacing : float
The frequency spacing of the periodogram. If none is passed, it
is calculated internally. This should never be set by the user.
Returns
--------
acf : array-like
The autocorrelation power calculated for the given numax
"""
if frequency_spacing is None:
frequency_spacing = np.median(np.diff(periodogram.frequency.value))
spread = int(window_width / 2 / frequency_spacing) # Find the spread in indices
x = int(numax / frequency_spacing) # Find the index value of numax
x0 = int(
(periodogram.frequency[0].value / frequency_spacing)
) # Transform in case the index isn't from 0
xt = x - x0
p_sel = copy.deepcopy(
periodogram.power[xt - spread : xt + spread].value
) # Make the window selection
p_sel -= np.nanmean(p_sel) # Make it so that the selection has zero mean.
C = np.correlate(p_sel, p_sel, mode="full")[
len(p_sel) - 1 :
]  # Correlate the windowed power with itself
return C
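# --- Editor's sketch (standalone, not part of lightkurve): the core of the
# autocorrelation above, applied to a synthetic comb of evenly spaced peaks.
# After mean subtraction the ACF shows strong peaks at lags that are
# multiples of the comb spacing, which is what the deltanu estimator exploits.
def _example_acf_of_comb(n=1000, spacing=50):
    import numpy as np
    p = np.random.default_rng(0).normal(0.0, 0.1, n)
    p[::spacing] += 1.0                            # evenly spaced "modes"
    p -= p.mean()                                  # centre the noise around zero
    acf = np.correlate(p, p, mode="full")[n - 1:]
    return acf                                     # acf[spacing] is a strong peak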
| 5,796
| 36.4
| 93
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/seismology/numax_estimators.py
|
"""Helper functions for estimating numax from periodograms."""
import numpy as np
from matplotlib import pyplot as plt
from astropy.convolution import convolve, Gaussian1DKernel
from astropy import units as u
from .. import MPLSTYLE
from . import utils
from .utils import SeismologyQuantity
__all__ = ["estimate_numax_acf2d", "diagnose_numax_acf2d"]
def estimate_numax_acf2d(periodogram, numaxs=None, window_width=None, spacing=None):
"""Estimates the peak of the envelope of seismic oscillation modes, numax,
using an autocorrelation function.
There are many papers on the topic of autocorrelation functions for
estimating seismic parameters, including but not limited to:
Roxburgh & Vorontsov (2006), Roxburgh (2009), Mosser & Appourchaux (2009),
Huber et al. (2009), Verner & Roxburgh (2011) & Viani et al. (2019).
We base this approach first and foremost off the 2D ACF numax estimation
presented in Viani et al. (2019) and other papers above. A window of
fixed width (either given by the user, 25 microhertz for Red Giants or
250 microhertz for Main Sequence stars) is moved along the power
spectrum, where the central frequency of the window moves in steps of 1
microhertz (or given by the user as `spacing`) and evaluates the
autocorrelation at each step.
The correlation (numpy.correlate) is typically given as:
C[x, y] = sum( x * conj(y) ) .
The autocorrelation power of a full spectrum with itself is then
C = sum(s * s),
where s is a window of the signal-to-noise spectrum.
Because of the method of this calculation, we need to first
rescale the power by subtracting its mean, placing its mean around 0. This
decreases the noise levels in the ACF, as the autocorrelation of the noise
with itself will be close to zero.
In order to evaluate where the correlation power is highest (indicative
of the power excess of the modes) we calculate the Mean Collapsed
Correlation (MCC, see Kiefer 2013, Viani et al. 2019) as
MCC = (sum(|C|) - 1) / nlags ,
where C is the autocorrelation power at a given central frequency, and
nlags is the number of lags in the autocorrelation.
The MCC metric is convolved with an Astropy Gaussian 1D Kernel with a
standard deviation of 1/5th of the window size to smooth it. The
frequency that results in the highest value of the smoothed MCC is the
detected numax.
NOTE: This method is not robust against large peaks in the spectrum (due
to e.g. spacecraft rotation), nor is it robust in the case of low signal
to noise (such as for single sector TESS data). Exercise caution when
using this module!
NOTE: This function is intended for use with solar like Main Sequence
and Red Giant Branch oscillators only.
Parameters
----------
numaxs : array-like
An array of numaxs at which to evaluate the autocorrelation. If
none is given, a sensible range will be chosen. If no units are
given it is assumed to be in the same units as the periodogram
frequency.
window_width : int or float
The width of the autocorrelation window around each central
frequency in 'numaxs'. If none is given, a sensible value will be
chosen. If no units are given it is assumed to be in the same units
as the periodogram frequency.
spacing : int or float
The spacing between central frequencies (numaxs) at which the
autocorrelation is evaluated. If none is given, a sensible value
will be assumed. If no units are given it is assumed to be in the
same units as the periodogram frequency.
Returns
-------
numax : `.SeismologyQuantity`
The numax of the periodogram. In the units of the periodogram object
frequency.
"""
# Detect whether the frequency grid is evenly-spaced
if not periodogram._is_evenly_spaced():
raise ValueError(
"the ACF 2D method requires that the periodogram "
"has a grid of uniformly spaced frequencies."
)
# Calculate the window_width size
# C: What is this doing? Why have these values been picked? This function is slow.
if window_width is None:
if u.Quantity(periodogram.frequency[-1], u.microhertz) > u.Quantity(
500.0, u.microhertz
):
window_width = (
u.Quantity(250.0, u.microhertz).to(periodogram.frequency.unit).value
)
else:
window_width = (
u.Quantity(25.0, u.microhertz).to(periodogram.frequency.unit).value
)
# Calculate the spacing size
if spacing is None:
if u.Quantity(periodogram.frequency[-1], u.microhertz) > u.Quantity(
500.0, u.microhertz
):
spacing = (
u.Quantity(10.0, u.microhertz).to(periodogram.frequency.unit).value
)
else:
spacing = u.Quantity(1.0, u.microhertz).to(periodogram.frequency.unit).value
# Run some checks on the inputs
window_width = u.Quantity(window_width, periodogram.frequency.unit).value
spacing = u.Quantity(spacing, periodogram.frequency.unit).value
if numaxs is None:
numaxs = np.arange(
np.ceil(np.nanmin(periodogram.frequency.value)) + window_width / 2,
np.floor(np.nanmax(periodogram.frequency.value)) - window_width / 2,
spacing,
)
numaxs = u.Quantity(numaxs, periodogram.frequency.unit).value
if not hasattr(numaxs, "__iter__"):
numaxs = np.asarray([numaxs])
fs = np.median(np.diff(periodogram.frequency.value))
# Perform checks on spacing and window_width
for var, label in zip(
[np.asarray(window_width), np.asarray(spacing)], ["window_width", "spacing"]
):
if (var < fs).any():
raise ValueError(
"You can't have {} smaller than the "
"frequency separation!".format(label)
)
if (
var > (periodogram.frequency[-1].value - periodogram.frequency[0].value)
).any():
raise ValueError(
"You can't have {} wider than the entire "
"power spectrum!".format(label)
)
if (var < 0).any():
raise ValueError("Please pass an entirely positive {}.".format(label))
# Perform checks on numaxs
if any(numaxs < fs):
raise ValueError(
"A custom range of numaxs can not extend below " "a single frequency bin."
)
if any(numaxs > np.nanmax(periodogram.frequency.value)):
raise ValueError(
"A custom range of numaxs can not extend above "
"the highest frequency value in the periodogram."
)
# We want to find the numax which returns in the highest autocorrelation
# power, rescaled based on filter width
fs = np.median(np.diff(periodogram.frequency.value))
metric = np.zeros(len(numaxs))
acf2d = np.zeros([int(window_width / 2 / fs) * 2, len(numaxs)])
for idx, numax in enumerate(numaxs):
acf = utils.autocorrelate(
periodogram, numax, window_width=window_width, frequency_spacing=fs
) # Return the acf at this numax
acf2d[:, idx] = acf # Store the 2D acf
metric[idx] = (np.sum(np.abs(acf)) - 1) / len(
acf
) # Store the max acf power normalised by the length
# Smooth the data to find the peak
# Gaussian1D kernel takes a standard deviation in unitless indices. A stddev
# of sqrt(len(numaxs)) will result in a smoothing kernel that works for all
# resolutions of numax.
if len(numaxs) > 10:
g = Gaussian1DKernel(stddev=np.sqrt(len(numaxs)))
metric_smooth = convolve(metric, g, boundary="extend")
else:
metric_smooth = metric
# The highest value of the metric corresponds to numax
best_numax = numaxs[np.argmax(metric_smooth)]
best_numax = u.Quantity(best_numax, periodogram.frequency.unit)
# Create and return the object containing the result and diagnostics
diagnostics = {
"numaxs": numaxs,
"acf2d": acf2d,
"window_width": window_width,
"metric": metric,
"metric_smooth": metric_smooth,
}
result = SeismologyQuantity(
best_numax,
name="numax",
method="ACF2D",
diagnostics=diagnostics,
diagnostics_plot_method=diagnose_numax_acf2d,
)
return result
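# --- Editor's sketch (hypothetical usage; `snr_pg` is assumed to be a
# signal-to-noise periodogram with a uniform frequency grid): calling the
# estimator directly and inspecting the stored diagnostics.
#
#     numax = estimate_numax_acf2d(snr_pg)
#     print(numax, numax.diagnostics["window_width"])
#     diagnose_numax_acf2d(numax, snr_pg)   # three-panel diagnostic figure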
def diagnose_numax_acf2d(numax, periodogram):
"""Returns a diagnostic plot which elucidates how numax was estimated.
[1] The SNRPeriodogram plotted with a red line indicating the estimated
numax value.
[2] An image showing the 2D autocorrelation. On the y-axis is the
frequency lag of the autocorrelation window. The width of the window is
equal to `window_width`, and the spacing between lags is equal to
`numax_spacing`. On the x-axis is the central frequency at which the
autocorrelation was calculated. In the z-axis is the unitless
autocorrelation power. Shown in red is the estimated numax.
[3] The Mean Collapsed Correlation (MCC, see Viani et al. 2019) against
central frequency at which the MCC was calculated. Shown in red is the
estimated numax. Shown in blue is the MCC convolved with a Gaussian
smoothing kernel with a standard deviation of 1/5th the window size.
For details on the numax estimation, see the `estimate_numax()` function.
The calculation performed there is identical.
Parameters
----------
numax : `.SeismologyQuantity` object
The object returned by `estimate_numax_acf2d()`.
Returns
-------
ax : `~matplotlib.axes.Axes`
The matplotlib axes object.
"""
with plt.style.context(MPLSTYLE):
fig, ax = plt.subplots(3, sharex=True, figsize=(8.485, 12))
periodogram.plot(ax=ax[0], label="")
ax[0].axvline(
numax.value,
c="r",
linewidth=2,
alpha=0.4,
label="{} = {:7.5} {}".format(
r"$\nu_{\rm max}$",
numax.value,
periodogram.frequency.unit.to_string("latex"),
),
)
ax[0].legend(loc="upper right")
ax[0].set_xlabel("")
ax[0].text(
0.05,
0.9,
"Input Power Spectrum",
horizontalalignment="left",
transform=ax[0].transAxes,
fontsize=15,
)
vmin = np.nanpercentile(numax.diagnostics["acf2d"], 5)
vmax = np.nanpercentile(numax.diagnostics["acf2d"], 95)
ax[1].pcolormesh(
numax.diagnostics["numaxs"],
np.linspace(
0,
numax.diagnostics["window_width"],
num=numax.diagnostics["acf2d"].shape[0],
),
numax.diagnostics["acf2d"],
cmap="Blues",
vmin=vmin,
vmax=vmax,
)
ax[1].set_ylabel(
r"Frequency lag [{}]".format(periodogram.frequency.unit.to_string("latex"))
)
ax[1].axvline(numax.value, c="r", linewidth=2, alpha=0.4)
ax[1].text(
0.05,
0.9,
"2D AutoCorrelation",
horizontalalignment="left",
transform=ax[1].transAxes,
fontsize=13,
)
ax[2].plot(numax.diagnostics["numaxs"], numax.diagnostics["metric"])
ax[2].plot(
numax.diagnostics["numaxs"],
numax.diagnostics["metric_smooth"],
lw=2,
alpha=0.7,
label="Smoothed Metric",
)
ax[2].set_xlabel(
"Frequency [{}]".format(periodogram.frequency.unit.to_string("latex"))
)
ax[2].set_ylabel(r"Correlation Metric")
ax[2].axvline(numax.value, c="r", linewidth=2, alpha=0.4)
ax[2].text(
0.05,
0.9,
"Correlation Metric",
horizontalalignment="left",
transform=ax[2].transAxes,
fontsize=13,
)
ax[2].legend(loc="upper right")
ax[2].set_xlim(numax.diagnostics["numaxs"][0], numax.diagnostics["numaxs"][-1])
plt.subplots_adjust(hspace=0, wspace=0)
return ax
| 12,338
| 36.966154
| 88
|
py
|
lightkurve
|
lightkurve-main/src/lightkurve/seismology/__init__.py
|
"""The `lightkurve.seismology` sub-package provides classes and functions for
quick-look asteroseismic analyses."""
# Do not export the modules in this subpackage to the root namespace, important
# because `lightkurve.utils` collides with `lightkurve.seismology.utils`.
__all__ = [
"Seismology",
"SeismologyQuantity",
"estimate_numax_acf2d",
"diagnose_numax_acf2d",
"estimate_deltanu_acf2d",
"diagnose_deltanu_acf2d",
"estimate_radius",
"estimate_mass",
"estimate_logg",
]
from .core import *
from .utils import *
from .numax_estimators import *
from .deltanu_estimators import *
from .stellar_estimators import *
| 653
| 26.25
| 79
|
py
|
lightkurve
|
lightkurve-main/tests/test_interact_bls.py
|
"""Tests the features of the lightkurve.interact_bls module."""
import pytest
from astropy.timeseries import BoxLeastSquares
import astropy.units as u
import numpy as np
from lightkurve.lightcurve import KeplerLightCurve, TessLightCurve
from .test_lightcurve import KEPLER10, TESS_SIM
bad_optional_imports = False
try:
import bokeh
except:
bad_optional_imports = True
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh and astropy.stats.bls")
def test_malformed_notebook_url():
"""Test if malformed notebook_urls raise proper exceptions."""
lc = KeplerLightCurve.read(KEPLER10)
lc = lc.normalize().remove_nans().flatten()
with pytest.raises(ValueError) as exc:
lc.interact_bls(notebook_url="")
assert "Empty host value" in exc.value.args[0]
with pytest.raises(AttributeError) as exc:
lc.interact_bls(notebook_url=None)
assert "object has no attribute" in exc.value.args[0]
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh and astropy.stats.bls")
def test_graceful_exit_outside_notebook():
"""Test if running interact outside of a notebook does fails gracefully."""
lc = KeplerLightCurve.read(KEPLER10)
lc = lc.normalize().remove_nans().flatten()
result = lc.interact_bls()
assert result is None
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh and astropy.stats.bls")
def test_helper_functions():
"""Can we use all the functions in interact_bls?"""
from lightkurve.interact_bls import (
prepare_bls_datasource,
prepare_folded_datasource,
prepare_lightcurve_datasource,
)
from lightkurve.interact_bls import (
make_bls_figure_elements,
make_folded_figure_elements,
make_lightcurve_figure_elements,
)
from lightkurve.interact_bls import (
prepare_bls_help_source,
prepare_f_help_source,
prepare_lc_help_source,
)
lc = KeplerLightCurve.read(KEPLER10)
lc = lc.normalize().remove_nans().flatten()
lc_source = prepare_lightcurve_datasource(lc)
f_source = prepare_folded_datasource(lc.fold(1))
model = BoxLeastSquares(lc.time, lc.flux)
result = model.power([1, 2, 3], 0.3)
bls_source = prepare_bls_datasource(result, 0)
lc_help = prepare_lc_help_source(lc)
f_help = prepare_f_help_source(lc.fold(1))
bls_help = prepare_bls_help_source(bls_source, 1)
make_lightcurve_figure_elements(lc, lc, lc_source, lc_source, lc_help)
make_folded_figure_elements(lc.fold(1), lc.fold(1), f_source, f_source, f_help)
make_bls_figure_elements(result, bls_source, bls_help)
@pytest.mark.remote_data
def test_preprocess_lc():
"""Test to ensure the lightcurve is pre-processed before applying BLS for correctness and consistent output"""
from lightkurve.interact_bls import _preprocess_lc_for_bls
lc = KeplerLightCurve.read(KEPLER10)
# As of AstroPy v5, flux is a `MaskedQuantity` in which NaNs are masked;
# so the next assert would not pass.
if not hasattr(lc.flux, "mask"):
# ensure the test data has nan in flux pre-Astropy v5
assert np.isnan(lc.flux).any()
clean = _preprocess_lc_for_bls(lc)
assert not np.isnan(clean.flux).any() # ensure processed lc has no nan
assert clean.meta.get("NORMALIZED", False)
assert clean.flux.unit == u.dimensionless_unscaled
# case the lc is normalized, but in other units
lc = lc.normalize(unit="percent")
clean = _preprocess_lc_for_bls(lc)
assert not np.isnan(clean.flux).any() # ensure processed lc has no nan
assert clean.meta.get("NORMALIZED", False)
assert clean.flux.unit == u.dimensionless_unscaled
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh and astropy.stats.bls")
def test_full_widget():
"""Test if we can run the widget with the keywords"""
lc = KeplerLightCurve.read(KEPLER10)
lc = lc.normalize().remove_nans().flatten()
lc.interact_bls()
lc.interact_bls(minimum_period=4)
lc.interact_bls(maximum_period=5)
lc.interact_bls(resolution=1000)
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh and astropy.stats.bls")
def test_tess_widget():
"""Test if we can run the widget with the keywords"""
lc = TessLightCurve.read(TESS_SIM)
lc = lc.normalize().remove_nans().flatten()
lc.interact_bls()
lc.interact_bls(minimum_period=4)
lc.interact_bls(maximum_period=5)
lc.interact_bls(resolution=1000)
| 4,603
| 35.539683
| 114
|
py
|
lightkurve
|
lightkurve-main/tests/test_targetpixelfile.py
|
import collections
import os
import tempfile
import warnings
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from astropy.utils.data import get_pkg_data_filename
from astropy.io.fits.verify import VerifyWarning
from astropy.coordinates import SkyCoord
from astropy.io import fits
from astropy import wcs
from astropy.io.fits.card import UNDEFINED
import astropy.units as u
from astropy.utils.exceptions import AstropyWarning
from lightkurve.targetpixelfile import KeplerTargetPixelFile, TargetPixelFileFactory
from lightkurve.targetpixelfile import TessTargetPixelFile, TargetPixelFile
from lightkurve.lightcurve import TessLightCurve
from lightkurve.utils import LightkurveWarning, LightkurveDeprecationWarning
from lightkurve.io import read
from lightkurve.search import search_tesscut
from .test_synthetic_data import filename_synthetic_flat
filename_tpf_all_zeros = get_pkg_data_filename("data/test-tpf-all-zeros.fits")
filename_tpf_one_center = get_pkg_data_filename("data/test-tpf-non-zero-center.fits")
filename_tess = get_pkg_data_filename("data/tess25155310-s01-first-cadences.fits.gz")
# a local version of TABBY_TPF with ~ 2 days of data; should be sufficient for most tests
filename_tpf_tabby_lite = get_pkg_data_filename("data/test-tpf-kplr-tabby-100-cadences.fits")
TABBY_Q8 = (
"https://archive.stsci.edu/missions/kepler/lightcurves"
"/0084/008462852/kplr008462852-2011073133259_llc.fits"
)
TABBY_TPF = (
"https://archive.stsci.edu/missions/kepler/target_pixel_files"
"/0084/008462852/kplr008462852-2011073133259_lpd-targ.fits.gz"
)
TESS_SIM = (
"https://archive.stsci.edu/missions/tess/ete-6/tid/00/000"
"/004/176/tess2019128220341-0000000417699452-0016-s_tp.fits"
)
asteroid_TPF = get_pkg_data_filename("data/asteroid_test.fits")
@pytest.mark.remote_data
def test_load_bad_file():
"""Test if a light curve can be opened without exception."""
with pytest.raises(ValueError) as exc:
KeplerTargetPixelFile(TABBY_Q8)
assert "is this a target pixel file?" in exc.value.args[0]
with pytest.raises(ValueError) as exc:
TessTargetPixelFile(TABBY_Q8)
assert "is this a target pixel file?" in exc.value.args[0]
def test_tpf_shapes():
"""Are the data array shapes of the TargetPixelFile object consistent?"""
with warnings.catch_warnings():
# Ignore the "TELESCOP is not equal to TESS" warning
warnings.simplefilter("ignore", LightkurveWarning)
tpfs = [
KeplerTargetPixelFile(filename_tpf_all_zeros),
TessTargetPixelFile(filename_tpf_all_zeros),
]
for tpf in tpfs:
assert tpf.quality_mask.shape == tpf.hdu[1].data["TIME"].shape
assert tpf.flux.shape == tpf.flux_err.shape
def test_tpf_math():
"""Can you add, subtract, multiply and divide?"""
with warnings.catch_warnings():
# Ignore the "TELESCOP is not equal to TESS" warning
warnings.simplefilter("ignore", LightkurveWarning)
tpfs = [
KeplerTargetPixelFile(filename_tpf_all_zeros),
TessTargetPixelFile(filename_tpf_all_zeros),
]
# These should work
for tpf in tpfs:
for other in [1, np.ones(tpf.flux.shape[1:]), np.ones(tpf.shape)]:
tpf + other
tpf - other
tpf * other
tpf / other
tpf += other
tpf -= other
tpf *= other
tpf /= other
# These should fail with a value error because their shape is wrong.
for tpf in tpfs:
for other in [
np.asarray([1, 2]),
np.arange(len(tpf.time) - 1),
np.ones([100, 1]),
np.ones([1, 2, 3]),
]:
with pytest.raises(ValueError):
tpf + other
# Check the values are correct
assert np.all(
((tpf.flux.value + 2) == (tpf + 2).flux.value)[np.isfinite(tpf.flux)]
)
assert np.all(
((tpf.flux.value - 2) == (tpf - 2).flux.value)[np.isfinite(tpf.flux)]
)
assert np.all(
((tpf.flux.value * 2) == (tpf * 2).flux.value)[np.isfinite(tpf.flux)]
)
assert np.all(
((tpf.flux.value / 2) == (tpf / 2).flux.value)[np.isfinite(tpf.flux)]
)
assert np.all(
((tpf.flux_err.value * 2) == (tpf * 2).flux_err.value)[
np.isfinite(tpf.flux)
]
)
assert np.all(
((tpf.flux_err.value / 2) == (tpf / 2).flux_err.value)[
np.isfinite(tpf.flux)
]
)
def test_tpf_plot():
"""Sanity check to verify that tpf plotting works"""
with warnings.catch_warnings():
# Ignore the "TELESCOP is not equal to TESS" warning
warnings.simplefilter("ignore", LightkurveWarning)
tpfs = [
KeplerTargetPixelFile(filename_tpf_one_center),
TessTargetPixelFile(filename_tpf_one_center),
]
for tpf in tpfs:
tpf.plot()
tpf.plot(aperture_mask=tpf.pipeline_mask)
tpf.plot(aperture_mask="all")
tpf.plot(frame=3)
with pytest.raises(ValueError):
tpf.plot(frame=999999)
tpf.plot(cadenceno=125250)
with pytest.raises(ValueError):
tpf.plot(cadenceno=999)
tpf.plot(bkg=True)
tpf.plot(scale="sqrt")
tpf.plot(scale="log")
with pytest.raises(ValueError):
tpf.plot(scale="blabla")
tpf.plot(column="FLUX")
tpf.plot(column="FLUX_ERR")
tpf.plot(column="FLUX_BKG")
tpf.plot(column="FLUX_BKG_ERR")
tpf.plot(column="RAW_CNTS")
tpf.plot(column="COSMIC_RAYS")
with pytest.raises(ValueError):
tpf.plot(column="not a column")
plt.close("all")
def test_tpf_zeros():
"""Does the LightCurve of a zero-flux TPF make sense?"""
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros, quality_bitmask=None)
with warnings.catch_warnings():
# Ignore "LightCurve contains NaN times" warnings triggered by the liberal mask
warnings.simplefilter("ignore", LightkurveWarning)
lc = tpf.to_lightcurve()
# If you don't mask out bad data, time contains NaNs
assert np.any(
lc.time.value != tpf.time
) # Using the property that NaN does not equal NaN
# When you do mask out bad data everything should work.
assert (tpf.time.value == 0).any()
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros, quality_bitmask="hard")
lc = tpf.to_lightcurve(aperture_mask="all")
assert len(lc.time) == len(lc.flux)
assert np.all(lc.time == tpf.time)
assert np.all(np.isnan(lc.flux)) # we expect all NaNs because of #874
# The default QUALITY bitmask should have removed all NaNs in the TIME
assert ~np.any(np.isnan(tpf.time.value))
@pytest.mark.parametrize("centroid_method", [("moments"), ("quadratic")])
def test_tpf_ones(centroid_method):
"""Does the LightCurve of a one-flux TPF make sense? Regression test for #1103."""
with warnings.catch_warnings():
# Ignore the "TELESCOP is not equal to TESS" warning
warnings.simplefilter("ignore", LightkurveWarning)
tpfs = [
KeplerTargetPixelFile(filename_tpf_one_center),
TessTargetPixelFile(filename_tpf_one_center),
]
for tpf in tpfs:
lc = tpf.to_lightcurve(aperture_mask="all", centroid_method=centroid_method)
assert np.all(lc.flux.value == 1)
# The test TPF file contains 3x3 pixels with a single bright pixel in the center pixel.
# Because pixel coordinates refer to the center of a pixel (cf. #755),
# we expect the centroid to be exactly one larger than the corner coordinates.
# This is a regression test for #1103.
assert np.all(lc.centroid_row.value == tpf.row + 1)
assert np.all(lc.centroid_col.value == tpf.column + 1)
@pytest.mark.parametrize(
"quality_bitmask,answer",
[
(None, 1290),
("none", 1290),
("default", 1233),
("hard", 1101),
("hardest", 1101),
(1, 1290),
(100, 1278),
(2096639, 1101),
],
)
def test_bitmasking(quality_bitmask, answer):
"""Test whether the bitmasking behaves like it should"""
tpf = KeplerTargetPixelFile(
filename_tpf_one_center, quality_bitmask=quality_bitmask
)
with warnings.catch_warnings():
# Ignore "LightCurve contains NaN times" warnings triggered by liberal masks
warnings.simplefilter("ignore", LightkurveWarning)
lc = tpf.to_lightcurve()
assert len(lc.flux) == answer
def test_wcs():
"""Test the wcs property."""
for tpf in [
KeplerTargetPixelFile(filename_tpf_one_center),
TessTargetPixelFile(filename_tess),
]:
w = tpf.wcs
ra, dec = tpf.get_coordinates()
assert ra.shape == tpf.shape
assert dec.shape == tpf.shape
assert type(w).__name__ == "WCS"
@pytest.mark.parametrize("method", [("moments"), ("quadratic")])
def test_wcs_tabby(method):
"""Test the centroids from Tabby's star against simbad values"""
tpf = KeplerTargetPixelFile(filename_tpf_tabby_lite)
tpf.wcs
ra, dec = tpf.get_coordinates(0)
col, row = tpf.estimate_centroids(method=method)
col = col.value - tpf.column
row = row.value - tpf.row
y, x = int(np.round(col[0])), int(np.round(row[1]))
# Compare with RA and Dec from Simbad
assert np.isclose(ra[x, y], 301.5643971, 1e-4)
assert np.isclose(dec[x, y], 44.4568869, 1e-4)
def test_centroid_methods_consistency():
"""Are the centroid methods consistent for a well behaved target?"""
pixels = read(filename_synthetic_flat)
centr_moments = pixels.estimate_centroids(method="moments")
centr_quadratic = pixels.estimate_centroids(method="quadratic")
# check that the maximum relative difference doesn't exceed 1%
assert (
np.max(np.abs(centr_moments[0] - centr_quadratic[0]) / centr_moments[0]) < 1e-2
)
assert (
np.max(np.abs(centr_moments[1] - centr_quadratic[1]) / centr_moments[1]) < 1e-2
)
def test_properties():
"""Test the short-hand properties."""
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros)
assert tpf.channel == tpf.hdu[0].header["CHANNEL"]
assert tpf.module == tpf.hdu[0].header["MODULE"]
assert tpf.output == tpf.hdu[0].header["OUTPUT"]
assert tpf.ra == tpf.hdu[0].header["RA_OBJ"]
assert tpf.dec == tpf.hdu[0].header["DEC_OBJ"]
assert_array_equal(tpf.flux.value, tpf.hdu[1].data["FLUX"][tpf.quality_mask])
assert_array_equal(
tpf.flux_err.value, tpf.hdu[1].data["FLUX_ERR"][tpf.quality_mask]
)
assert_array_equal(
tpf.flux_bkg.value, tpf.hdu[1].data["FLUX_BKG"][tpf.quality_mask]
)
assert_array_equal(
tpf.flux_bkg_err.value, tpf.hdu[1].data["FLUX_BKG_ERR"][tpf.quality_mask]
)
assert_array_equal(tpf.quality, tpf.hdu[1].data["QUALITY"][tpf.quality_mask])
assert tpf.campaign == tpf.hdu[0].header["CAMPAIGN"]
assert tpf.quarter is None
def test_repr():
"""Do __str__ and __repr__ work?"""
for tpf in [
KeplerTargetPixelFile(filename_tpf_all_zeros),
TessTargetPixelFile(filename_tess),
]:
str(tpf)
repr(tpf)
def test_to_lightcurve():
for tpf in [
KeplerTargetPixelFile(filename_tpf_all_zeros),
TessTargetPixelFile(filename_tess),
]:
tpf.to_lightcurve()
tpf.to_lightcurve(aperture_mask=None)
tpf.to_lightcurve(aperture_mask="all")
lc = tpf.to_lightcurve(aperture_mask="threshold")
assert lc.time.scale == "tdb"
assert lc.label == tpf.hdu[0].header["OBJECT"]
if np.any(tpf.pipeline_mask):
tpf.to_lightcurve(aperture_mask="pipeline")
else:
with pytest.raises(ValueError):
tpf.to_lightcurve(aperture_mask="pipeline")
def test_bkg_lightcurve():
for tpf in [
KeplerTargetPixelFile(filename_tpf_all_zeros),
TessTargetPixelFile(filename_tess),
]:
lc = tpf.get_bkg_lightcurve()
lc = tpf.get_bkg_lightcurve(aperture_mask=None)
lc = tpf.get_bkg_lightcurve(aperture_mask="all")
assert lc.time.scale == "tdb"
assert lc.flux.shape == lc.flux_err.shape
assert len(lc.time) == len(lc.flux)
def test_aperture_photometry():
for tpf in [
KeplerTargetPixelFile(filename_tpf_all_zeros),
TessTargetPixelFile(filename_tess),
]:
tpf.extract_aperture_photometry()
for mask in [None, "all", "default", "threshold", "background"]:
tpf.extract_aperture_photometry(aperture_mask=mask)
if np.any(tpf.pipeline_mask):
tpf.extract_aperture_photometry(aperture_mask="pipeline")
else:
with pytest.raises(ValueError):
tpf.extract_aperture_photometry(aperture_mask="pipeline")
def test_tpf_to_fits():
"""Can we write a TPF back to a fits file?"""
for tpf in [
KeplerTargetPixelFile(filename_tpf_all_zeros),
TessTargetPixelFile(filename_tess),
]:
# `delete=False` is necessary to enable writing to the file on Windows
# but it means we have to clean up the tmp file ourselves
tmp = tempfile.NamedTemporaryFile(delete=False)
try:
tpf.to_fits(tmp.name)
finally:
tmp.close()
os.remove(tmp.name)
def test_tpf_factory():
"""Can we create TPFs using TargetPixelFileFactory?"""
from lightkurve.targetpixelfile import FactoryError
factory = TargetPixelFileFactory(n_cadences=10, n_rows=6, n_cols=8)
flux_0 = np.ones((6, 8))
factory.add_cadence(frameno=0, flux=flux_0, header={"TSTART": 0, "TSTOP": 10})
flux_9 = 3 * np.ones((6, 8))
factory.add_cadence(frameno=9, flux=flux_9, header={"TSTART": 90, "TSTOP": 100})
# You shouldn't be able to build a TPF like this...because TPFs shouldn't
# have extensions where time stamps are duplicated (here frames 1-8 will have
# time stamp zero)
with pytest.warns(LightkurveWarning, match="identical TIME values"):
tpf = factory.get_tpf()
[
factory.add_cadence(
frameno=i, flux=flux_0, header={"TSTART": i * 10, "TSTOP": (i * 10) + 10}
)
for i in np.arange(2, 9)
]
# This should fail because the time stamps of the images are not in order...
with pytest.warns(LightkurveWarning, match="chronological order"):
tpf = factory.get_tpf()
[
factory.add_cadence(
frameno=i, flux=flux_0, header={"TSTART": i * 10, "TSTOP": (i * 10) + 10}
)
for i in np.arange(1, 9)
]
# This should pass
tpf = factory.get_tpf(hdu0_keywords={"TELESCOP": "TESS"})
assert_array_equal(tpf.flux[0].value, flux_0)
assert_array_equal(tpf.flux[9].value, flux_9)
tpf = factory.get_tpf(hdu0_keywords={"TELESCOP": "Kepler"})
assert_array_equal(tpf.flux[0].value, flux_0)
assert_array_equal(tpf.flux[9].value, flux_9)
assert tpf.time[0].value == 5
assert tpf.time[9].value == 95
# Can you add the WRONG sized frame?
flux_wrong = 3 * np.ones((6, 9))
with pytest.raises(FactoryError):
factory.add_cadence(
frameno=2, flux=flux_wrong, header={"TSTART": 90, "TSTOP": 100}
)
# Can you add the WRONG cadence?
flux_wrong = 3 * np.ones((6, 8))
with pytest.raises(FactoryError):
factory.add_cadence(
frameno=11, flux=flux_wrong, header={"TSTART": 90, "TSTOP": 100}
)
# Can we add our own keywords?
tpf = factory.get_tpf(
hdu0_keywords={"creator": "Christina TargetPixelFileWriter", "TELESCOP": "TESS"}
)
assert tpf.get_keyword("CREATOR") == "Christina TargetPixelFileWriter"
def _create_image_array(header=None, shape=(5, 5)):
"""Helper function for tests below."""
if header is None:
header = fits.Header()
images = []
for i in range(5):
header["TSTART"] = i
header["TSTOP"] = i + 1
images.append(fits.ImageHDU(data=np.ones(shape), header=header))
return images
def test_tpf_from_images():
"""Basic tests of tpf.from_fits_images()"""
# Not without a wcs...
with pytest.raises(Exception):
TargetPixelFile.from_fits_images(
_create_image_array(),
size=(3, 3),
position=SkyCoord(-234.75, 8.3393, unit="deg"),
)
# Make a fake WCS based on astropy.docs...
w = wcs.WCS(naxis=2)
w.wcs.crpix = [-234.75, 8.3393]
w.wcs.cdelt = np.array([-0.066667, 0.066667])
w.wcs.crval = [0, -90]
w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
w.wcs.set_pv([(2, 1, 45.0)])
pixcrd = np.array([[0, 0], [24, 38], [45, 98]], dtype=float)  # np.float_ was removed in NumPy 2.0
header = w.to_header()
header["CRVAL1P"] = 10
header["CRVAL2P"] = 20
ra, dec = 268.21686048, -73.66991904
# Now this should work.
images = _create_image_array(header=header)
with warnings.catch_warnings():
# Ignore "LightkurveWarning: Could not detect filetype as TESSTargetPixelFile or KeplerTargetPixelFile, returning generic TargetPixelFile instead."
warnings.simplefilter("ignore", LightkurveWarning)
tpf = TargetPixelFile.from_fits_images(
images, size=(3, 3), position=SkyCoord(ra, dec, unit=(u.deg, u.deg))
)
assert isinstance(tpf, TargetPixelFile)
with warnings.catch_warnings():
# Some cards are too long -- to be investigated.
warnings.simplefilter("ignore", VerifyWarning)
# Can we write the output to disk?
# `delete=False` is necessary below to enable writing to the file on Windows
# but it means we have to clean up the tmp file ourselves
tmp = tempfile.NamedTemporaryFile(delete=False)
try:
tpf.to_fits(tmp.name)
finally:
tmp.close()
os.remove(tmp.name)
# Can we read in a list of file names or a list of HDUlists?
hdus = []
tmpfile_names = []
for im in images:
tmpfile = tempfile.NamedTemporaryFile(delete=False)
tmpfile_names.append(tmpfile.name)
hdu = fits.HDUList([fits.PrimaryHDU(), im])
hdu.writeto(tmpfile.name)
hdus.append(hdu)
with warnings.catch_warnings():
# Ignore "LightkurveWarning: Could not detect filetype as TESSTargetPixelFile or KeplerTargetPixelFile, returning generic TargetPixelFile instead."
warnings.simplefilter("ignore", LightkurveWarning)
# Should be able to run with a list of file names
tpf_tmpfiles = TargetPixelFile.from_fits_images(
tmpfile_names,
size=(3, 3),
position=SkyCoord(ra, dec, unit=(u.deg, u.deg)),
)
# Should be able to run with a list of HDUlists
tpf_hdus = TargetPixelFile.from_fits_images(
hdus, size=(3, 3), position=SkyCoord(ra, dec, unit=(u.deg, u.deg))
)
# Clean up the temporary files we created
for filename in tmpfile_names:
try:
os.remove(filename)
except PermissionError:
pass # This appears to happen on Windows
def test_tpf_wcs_from_images():
"""Test to see if tpf.from_fits_images() output a tpf with WCS in the header"""
# Not without a wcs...
with pytest.raises(Exception):
TargetPixelFile.from_fits_images(
_create_image_array(),
size=(3, 3),
position=SkyCoord(-234.75, 8.3393, unit="deg"),
)
# Make a fake WCS based on astropy.docs...
w = wcs.WCS(naxis=2)
w.wcs.crpix = [0.0, 0.0]
w.wcs.cdelt = np.array([0.001111, 0.001111])
w.wcs.crval = [23.2334, 45.2333]
w.wcs.ctype = ["RA---TAN", "DEC--TAN"]
header = w.to_header()
header["CRVAL1P"] = 10
header["CRVAL2P"] = 20
ra, dec = 23.2336, 45.235
with warnings.catch_warnings():
# Ignore "LightkurveWarning: Could not detect filetype as TESSTargetPixelFile or KeplerTargetPixelFile, returning generic TargetPixelFile instead."
warnings.simplefilter("ignore", LightkurveWarning)
# Now this should work.
tpf = TargetPixelFile.from_fits_images(
_create_image_array(header=header),
size=(3, 3),
position=SkyCoord(ra, dec, unit=(u.deg, u.deg)),
)
assert tpf.hdu[1].header["1CRPX5"] != UNDEFINED
assert tpf.hdu[1].header["1CTYP5"] == "RA---TAN"
assert tpf.hdu[1].header["2CTYP5"] == "DEC--TAN"
assert tpf.hdu[1].header["1CRPX5"] != UNDEFINED
assert tpf.hdu[1].header["2CRPX5"] != UNDEFINED
assert tpf.hdu[1].header["1CUNI5"] == "deg"
assert tpf.hdu[1].header["2CUNI5"] == "deg"
with warnings.catch_warnings():
# Ignore the warning: "PC1_1 = a floating-point value was expected."
warnings.simplefilter("ignore", AstropyWarning)
assert tpf.wcs.to_header()["CDELT1"] == w.wcs.cdelt[0]
def test_properties2(capfd):
"""Test if the describe function produces an output.
The output is 1870 characters at the moment, but we might add more properties."""
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros)
tpf.show_properties()
out, err = capfd.readouterr()
assert len(out) > 1000
def test_interact():
"""Test the Jupyter notebook interact() widget."""
for tpf in [
KeplerTargetPixelFile(filename_tpf_one_center),
TessTargetPixelFile(filename_tess),
]:
tpf.interact()
@pytest.mark.remote_data
def test_interact_sky():
"""Test the Jupyter notebook interact() widget."""
for tpf in [
KeplerTargetPixelFile(filename_tpf_one_center),
TessTargetPixelFile(filename_tess),
]:
tpf.interact_sky()
@pytest.mark.remote_data # get_model / get_prf_model relies on calibration files on stsci.edu
def test_get_models():
"""Can we obtain PRF and TPF models?"""
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros, quality_bitmask=None)
with warnings.catch_warnings():
# Ignore "RuntimeWarning: All-NaN slice encountered"
warnings.simplefilter("ignore", RuntimeWarning)
tpf.get_model()
tpf.get_prf_model()
@pytest.mark.remote_data
def test_tess_simulation():
"""Can we read simulated TESS data?"""
tpf = TessTargetPixelFile(TESS_SIM)
assert tpf.mission == "TESS"
assert tpf.time.scale == "tdb"
assert tpf.flux.shape == tpf.flux_err.shape
tpf.wcs
col, row = tpf.estimate_centroids()
# Regression test for https://github.com/lightkurve/lightkurve/pull/236
assert (tpf.time.value == 0).sum() == 0
def test_threshold_aperture_mask():
"""Does the threshold mask work?"""
tpf = KeplerTargetPixelFile(filename_tpf_one_center)
tpf.plot(aperture_mask="threshold")
lc = tpf.to_lightcurve(aperture_mask=tpf.create_threshold_mask(threshold=1))
assert (lc.flux.value == 1).all()
# The TESS file shows three pixel regions above a 2-sigma threshold;
# let's make sure the `reference_pixel` argument allows them to be selected.
tpf = TessTargetPixelFile(filename_tess)
assert tpf.create_threshold_mask(threshold=2.0).sum() == 25
assert (
tpf.create_threshold_mask(threshold=2.0, reference_pixel="center").sum() == 25
)
assert tpf.create_threshold_mask(threshold=2.0, reference_pixel=None).sum() == 28
assert tpf.create_threshold_mask(threshold=2.0, reference_pixel=(5, 0)).sum() == 2
# A mask which contains zero-flux pixels should work without crashing
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros)
assert tpf.create_threshold_mask().sum() == 9
def test_tpf_tess():
"""Does a TESS Sector 1 TPF work?"""
tpf = TessTargetPixelFile(filename_tess, quality_bitmask=None)
assert tpf.mission == "TESS"
assert tpf.targetid == 25155310
assert tpf.sector == 1
assert tpf.camera == 4
assert tpf.ccd == 1
assert tpf.pipeline_mask.sum() == 9
assert tpf.background_mask.sum() == 30
lc = tpf.to_lightcurve()
assert isinstance(lc, TessLightCurve)
assert_array_equal(lc.time, tpf.time)
assert tpf.time.scale == "tdb"
assert tpf.flux.shape == tpf.flux_err.shape
tpf.wcs
col, row = tpf.estimate_centroids()
@pytest.mark.parametrize("tpf_type", [KeplerTargetPixelFile, TessTargetPixelFile])
def test_tpf_slicing(tpf_type):
"""Test indexing and slicing of TargetPixelFile objects."""
with warnings.catch_warnings():
# Ignore "LightkurveWarning: A Kepler data product is being opened using the `TessTargetPixelFile` class. Please use `KeplerTargetPixelFile` instead."
warnings.simplefilter("ignore", LightkurveWarning)
tpf = tpf_type(filename_tpf_one_center)
assert tpf[0].time == tpf.time[0]
assert tpf[-1].time == tpf.time[-1]
assert tpf[5:10].shape == tpf.flux[5:10].shape
assert tpf[0].targetid == tpf.targetid
assert_array_equal(tpf[tpf.time < tpf.time[5]].time, tpf.time[0:5])
frame = tpf[5]
assert frame.shape[0] == 1
assert frame.shape[1:] == tpf.shape[1:]
assert_array_equal(frame.time[0], tpf.time[5])
assert_array_equal(frame.flux[0], tpf.flux[5])
frames = tpf[100:200]
assert frames.shape[0] == 100
assert frames.shape[1:] == tpf.shape[1:]
assert_array_equal(frames.time, tpf.time[100:200])
assert_array_equal(frames.flux, tpf.flux[100:200])
def test_endianness():
"""Regression test for https://github.com/lightkurve/lightkurve/issues/188"""
tpf = KeplerTargetPixelFile(filename_tpf_one_center)
tpf.to_lightcurve().to_pandas().describe()
def test_get_keyword():
tpf = KeplerTargetPixelFile(filename_tpf_one_center)
assert tpf.get_keyword("TELESCOP") == "Kepler"
assert tpf.get_keyword("TTYPE1", hdu=1) == "TIME"
assert tpf.get_keyword("DOESNOTEXIST", default=5) == 5
def test_cutout():
"""Test tpf.cutout() function."""
for tpf in [
KeplerTargetPixelFile(filename_tpf_one_center),
TessTargetPixelFile(filename_tess, quality_bitmask=None),
]:
ntpf = tpf.cutout(size=2)
assert ntpf.flux[0].shape == (2, 2)
assert ntpf.flux_err[0].shape == (2, 2)
assert ntpf.flux_bkg[0].shape == (2, 2)
ntpf = tpf.cutout((0, 0), size=3)
ntpf = tpf.cutout(size=(1, 2))
assert ntpf.flux.shape[1] == 2
assert ntpf.flux.shape[2] == 1
ntpf = tpf.cutout(SkyCoord(tpf.ra, tpf.dec, unit="deg"), size=2)
ntpf = tpf.cutout(size=2)
        assert np.prod(ntpf.flux.shape[1:]) == 4  # np.product is deprecated; use np.prod
assert ntpf.targetid == tpf.targetid
def test_aperture_photometry_nan():
"""Regression test for #648.
When FLUX or FLUX_ERR is entirely NaN in a TPF, the resulting light curve
should report NaNs in that cadence rather than zero."""
tpf = read(filename_tpf_one_center)
tpf.hdu[1].data["FLUX"][2] = np.nan
tpf.hdu[1].data["FLUX_ERR"][2] = np.nan
lc = tpf.to_lightcurve(aperture_mask="all")
assert ~np.isnan(lc.flux[1])
assert ~np.isnan(lc.flux_err[1])
assert np.isnan(lc.flux[2])
assert np.isnan(lc.flux_err[2])
#@pytest.mark.remote_data
@pytest.mark.skip # At time of writing, the SkyBot API yields too many intermittent HTTP Errors
def test_SSOs():
# TESS test
tpf = TessTargetPixelFile(asteroid_TPF)
result = tpf.query_solar_system_objects() # default cadence_mask = 'outliers'
assert (
result is None
    )  # the TPF only has data for 1 epoch; the lone time is removed as an outlier
result = tpf.query_solar_system_objects(cadence_mask="all", cache=False)
assert len(result) == 1
result = tpf.query_solar_system_objects(
cadence_mask=np.asarray([True]), cache=False
)
assert len(result) == 1
result = tpf.query_solar_system_objects(cadence_mask=[True], cache=False)
assert len(result) == 1
result = tpf.query_solar_system_objects(cadence_mask=(True), cache=False)
assert len(result) == 1
result, mask = tpf.query_solar_system_objects(
cadence_mask=np.asarray([True]), cache=True, return_mask=True
)
assert len(mask) == len(tpf.flux)
try:
result = tpf.query_solar_system_objects(
cadence_mask="str-not-supported", cache=False
)
pytest.fail("Unsupported cadence_mask should have thrown Error")
except ValueError:
pass
def test_get_header():
"""Test the basic functionality of ``tpf.get_header()``"""
tpf = read(filename_tpf_one_center)
assert tpf.get_header()["CHANNEL"] == tpf.get_keyword("CHANNEL")
assert tpf.get_header(0)["MISSION"] == tpf.get_keyword("MISSION")
assert tpf.get_header(ext=2)["EXTNAME"] == "APERTURE"
# ``tpf.header`` is deprecated
with pytest.warns(LightkurveDeprecationWarning, match="deprecated"):
tpf.header
def test_plot_pixels():
tpf = KeplerTargetPixelFile(filename_tpf_one_center)
tpf.plot_pixels()
tpf.plot_pixels(normalize=True)
tpf.plot_pixels(periodogram=True)
tpf.plot_pixels(periodogram=True, nyquist_factor=0.5)
tpf.plot_pixels(aperture_mask="all")
tpf.plot_pixels(aperture_mask=tpf.pipeline_mask)
tpf.plot_pixels(aperture_mask=tpf.create_threshold_mask())
tpf.plot_pixels(show_flux=True)
tpf.plot_pixels(corrector_func=lambda x: x)
plt.close("all")
@pytest.mark.remote_data
def test_missing_pipeline_mask():
"""Regression test for #791.
TPFs produced by TESSCut contain an empty pipeline mask. When the pipeline
mask is missing or empty, we want `to_lightcurve()` to fall back on the
'threshold' mask by default, to avoid creating a light curve based on zero pixels."""
tpf = search_tesscut("Proxima Cen", sector=12).download(cutout_size=3)
lc = tpf.to_lightcurve()
assert np.isfinite(lc.flux).any()
assert lc.meta.get("APERTURE_MASK", None) == "threshold"
with pytest.raises(ValueError):
# if aperture_mask is explicitly set as pipeline,
# the logic will throw an error as it is missing in the TPF
lc = tpf.to_lightcurve(aperture_mask="pipeline")
def test_cutout_quality_masking():
"""Regression test for #813: Does tpf.cutout() maintain the quality mask?"""
tpf = read(filename_tpf_one_center, quality_bitmask=8192)
tpfcut = tpf.cutout()
assert len(tpf) == len(tpfcut)
def test_parse_numeric_aperture_masks():
"""Regression test for #694: float or int aperture masks should be
interpreted as boolean masks."""
tpf = read(filename_tpf_one_center)
mask = tpf._parse_aperture_mask(np.zeros(tpf.shape[1:], dtype=float))
assert mask.dtype == bool
mask = tpf._parse_aperture_mask(np.zeros(tpf.shape[1:], dtype=int))
assert mask.dtype == bool
def test_tpf_meta():
"""Can we access meta data using tpf.meta?"""
tpf = read(filename_tpf_one_center)
assert tpf.meta.get("MISSION") == "K2"
assert tpf.meta["MISSION"] == "K2"
assert tpf.meta.get("mission", None) is None # key is case in-sensitive
assert tpf.meta.get("CHANNEL") == 45
# ensure meta is read-only view of the underlying self.hdu[0].header
with pytest.raises(TypeError):
tpf.meta["CHANNEL"] = 44
with pytest.raises(TypeError):
tpf.meta["KEY-NEW"] = 44
    # needed for the current internal HduToMetaMapping-based meta, to ensure
    # it has a friendly __repr__() and __str__()
expected = collections.OrderedDict(tpf.meta).__repr__()
assert tpf.meta.__repr__() == expected
assert tpf.meta.__str__() == expected
def test_estimate_background():
"""Verifies tpf.estimate_background()."""
# Create a TPF with 100 electron/second in every pixel
tpf = read(filename_tpf_all_zeros) + 100.0
# The resulting background should be 100 e/s/pixel
bg = tpf.estimate_background(aperture_mask="all")
assert_array_equal(bg.flux.value, 100)
assert bg.flux.unit == tpf.flux.unit / u.pixel
def test_fluxmode():
"""This should verify the median flux use in an aperture"""
tpf = read(filename_tpf_one_center)
lc_n = tpf.extract_aperture_photometry(aperture_mask="all")
lc_sum = tpf.extract_aperture_photometry(aperture_mask="all", flux_method="sum")
lc_med = tpf.extract_aperture_photometry(aperture_mask="all", flux_method="median")
lc_mean = tpf.extract_aperture_photometry(aperture_mask="all", flux_method="mean")
assert lc_n.flux.value[0] == np.nansum(tpf.flux.value[0])
assert lc_sum.flux.value[0] == np.nansum(tpf.flux.value[0])
assert lc_med.flux.value[0] == np.nanmedian(tpf.flux.value[0])
assert lc_mean.flux.value[0] == np.nanmean(tpf.flux.value[0])
def test_animate():
tpf = read(filename_tpf_one_center)
tpf.animate()
| 33,035
| 36.583618
| 159
|
py
|
lightkurve
|
lightkurve-main/tests/conftest.py
|
import os
import tempfile
import pytest
def pytest_runtest_setup(item):
r"""Our tests will often run in headless virtual environments. For this
reason, we enforce the use of matplotlib's robust Agg backend, because it
does not require a graphical display.
This avoids errors such as:
c:\hostedtoolcache\windows\python\3.7.5\x64\lib\tkinter\__init__.py:2023: TclError
This probably means that tk wasn't installed properly.
"""
import matplotlib
matplotlib.use("Agg")
# Add a marker @pytest.mark.memtest
# - used to mark tests that stress memory, typically done by limiting the memory Python can use
# - thus they should be run in isolation.
#
# - skipped by default
# - tests marked as such can be run by "-m memtest" option
def pytest_configure(config):
config.addinivalue_line(
"markers", "memtest: mark memory usage tests that need to be run in isolation"
)
def pytest_collection_modifyitems(config, items):
keywordexpr = config.option.keyword
markexpr = config.option.markexpr
if keywordexpr or markexpr:
return # let pytest handle this
skip_memtest = pytest.mark.skip(reason='memtest skipped, need -m memtest option to run')
for item in items:
if 'memtest' in item.keywords:
item.add_marker(skip_memtest)
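# A minimal usage sketch (hypothetical test name, based on the marker defined
# above): a test decorated with @pytest.mark.memtest is skipped by default and
# runs only when invoked as "pytest -m memtest":
#
#     @pytest.mark.memtest
#     def test_large_allocation():
#         ...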
# Make sure we use temporary directories for the config and cache
# so that the tests are insensitive to local configuration.
os.environ['XDG_CONFIG_HOME'] = tempfile.mkdtemp('lightkurve_config')
os.mkdir(os.path.join(os.environ['XDG_CONFIG_HOME'], 'lightkurve'))
# Let users optionally specify XDG_CACHE_HOME for a test run
# use case: in a local dev env, a user might want to reuse an existing dir for cache,
# so as to speed up remote-data tests
if os.environ.get('XDG_CACHE_HOME', '') == '':
os.environ['XDG_CACHE_HOME'] = tempfile.mkdtemp('lightkurve_cache')
else:
print(f"lightkurve conftest: Use user-specified XDG_CACHE_HOME: {os.environ['XDG_CACHE_HOME']}")
_cache_dir = os.path.join(os.environ['XDG_CACHE_HOME'], 'lightkurve')
if not os.path.isdir(_cache_dir):
os.mkdir(_cache_dir)
| 2,138
| 33.5
| 100
|
py
|
lightkurve
|
lightkurve-main/tests/test_interact.py
|
"""Tests the features of the lightkurve.interact module."""
import warnings
from astropy.utils.data import get_pkg_data_filename
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from lightkurve import LightkurveWarning, LightkurveError
from lightkurve.search import search_targetpixelfile
from lightkurve.targetpixelfile import KeplerTargetPixelFile, TessTargetPixelFile
from .test_targetpixelfile import filename_tpf_tabby_lite
from lightkurve.interact import get_lightcurve_y_limits
bad_optional_imports = False
try:
import bokeh
from bokeh.plotting import ColumnDataSource
except ImportError:
bad_optional_imports = True
example_tpf = get_pkg_data_filename("data/tess25155310-s01-first-cadences.fits.gz")
example_tpf_kepler = get_pkg_data_filename("data/test-tpf-kplr-tabby-first-cadence.fits")
example_tpf_tess = get_pkg_data_filename("data/tess25155310-s01-first-cadences.fits.gz")
example_tpf_tesscut = get_pkg_data_filename("data/test-tpf-tesscut_1x1.fits")
# Headers PMRA, PMDEC, PMTOTAL are removed
example_tpf_no_pm = get_pkg_data_filename("data/tess25155310-s01-first-cadences_no_pm.fits.gz")
# Headers for PM, ra/dec, and equinox all removed
example_tpf_no_target_position = get_pkg_data_filename("data/tess25155310-s01-first-cadences_no_target_position.fits.gz")
def test_bokeh_import_error(caplog):
"""If bokeh is not installed (optional dependency),
is a friendly error message printed?"""
try:
import bokeh
except ImportError:
tpf = TessTargetPixelFile(example_tpf)
tpf.interact()
assert "requires the `bokeh` Python package" in caplog.text
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_malformed_notebook_url():
"""Test if malformed notebook_urls raise proper exceptions."""
import bokeh
tpf = TessTargetPixelFile(example_tpf)
with pytest.raises(ValueError) as exc:
tpf.interact(notebook_url="")
assert "Empty host value" in exc.value.args[0]
with pytest.raises(AttributeError) as exc:
tpf.interact(notebook_url=None)
assert "object has no attribute" in exc.value.args[0]
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_graceful_exit_outside_notebook():
"""Test if running interact outside of a notebook does fails gracefully."""
import bokeh
tpf = TessTargetPixelFile(example_tpf)
result = tpf.interact()
assert result is None
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_custom_aperture_mask():
"""Can we provide a custom lightcurve to show?"""
with warnings.catch_warnings():
# Ignore the "TELESCOP is not equal to TESS" warning
warnings.simplefilter("ignore", LightkurveWarning)
tpfs = [KeplerTargetPixelFile(filename_tpf_tabby_lite), TessTargetPixelFile(example_tpf)]
import bokeh
for tpf in tpfs:
mask = tpf.flux[0, :, :] == tpf.flux[0, :, :]
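        # (x == x is True for every finite pixel and False wherever the flux is
        # NaN, so this builds an "all valid pixels" aperture mask.)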
tpf.interact(aperture_mask=mask)
mask = None
tpf.interact(aperture_mask=mask)
mask = "threshold"
tpf.interact(aperture_mask=mask)
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_custom_exported_filename():
"""Can we provide a custom lightcurve to show?"""
import bokeh
with warnings.catch_warnings():
# Ignore the "TELESCOP is not equal to TESS" warning
warnings.simplefilter("ignore", LightkurveWarning)
tpfs = [KeplerTargetPixelFile(filename_tpf_tabby_lite), TessTargetPixelFile(example_tpf)]
for tpf in tpfs:
tpf.interact(exported_filename="demo.fits")
tpf[0:2].interact()
tpf[0:2].interact(exported_filename="string_only")
tpf[0:2].interact(exported_filename="demo2.FITS")
tpf[0:2].interact(exported_filename="demo3.png")
tpf[0:2].interact(exported_filename="")
tpf.interact(exported_filename=210690913)
mask = tpf.time == tpf.time
tpf[mask].interact()
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_transform_and_ylim_funcs():
"""Test the transform_func and ylim_func"""
with warnings.catch_warnings():
# Ignore the "TELESCOP is not equal to TESS" warning
warnings.simplefilter("ignore", LightkurveWarning)
tpfs = [KeplerTargetPixelFile(filename_tpf_tabby_lite), TessTargetPixelFile(example_tpf)]
for tpf in tpfs:
tpf.interact(transform_func=lambda lc: lc.normalize())
tpf.interact(transform_func=lambda lc: lc.flatten().normalize())
tpf.interact(transform_func=lambda lc: lc, ylim_func=lambda lc: (0, 2))
tpf.interact(ylim_func=lambda lc: (0, lc.flux.max()))
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_interact_functions():
"""Do the helper functions in the interact module run without syntax error?"""
import bokeh
from lightkurve.interact import (
prepare_tpf_datasource,
prepare_lightcurve_datasource,
aperture_mask_from_selected_indices,
get_lightcurve_y_limits,
make_lightcurve_figure_elements,
make_tpf_figure_elements,
show_interact_widget,
)
tpf = TessTargetPixelFile(example_tpf)
mask = tpf.flux[0, :, :] == tpf.flux[0, :, :]
# make the mask a bit more realistic
mask[0, 0] = False
mask[1, 2] = False
tpf_source = prepare_tpf_datasource(tpf, aperture_mask=mask)
# https://github.com/lightkurve/lightkurve/issues/990
    # ensure proper 2D-to-1D conversion
assert tpf_source.data["xx"].ndim == 1
assert tpf_source.data["yy"].ndim == 1
    # For bokeh v3, .indices needs to be a plain list.
# cf. https://github.com/bokeh/bokeh/issues/12624
assert isinstance(tpf_source.selected.indices, list)
# the lower-level function aperture_mask_from_selected_indices() is used in
# callback _create_lightcurve_from_pixels(), which cannot be easily tested.
# So we directly test it instead.
assert_array_equal(aperture_mask_from_selected_indices(tpf_source.selected.indices, tpf), mask)
lc = tpf.to_lightcurve(aperture_mask=mask)
lc_source = prepare_lightcurve_datasource(lc)
get_lightcurve_y_limits(lc_source)
make_lightcurve_figure_elements(lc, lc_source)
def ylim_func_sample(lc):
return (np.nanpercentile(lc.flux, 0.1), np.nanpercentile(lc.flux, 99.9))
make_lightcurve_figure_elements(lc, lc_source, ylim_func=ylim_func_sample)
def ylim_func_unitless(lc):
return (
np.nanpercentile(lc.flux, 0.1).value,
np.nanpercentile(lc.flux, 99.9).value,
)
make_lightcurve_figure_elements(lc, lc_source, ylim_func=ylim_func_unitless)
make_tpf_figure_elements(tpf, tpf_source)
show_interact_widget(tpf)
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
@pytest.mark.filterwarnings("ignore:Proper motion correction cannot be applied to the target") # for TESSCut
@pytest.mark.parametrize("tpf_class, tpf_file, aperture_mask", [
(TessTargetPixelFile, example_tpf_tess, "pipeline"),
(TessTargetPixelFile, example_tpf_tesscut, "empty"),
(KeplerTargetPixelFile, example_tpf_kepler, "threshold"),
(TessTargetPixelFile, example_tpf_no_pm, "default"),
])
def test_interact_sky_functions(tpf_class, tpf_file, aperture_mask):
"""Do the helper functions in the interact module run without syntax error?"""
import bokeh
from lightkurve.interact import (
prepare_tpf_datasource,
make_tpf_figure_elements,
add_gaia_figure_elements,
)
tpf = tpf_class(tpf_file)
mask = tpf._parse_aperture_mask(aperture_mask)
tpf_source = prepare_tpf_datasource(tpf, aperture_mask=mask)
fig1, slider1 = make_tpf_figure_elements(tpf, tpf_source, tpf_source_selectable=False)
add_gaia_figure_elements(tpf, fig1)
add_gaia_figure_elements(tpf, fig1, magnitude_limit=22)
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_interact_sky_functions_case_no_target_coordinate():
import bokeh
from lightkurve.interact import (
prepare_tpf_datasource,
make_tpf_figure_elements,
add_gaia_figure_elements,
)
tpf_class, tpf_file = TessTargetPixelFile, example_tpf_no_target_position
tpf = tpf_class(tpf_file)
mask = tpf.flux[0, :, :] == tpf.flux[0, :, :]
tpf_source = prepare_tpf_datasource(tpf, aperture_mask=mask)
fig1, slider1 = make_tpf_figure_elements(tpf, tpf_source)
with pytest.raises(LightkurveError, match=r".* no valid coordinate.*"):
add_gaia_figure_elements(tpf, fig1)
@pytest.mark.remote_data
def test_interact_sky_functions_add_nearby_tics():
"""Test the backend of interact_sky() that combine Nearby TIC report with Gaia result."""
from lightkurve.interact import (
_get_nearby_gaia_objects,
_add_nearby_tics_if_tess,
)
# This TIC's nearby report has a mix of stars with Gaia and without Gaia IDs.
# https://exofop.ipac.caltech.edu/tess/nearbytarget.php?id=233087860
tpf = search_targetpixelfile("TIC233087860", mission="TESS")[0].download()
magnitude_limit = 17
df_before = _get_nearby_gaia_objects(tpf, magnitude_limit)
df, source_colnames_extras, tooltips_extras = _add_nearby_tics_if_tess(tpf, magnitude_limit, df_before)
# based on what we know about the nearby report of the specific TIC,
# some existing Gaia entries are added with tic data
assert len(df[(df['Source'] > 0) & (df['tic'] != '')]) > 0
# Some new entries with data only from TIC nearby report are added (hence no Gaia info)
assert len(df[(df['Source'] == 0) & (df['tic'] != '')]) > 0
@pytest.mark.remote_data
def test_interact_sky_functions_add_nearby_tics_weird_dtype():
"""Test the backend of interact_sky() that combine Nearby TIC report with Gaia result.
Case the dtype from Gaia result dataframe is weird.
"""
from lightkurve.interact import (
_get_nearby_gaia_objects,
_add_nearby_tics_if_tess,
)
# For this TIC, the dataframe from Gaia search has weird DType:
# df['Source'].dtype is an instance of pd.Int64Dtype, not the type class itself.
# existing type check logic with np.issubdtype() fails with TypeError: Cannot interpret 'Int64Dtype()' as a data type
tpf = search_targetpixelfile("TIC135100529", mission="TESS")[0].download()
magnitude_limit = 18
df_before = _get_nearby_gaia_objects(tpf, magnitude_limit)
df, source_colnames_extras, tooltips_extras = _add_nearby_tics_if_tess(tpf, magnitude_limit, df_before)
# some TICs are added successfully, without any error raised.
    assert (df['tic'] != '').sum() > 0
@pytest.mark.remote_data
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_interact_sky_functions_case_nearby_tics_failed(monkeypatch):
"""Test to ensure in case Nearby TIC service from ExoFOP not available,
interact_sky will still function (without the TIC information) rather
than raising exceptions.
"""
import bokeh
from lightkurve.interact import (
prepare_tpf_datasource,
make_tpf_figure_elements,
add_gaia_figure_elements,
)
import lightkurve.interact as lk_interact
def mock_raise(*args):
raise IOError("simulated service unavailable")
monkeypatch.setattr(lk_interact, "_search_nearby_of_tess_target", mock_raise)
tpf = TessTargetPixelFile(example_tpf_tess)
mask = tpf.flux[0, :, :] == tpf.flux[0, :, :]
tpf_source = prepare_tpf_datasource(tpf, aperture_mask=mask)
fig1, slider1 = make_tpf_figure_elements(tpf, tpf_source)
with pytest.warns(LightkurveWarning, match="cannot obtain nearby TICs"):
add_gaia_figure_elements(tpf, fig1)
@pytest.mark.skipif(bad_optional_imports, reason="requires bokeh")
def test_ylim_with_nans():
"""Regression test for #679: y limits should not be NaN."""
lc_source = ColumnDataSource({"flux": [-1, np.nan, 1]})
ymin, ymax = get_lightcurve_y_limits(lc_source)
# ymin/ymax used to return nan, make sure this is no longer the case
assert ymin == -1.176
assert ymax == 1.176
| 12,240
| 39.39934
| 121
|
py
|
lightkurve
|
lightkurve-main/tests/test_correctors.py
|
import pytest
@pytest.mark.remote_data
def test_to_corrector():
"""Does the tpf.to_corrector('pld') convenience method work?"""
from lightkurve import KeplerTargetPixelFile
from .test_targetpixelfile import TABBY_TPF
tpf = KeplerTargetPixelFile(TABBY_TPF)
lc = tpf.to_corrector("pld").correct()
assert len(lc.flux) == len(tpf.time)
| 359
| 26.692308
| 67
|
py
|
lightkurve
|
lightkurve-main/tests/test_lightcurve.py
|
from astropy.io import fits as pyfits
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.masked import Masked
from astropy import units as u
from astropy.table import Table, Column, MaskedColumn
from astropy.time import Time, TimeDelta
from astropy.timeseries import aggregate_downsample
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal, assert_allclose, assert_equal
import pytest
import tempfile
import warnings
from lightkurve.io import read
from lightkurve.lightcurve import LightCurve, KeplerLightCurve, TessLightCurve
from lightkurve.lightcurvefile import KeplerLightCurveFile, TessLightCurveFile
from lightkurve.targetpixelfile import KeplerTargetPixelFile, TessTargetPixelFile
from lightkurve.utils import LightkurveWarning, LightkurveDeprecationWarning
from lightkurve.search import search_lightcurve
from lightkurve.collections import LightCurveCollection
from .test_targetpixelfile import TABBY_TPF
_HAS_VAR_BINS = 'time_bin_end' in aggregate_downsample.__kwdefaults__
# 8th Quarter of Tabby's star
TABBY_Q8 = (
"https://archive.stsci.edu/missions/kepler/lightcurves"
"/0084/008462852/kplr008462852-2011073133259_llc.fits"
)
K2_C08 = (
"https://archive.stsci.edu/missions/k2/lightcurves/c8/"
"220100000/39000/ktwo220139473-c08_llc.fits"
)
KEPLER10 = (
"https://archive.stsci.edu/missions/kepler/lightcurves/"
"0119/011904151/kplr011904151-2010009091648_llc.fits"
)
TESS_SIM = (
"https://archive.stsci.edu/missions/tess/ete-6/tid/00/000/"
"004/104/tess2019128220341-0000000410458113-0016-s_lc.fits"
)
filename_tess = get_pkg_data_filename("data/tess25155310-s01-first-cadences.fits.gz")
filename_tess_custom = get_pkg_data_filename(
"data/test_TESS_interact_generated_custom-lc.fits"
)
filename_K2_custom = get_pkg_data_filename(
"data/test_K2_interact_generated_custom-lc.fits"
)
# `asteroid_test.fits` is a single cadence of TESS FFI data which contains a known solar system object
asteroid_TPF = get_pkg_data_filename("data/asteroid_test.fits")
def test_invalid_lightcurve():
"""Invalid LightCurves should not be allowed."""
time = np.array([1, 2, 3, 4, 5])
flux = np.array([1, 2, 3, 4])
with pytest.raises(ValueError) as err:
LightCurve(time=time, flux=flux)
assert err.value.args[0] == "Inconsistent data column lengths"
def test_lc_nan_time():
time = np.array([1, 2, 3, np.nan])
flux = np.array([1, 2, 3, 4])
with pytest.raises(ValueError):
LightCurve(time=time, flux=flux)
def test_math_operators():
lc = LightCurve(
time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5)
)
lc_add = lc + 1
lc_sub = lc - 1
lc_mul = lc * 2
lc_div = lc / 2
assert_array_equal(lc_add.flux, lc.flux + 1)
assert_array_equal(lc_sub.flux, lc.flux - 1)
assert_array_equal(lc_mul.flux, lc.flux * 2)
assert_array_equal(lc_div.flux, lc.flux / 2)
def test_math_operators_on_objects():
lc1 = LightCurve(
time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5)
)
lc2 = LightCurve(
time=np.arange(1, 5), flux=np.arange(11, 15), flux_err=np.arange(1, 5)
)
assert_array_equal((lc1 + lc2).flux, lc1.flux + lc2.flux)
assert_array_equal((lc1 - lc2).flux, lc1.flux - lc2.flux)
assert_array_equal((lc1 * lc2).flux, lc1.flux * lc2.flux)
assert_array_equal((lc1 / lc2).flux, lc1.flux / lc2.flux)
# Change order
assert_array_equal((lc2 + lc1).flux, lc2.flux + lc1.flux)
assert_array_equal((lc2 - lc1).flux, lc2.flux - lc1.flux)
assert_array_equal((lc2 * lc1).flux, lc2.flux * lc1.flux)
assert_array_equal((lc2 / lc1).flux, lc2.flux / lc1.flux)
# LightCurve objects can only be added or multiplied if they have equal length
with pytest.raises(ValueError):
lc = lc1 + lc1[0:-5]
with pytest.raises(ValueError):
lc = lc1 * lc1[0:-5]
def test_rmath_operators():
lc = LightCurve(
time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5)
)
lc_add = 1 + lc
lc_sub = 1 - lc
lc_mul = 2 * lc
lc_div = 2 / lc
assert_array_equal(lc_add.flux, lc.flux + 1)
assert_array_equal(lc_sub.flux, 1 - lc.flux)
assert_array_equal(lc_mul.flux, lc.flux * 2)
assert_array_equal(lc_div.flux, 2 / lc.flux)
def test_math_operators_on_units():
lc = LightCurve(
time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5)
)
lc_mul = lc * u.pixel
lc_div = lc / u.pixel
assert lc_mul.flux.unit == "pixel"
assert lc_mul.flux_err.unit == "pixel"
assert lc_div.flux.unit == 1 / u.pixel
assert lc_div.flux_err.unit == 1 / u.pixel
def test_math_regression_925():
"""Regression test for #925: left hand side multiplication with a np.float failed."""
lc = LightCurve(time=[1, 2, 3], flux=[1, 1, 1], flux_err=[1, 1, 1])
for three in [3, 3.0, np.float64(3), u.Quantity(3.)]:
# left hand side multiplication with a numpy float failed in the past, cf. #925
assert all((three * lc).flux == 3)
assert all((lc * three).flux == 3)
assert all((three + lc).flux == 4)
assert all((lc + three).flux == 4)
@pytest.mark.remote_data
@pytest.mark.parametrize("path, mission", [(TABBY_Q8, "Kepler"), (K2_C08, "K2")])
def test_KeplerLightCurveFile(path, mission):
lc = KeplerLightCurveFile(path, flux_column="sap_flux", quality_bitmask=None)
assert lc.obsmode == "long cadence"
assert len(lc.pos_corr1) == len(lc.pos_corr2)
assert lc.mission.lower() == mission.lower()
if lc.mission.lower() == "kepler":
assert lc.meta.get("CAMPAIGN") is None
assert lc.quarter == 8
elif lc.mission.lower() == "k2":
assert lc.campaign == 8
assert lc.meta.get("QUARTER") is None
assert lc.time.format == "bkjd"
assert lc.time.scale == "tdb"
assert lc.flux.unit == u.electron / u.second
assert lc.meta["FLUX_ORIGIN"] == "sap_flux"
# Does the data match what one would obtain using pyfits.open?
hdu = pyfits.open(path)
assert lc.label == hdu[0].header["OBJECT"]
nanmask = ~np.isnan(hdu[1].data["TIME"])
assert_array_equal(lc.time.value, hdu[1].data["TIME"][nanmask])
assert_array_equal(lc.flux.value, hdu[1].data["SAP_FLUX"][nanmask])
@pytest.mark.remote_data
@pytest.mark.parametrize(
"quality_bitmask", ["hardest", "hard", "default", None, 1, 100, 2096639]
)
def test_TessLightCurveFile(quality_bitmask):
lc = TessLightCurveFile(
TESS_SIM, quality_bitmask=quality_bitmask, flux_column="sap_flux"
)
hdu = pyfits.open(TESS_SIM)
assert lc.mission == "TESS"
assert lc.label == hdu[0].header["OBJECT"]
assert lc.time.format == "btjd"
assert lc.time.scale == "tdb"
assert lc.flux.unit == u.electron / u.second
assert lc.sector == hdu[0].header["SECTOR"]
assert lc.camera == hdu[0].header["CAMERA"]
assert lc.ccd == hdu[0].header["CCD"]
assert lc.ra == hdu[0].header["RA_OBJ"]
assert lc.dec == hdu[0].header["DEC_OBJ"]
assert lc.meta["FLUX_ORIGIN"] == "sap_flux"
assert_array_equal(lc.time[0:10].value, hdu[1].data["TIME"][0:10])
assert_array_equal(lc.flux[0:10].value, hdu[1].data["SAP_FLUX"][0:10])
# Regression test for https://github.com/lightkurve/lightkurve/pull/236
assert np.isnan(lc.time.value).sum() == 0
@pytest.mark.remote_data
@pytest.mark.parametrize(
"quality_bitmask, answer",
[
("hardest", 2661),
("hard", 2706),
("default", 3113),
(None, 3143),
(1, 3143),
(100, 3116),
(2096639, 2661),
],
)
def test_bitmasking(quality_bitmask, answer):
"""Test whether the bitmasking behaves like it should"""
lc = read(TABBY_Q8, quality_bitmask=quality_bitmask)
assert len(lc) == answer
def test_lightcurve_fold():
"""Test the ``LightCurve.fold()`` method."""
lc = KeplerLightCurve(
time=np.linspace(0, 10, 100),
flux=np.zeros(100) + 1,
targetid=999,
label="mystar",
meta={"CCD": 2},
)
fold = lc.fold(period=1)
assert_almost_equal(fold.phase[0], -0.5, 2)
assert_almost_equal(np.min(fold.phase), -0.5, 2)
assert_almost_equal(np.max(fold.phase), 0.5, 2)
assert fold.targetid == lc.targetid
assert fold.label == lc.label
assert set(lc.meta).issubset(set(fold.meta))
assert lc.meta["CCD"] == fold.meta["CCD"]
assert_array_equal(np.sort(fold.time_original), lc.time)
assert len(fold.time_original) == len(lc.time)
fold = lc.fold(period=1, epoch_time=-0.1)
assert_almost_equal(fold.time[0], -0.5, 2)
assert_almost_equal(np.min(fold.phase), -0.5, 2)
assert_almost_equal(np.max(fold.phase), 0.5, 2)
with warnings.catch_warnings():
# `transit_midpoint` is deprecated and its use will emit a warning
warnings.simplefilter("ignore", LightkurveWarning)
fold = lc.fold(period=1, transit_midpoint=-0.1)
assert_almost_equal(fold.time[0], -0.5, 2)
ax = fold.plot()
assert "Phase" in ax.get_xlabel()
ax = fold.scatter()
assert "Phase" in ax.get_xlabel()
ax = fold.errorbar()
assert "Phase" in ax.get_xlabel()
plt.close("all")
# bad transit midpoint should give a warning
    # if the user tries a t0 in JD but the time is in BKJD
with pytest.warns(LightkurveWarning, match="appears to be given in JD"):
lc.fold(10, 2456600)
@pytest.mark.parametrize(
"normalize_phase", [False, True]
)
def test_lightcurve_fold_odd_even_masks(normalize_phase):
"""Test for FoldedLightCurve odd/even mask. See #1104. """
# a sine curve with 4-day period, with minimum at day 3, 7, ...
epoch_time, period = 3, 4
lc = LightCurve(
time=np.linspace(0, 10, 100),
targetid=999,
label="mystar",
meta={"CCD": 2},
)
lc.flux = np.sin((period * 0.75 + lc.time.value - epoch_time) * 2 * np.pi / period)
    # epoch_phase should only shift how the folded lightcurve is displayed,
    # but not affect the actual odd/even mask calculation
fold = lc.fold(period=period, epoch_time=epoch_time, epoch_phase=0.5, normalize_phase=normalize_phase)
odd = fold.odd_mask
even = fold.even_mask
assert len(odd) == len(fold.time)
assert np.all(odd == ~even)
# cycle 0: time [0, 1)
# cycle 1: time [1, 5)
# cycle 2: time [5, 9)
# cycle 3: time [9, 10]
def create_expected_even(times):
def _mask(t):
if t < 1 or (5 <= t and t < 9):
return True
return False
return np.array([_mask(t) for t in fold.time_original.value])
def create_expected_cycle(times):
def _cycle(t):
if t < 1:
return 0
elif 1 <= t < 5:
return 1
elif 5 <= t < 9:
return 2
else:
return 3
return np.array([_cycle(t) for t in fold.time_original.value])
even_expected = create_expected_even(fold)
assert_array_equal(even, even_expected)
assert_array_equal(fold.cycle, create_expected_cycle(fold))
    # the following plot is only useful for visualizing the result,
    # say, when someone copies the test into a Jupyter notebook to run it
ax = lc.plot()
fold_e = fold[fold.even_mask]
ax.scatter(fold_e.time_original.value, fold_e.flux, label="actual")
ax.legend()
ax = lc.plot()
fold_e = fold[even_expected]
ax.scatter(fold_e.time_original.value, fold_e.flux, label="expected")
ax.legend()
plt.close("all")
def test_lightcurve_fold_issue520():
"""Regression test for #520; accept quantities in `fold()`."""
lc = LightCurve(time=np.linspace(0, 10, 100), flux=np.zeros(100) + 1)
lc.fold(period=1 * u.day, epoch_time=5 * u.day)
def test_lightcurve_append():
"""Test ``LightCurve.append()``."""
lc = LightCurve(time=[1, 2, 3], flux=[1, 0.5, 1], flux_err=[0.1, 0.2, 0.3])
lc = lc.append(lc)
assert_array_equal(lc.time.value, 2 * [1, 2, 3])
assert_array_equal(lc.flux, 2 * [1, 0.5, 1])
assert_array_equal(lc.flux_err, 2 * [0.1, 0.2, 0.3])
# KeplerLightCurve has extra data
lc = KeplerLightCurve(
time=[1, 2, 3],
flux=[1, 0.5, 1],
centroid_col=[4, 5, 6],
centroid_row=[7, 8, 9],
cadenceno=[10, 11, 12],
quality=[10, 20, 30],
)
lc = lc.append(lc)
assert_array_equal(lc.time.value, 2 * [1, 2, 3])
assert_array_equal(lc.flux, 2 * [1, 0.5, 1])
assert_array_equal(lc.centroid_col, 2 * [4, 5, 6])
assert_array_equal(lc.centroid_row, 2 * [7, 8, 9])
assert_array_equal(lc.cadenceno, 2 * [10, 11, 12])
assert_array_equal(lc.quality, 2 * [10, 20, 30])
def test_lightcurve_append_multiple():
"""Test ``LightCurve.append()`` for multiple lightcurves at once."""
lc = LightCurve(time=[1, 2, 3], flux=[1, 0.5, 1])
lc = lc.append([lc, lc, lc])
assert_array_equal(lc.flux, 4 * [1, 0.5, 1])
assert_array_equal(lc.time.value, 4 * [1, 2, 3])
def test_lightcurve_copy():
"""Test ``LightCurve.copy()``."""
time = np.array([1, 2, 3, 4])
flux = np.array([1, 2, 3, 4])
error = np.array([0.1, 0.2, 0.3, 0.4])
lc = LightCurve(time=time, flux=flux, flux_err=error)
nlc = lc.copy()
assert_array_equal(lc.time, nlc.time)
assert_array_equal(lc.flux, nlc.flux)
assert_array_equal(lc.flux_err, nlc.flux_err)
nlc.time[1] = 5
nlc.flux[1] = 6
nlc.flux_err[1] = 7
# By changing 1 of the 4 data points in the new lightcurve's array-like
# attributes, we expect assert_array_equal to raise an AssertionError
# indicating a mismatch of 1/4 (or 25%).
with pytest.raises(AssertionError, match=r"ismatch.*25"):
assert_array_equal(lc.time, nlc.time)
with pytest.raises(AssertionError, match=r"ismatch.*25"):
assert_array_equal(lc.flux, nlc.flux)
with pytest.raises(AssertionError, match=r"ismatch.*25"):
assert_array_equal(lc.flux_err, nlc.flux_err)
# KeplerLightCurve has extra data
lc = KeplerLightCurve(
time=[1, 2, 3],
flux=[1, 0.5, 1],
centroid_col=[4, 5, 6],
centroid_row=[7, 8, 9],
cadenceno=[10, 11, 12],
quality=[10, 20, 30],
)
nlc = lc.copy()
assert_array_equal(lc.time, nlc.time)
assert_array_equal(lc.flux, nlc.flux)
assert_array_equal(lc.centroid_col, nlc.centroid_col)
assert_array_equal(lc.centroid_row, nlc.centroid_row)
assert_array_equal(lc.cadenceno, nlc.cadenceno)
assert_array_equal(lc.quality, nlc.quality)
nlc.time[1] = 6
nlc.flux[1] = 7
nlc.centroid_col[1] = 8
nlc.centroid_row[1] = 9
nlc.cadenceno[1] = 10
nlc.quality[1] = 11
# As before, by changing 1/3 data points, we expect a mismatch of 33.3%
# with a repeating decimal. However, float precision for python 2.7 is 10
# decimal digits, while python 3.6's is 13 decimal digits. Therefore,
# a regular expression is needed for both versions.
with pytest.raises(AssertionError, match=r"ismatch.*33\.3+"):
assert_array_equal(lc.time, nlc.time)
with pytest.raises(AssertionError, match=r"ismatch.*33\.3+"):
assert_array_equal(lc.flux, nlc.flux)
with pytest.raises(AssertionError, match=r"ismatch.*33\.3+"):
assert_array_equal(lc.centroid_col, nlc.centroid_col)
with pytest.raises(AssertionError, match=r"ismatch.*33\.3+"):
assert_array_equal(lc.centroid_row, nlc.centroid_row)
with pytest.raises(AssertionError, match=r"ismatch.*33\.3+"):
assert_array_equal(lc.cadenceno, nlc.cadenceno)
with pytest.raises(AssertionError, match=r"ismatch.*33\.3+"):
assert_array_equal(lc.quality, nlc.quality)
@pytest.mark.parametrize(
"path, mission", [(filename_tess_custom, "TESS"), (filename_K2_custom, "K2")]
)
def test_custom_lightcurve_file(path, mission):
"""Test whether we can read in custom interact()-produced lightcurvefiles"""
if mission == "K2":
lc = KeplerLightCurve.read(path)
elif mission == "TESS":
lc = TessLightCurve.read(path)
assert lc.cadenceno[0] >= 0
assert lc.dec == lc.dec
assert lc.time[-1] > lc.time[0]
assert len(lc.flux) > 0
assert lc.mission.lower() == mission.lower()
# Does the data match what one would obtain using pyfits.open?
hdu = pyfits.open(path)
assert lc.label == hdu[0].header["OBJECT"]
assert_array_equal(lc.time.value, hdu[1].data["TIME"])
assert_array_equal(lc.flux.value, hdu[1].data["FLUX"])
# TESS has QUALITY while Kepler/K2 has SAP_QUALITY:
if mission == "TESS":
assert "QUALITY" in hdu[1].columns.names
assert_array_equal(lc.quality, hdu[1].data["QUALITY"])
if mission in ["K2", "Kepler"]:
assert "SAP_QUALITY" in hdu[1].columns.names
assert_array_equal(lc.quality, hdu[1].data["SAP_QUALITY"])
@pytest.mark.remote_data
def test_lightcurve_plots():
"""Sanity check to verify that lightcurve plotting works"""
for lc in [KeplerLightCurve.read(TABBY_Q8), TessLightCurve.read(TESS_SIM)]:
lc.plot()
lc.scatter()
lc.errorbar()
lc.plot()
lc.plot(normalize=False, title="Not the default")
lc.scatter()
lc.scatter(c="C3")
lc.scatter(c=lc.time.value, show_colorbar=True, colorbar_label="Time")
lc.plot(column="sap_flux")
lc.plot(column="sap_bkg", normalize=True)
lc.plot(column="cadenceno")
lc.errorbar(column="psf_centr1")
lc.errorbar(column="timecorr")
plt.close("all")
@pytest.mark.remote_data
def test_lightcurve_scatter():
"""Sanity check to verify that lightcurve scatter plotting works"""
lc = KeplerLightCurve.read(KEPLER10)
lc = lc.flatten()
# get an array of original times, in the same order as the folded lightcurve
foldkw = dict(period=0.837491)
originaltime = LightCurve(time=lc.time, flux=lc.flux)
foldedtimeinorder = originaltime.fold(**foldkw).flux
# plot a grid of phase-folded and not, with colors
fi, ax = plt.subplots(2, 2, figsize=(10, 6), sharey=True, sharex="col")
scatterkw = dict(s=5, cmap="winter")
lc.scatter(ax=ax[0, 0])
lc.fold(**foldkw).scatter(ax=ax[0, 1])
lc.scatter(ax=ax[1, 0], c=lc.time.value, **scatterkw)
lc.fold(**foldkw).scatter(ax=ax[1, 1], c=foldedtimeinorder, **scatterkw)
plt.ylim(0.999, 1.001)
def test_lightcurve_plots_unitless():
"""Sanity check to verify that lightcurve plotting works when data is unitless."""
lc = LightCurve(time=np.arange(10))
# make flux non-uniform to avoid warnings with clip_outliers=True during test
lc.flux = np.append(np.zeros(3), np.ones(7))
lc.flux_err = np.zeros(10) # need flux_err to avoid warnings
lc.plot()
lc.scatter()
lc.errorbar()
lc.plot(normalize=True, clip_outliers=True)
plt.close("all")
def test_cdpp():
"""Test the basics of the CDPP noise metric."""
# A flat lightcurve should have a CDPP close to zero
lc = LightCurve(time=np.arange(200), flux=np.ones(200))
assert_almost_equal(lc.estimate_cdpp(), 0)
# An artificial lightcurve with sigma=100ppm should have cdpp=100ppm
lc = LightCurve(
time=np.arange(10000), flux=np.random.normal(loc=1, scale=100e-6, size=10000)
)
assert_almost_equal(lc.estimate_cdpp(transit_duration=1).value, 100, decimal=-0.5)
# Transit_duration must be an integer (cadences)
with pytest.raises(ValueError):
lc.estimate_cdpp(transit_duration=6.5)
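# (CDPP = Combined Differential Photometric Precision, the Kepler pipeline's
# noise metric, expressed in parts per million over a given transit duration.)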
@pytest.mark.remote_data
def test_cdpp_tabby():
"""Compare the cdpp noise metric against the pipeline value."""
lc = KeplerLightCurve.read(TABBY_Q8)
# Tabby's star shows dips after cadence 1000 which increase the cdpp
lc2 = LightCurve(time=lc.time[:1000], flux=lc.flux[:1000])
assert np.abs(lc2.estimate_cdpp().value - lc.cdpp6_0) < 30
def test_bin():
"""Does binning work?"""
with warnings.catch_warnings(): # binsize is deprecated
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
lc = LightCurve(
time=np.arange(10), flux=2 * np.ones(10), flux_err=2 ** 0.5 * np.ones(10)
)
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.flux, 2 * np.ones(5))
        # The standard error changed because, with the initial workaround for
        # `binsize` in 2.x, the first bin gets 3 points and the last only one!
if _HAS_VAR_BINS: # With Astropy 5.0 check the exact numbers again
assert_allclose(binned_lc.flux_err, np.ones(5))
else:
assert_allclose(binned_lc.flux_err, np.sqrt([2./3, 1, 1, 1, 2]))
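            # (With per-point flux_err = sqrt(2), the standard error of a bin
            # mean over n points is sqrt(2)/sqrt(n): 3 points -> sqrt(2/3),
            # 2 points -> 1, and the lone last point -> sqrt(2).)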
assert len(binned_lc.time) == 5
with pytest.raises(TypeError):
lc.bin(method='doesnotexist')
# If `flux_err` is missing, the errors on the bins should be the stddev
lc = LightCurve(time=np.arange(10), flux=2 * np.ones(10))
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.flux_err, np.zeros(5))
# Regression test for #377
lc = KeplerLightCurve(time=np.arange(10), flux=2 * np.ones(10))
lc.bin(5).remove_outliers()
# Second regression test for #377
lc = KeplerLightCurve(
time=np.arange(1000) * 0.02,
flux=1 * np.ones(1000) + np.random.normal(0, 1e-6, 1000),
cadenceno=np.arange(1000),
)
assert np.isclose(lc.bin(2).estimate_cdpp(), 1, rtol=1)
# Regression test for #500
lc = LightCurve(
time=np.arange(2000), flux=np.random.normal(loc=42, scale=0.01, size=2000)
)
assert np.round(lc.bin(2000).flux_err[0], 2) == 0.01
def test_bin_meta():
"""Ensure .bin() result carries original meta. See #1040 """
lc = LightCurve(
time=np.arange(10), flux=2 * np.ones(10), flux_err=2 ** 0.5 * np.ones(10)
)
lc.meta['CREATOR'] = 'lk unit test'
lc.meta['SECTOR'] = 99
binned_lc = lc.bin(time_bin_size=5)
assert binned_lc.meta == lc.meta
def test_bin_folded():
    # Bin folded light curves; see issue #927.
lc = LightCurve(
time=np.arange(2000), flux=np.random.normal(loc=42, scale=0.01, size=2000)
)
binned_folded_lc = lc.fold(period=100).bin(time_bin_size=100)
assert np.round(binned_folded_lc.flux_err[0], 2) == 0.01
@pytest.mark.skip # expected to be resolved in AstroPy v5.0.1 via PR #12527
def test_bins_kwarg():
"""Does binning work with user-defined bin placement?"""
n_times = 3800
end_time = 80.0
time_points = np.sort(np.random.uniform(low=0.0, high=end_time, size=n_times))
lc = LightCurve(
time=time_points,
flux=1.0 + np.random.normal(0, 0.1, n_times),
flux_err=0.1 * np.ones(n_times),
)
# Do the shapes of binned lightcurves make sense?
binned_lc = lc.bin(time_bin_size=10 * u.day)
assert len(binned_lc) == np.ceil(end_time / 10)
binned_lc = lc.bin(time_bin_size=11 * u.day)
assert len(binned_lc) == np.ceil(end_time / 11)
# Resulting length with `n_bins=N` yields exactly N bins every time
binned_lc = lc.bin(time_bin_size=10 * u.day, n_bins=38)
assert len(binned_lc) == 38
    # The `bins` kwarg cannot accept a list or array when aggregate_downsample predates astropy PR #11266
time_bin_edges = [0, 10, 20, 30, 40, 50, 60, 70, 80]
if not _HAS_VAR_BINS: # Need Astropy 5.0 for those
with pytest.raises(ValueError, match="Sequence or method for ``bins`` requires Astropy"):
binned_lc = lc.bin(bins=time_bin_edges)
else:
# You get N-1 bins when you enter N fenceposts
binned_lc = lc.bin(bins=time_bin_edges)
assert len(binned_lc) == (len(time_bin_edges) - 1)
time_bin_edges = np.arange(0, 81, 1)
binned_lc = lc.bin(bins=time_bin_edges)
assert len(binned_lc) == (len(time_bin_edges) - 1)
    # Data points outside of the bin range get stuck in the last bin
time_bin_edges = np.arange(0, 61, 1)
binned_lc = lc.bin(bins=time_bin_edges)
assert len(binned_lc) == (len(time_bin_edges) - 1)
    # The `bins` kwarg also supports the methods from astropy.stats.histogram
if not _HAS_VAR_BINS: # Need Astropy 5.0 for those
with pytest.raises(ValueError, match="Sequence or method for ``bins`` requires Astropy"):
for special_bins in ["blocks", "knuth", "scott", "freedman"]:
binned_lc = lc.bin(bins=special_bins)
with pytest.raises(TypeError, match="``bins`` must have integer type."):
binned_lc = lc.bin(bins="junk_input!")
    # In dense bins, the flux error should go down as root-N for N samples per bin
binned_lc = lc.bin(binsize=100) # Exactly 100 samples per bin
assert np.isclose(lc.flux_err.mean() / np.sqrt(100), binned_lc.flux_err.mean(), rtol=0.3)
binned_lc = lc.bin(bins=38) # Roughly 100 samples per bin
assert np.isclose(lc.flux_err.mean() / np.sqrt(100), binned_lc.flux_err.mean(), rtol=0.3)
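    # (With per-point flux_err = 0.1 and roughly 100 samples per bin, the
    # binned error should land near 0.1 / sqrt(100) = 0.01.)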
    # The bins parameter must be an integer, not a float
with pytest.raises(TypeError, match="``bins`` must have integer type."):
binned_lc = lc.bin(bins=381.0)
# Binned lightcurve can have *more* bins than input lightcurve,
# but may strip empty bins at start and end.
binned_lc = lc.bin(bins=10000)
assert 10000 - 2 <= len(binned_lc) <= 10000
# To-do: Check for unusual edge cases that are now possible:
# - Binned lightcurve has NaN fluxes in empty bins
# - Binned lightcurve has a single bin (e.g. in Knuth)
# - Bins = 310.0
def test_bin_quality():
"""Binning must also revise the quality and centroid columns."""
lc = KeplerLightCurve(
time=[1, 2, 3, 4],
flux=[1, 1, 1, 1],
quality=[0, 1, 2, 3],
centroid_col=[0., 1, 0, 1],
centroid_row=[0., 2, 0, 2],
)
binned_lc = lc.bin(binsize=2)
if _HAS_VAR_BINS:
assert_allclose(binned_lc.centroid_col, [0.5, 0.5]) # Expect mean
assert_allclose(binned_lc.centroid_row, [1, 1]) # Expect mean
else: # Again account for 3-1 allocation to first and last bin
assert_allclose(binned_lc.centroid_col, [1./3, 1]) # Expect mean
assert_allclose(binned_lc.centroid_row, [2./3, 2]) # Expect mean
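        # (The first bin averages three points, e.g. mean([0, 1, 0]) = 1/3 and
        # mean([0, 2, 0]) = 2/3; the last bin holds only the final point.)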
# TEMPORARILY SKIPPED, cf. https://github.com/lightkurve/lightkurve/issues/663
@pytest.mark.xfail # pytest.xfail("aggregate_downsample does not handle bitwise binning correctly")
def test_binned_quality():
"""Binning must also revise the quality and centroid columns."""
lc = KeplerLightCurve(
time=[1, 2, 3, 4],
flux=[1, 1, 1, 1],
quality=[0, 1, 2, 3],
centroid_col=[0., 1, 0, 1],
centroid_row=[0., 2, 0, 2],
)
binned_lc = lc.bin(binsize=2)
assert_allclose(binned_lc.quality, [1, 3]) # Expect bitwise or
# BEGIN code for lc.bin memory usage test
#
bad_resource_module_imports = False
try:
import resource # supported on Unix only
except ImportError:
bad_resource_module_imports = True
def duplicate_and_stitch(lc, num_copies):
"""Helper to create a large LC by duplicating and stitching the supplied one"""
duration = lc.time.max() - lc.time.min()
lcc = [lc]
for i in range(1, num_copies):
lc_copy = lc.copy()
lc_copy.time = lc_copy.time + (duration + 1 * u.day) * i
lcc.append(lc_copy)
return LightCurveCollection(lcc).stitch()
@pytest.mark.memtest
@pytest.mark.skipif(bad_resource_module_imports, reason="Requires resource module, only available for Unix")
@pytest.mark.remote_data
@pytest.mark.parametrize(
"dict_of_bin_args",
[ # variants for lc.bin() call, they all result in roughly the same number of bins.
dict(bins=10000),
dict(binsize=10),
dict(time_bin_size=20 * u.min),
]
)
def test_bin_memory_usage(dict_of_bin_args):
"""Ensure lc.bin() does not use excessive memory (#1092)"""
# create a large lightcurve that could stress memory
lc = duplicate_and_stitch(read(TESS_SIM), 10)
import resource
# empirically, need about 1.1Gb just to open and stitch the lc
# (with ipython kernel)
# if we hit excessive memory usage like those in #1092,
# the system can easily need another 1+ Gb.
memory_limit = int(1.5 * 1024 * 1024 * 1024)
resource.setrlimit(resource.RLIMIT_AS, (memory_limit, memory_limit))
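    # (RLIMIT_AS caps the process's virtual address space, so exceeding the
    # limit surfaces as a MemoryError instead of exhausting the machine.)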
# Ensure it does not result in Out of Memory Error
    with warnings.catch_warnings():  # lc.bin(binsize=n) is deprecated
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
lc_b = lc.bin(**dict_of_bin_args)
#
# END code for lc.bin memory usage test
def test_normalize():
"""Does the `LightCurve.normalize()` method normalize the flux?"""
lc = LightCurve(
time=np.arange(10), flux=5 * np.ones(10), flux_err=0.05 * np.ones(10)
)
assert_allclose(np.median(lc.normalize().flux), 1)
assert_allclose(np.median(lc.normalize().flux_err), 0.05 / 5)
# already in relative units
lc = LightCurve(time=np.arange(10), flux=np.ones(10)).normalize()
with pytest.warns(None) as warn_record:
lc.normalize()
assert len(warn_record) == 0
assert lc.meta["NORMALIZED"]
def test_invalid_normalize():
"""Normalization makes no sense if the light curve is negative,
zero-centered, or already in relative units."""
# zero-centered light curve
lc = LightCurve(time=np.arange(10), flux=np.zeros(10))
with pytest.warns(LightkurveWarning, match="zero-centered"):
lc.normalize()
# zero-centered light curve with flux errors
lc = LightCurve(time=np.arange(10), flux=np.zeros(10), flux_err=0.05 * np.ones(10))
with pytest.warns(LightkurveWarning, match="zero-centered"):
lc.normalize()
# negative light curve
lc = LightCurve(time=np.arange(10), flux=-np.ones(10), flux_err=0.05 * np.ones(10))
with pytest.warns(LightkurveWarning, match="negative"):
lc.normalize()
def test_to_pandas():
"""Test the `LightCurve.to_pandas()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time=time, flux=flux, flux_err=flux_err)
try:
df = lc.to_pandas()
assert_allclose(df.index, lc.time.value)
assert_allclose(df.flux, flux)
assert_allclose(df.flux_err, flux_err)
        df.describe()  # Will fail if there are endianness bugs
except ImportError:
# pandas is an optional dependency
pass
def test_to_pandas_kepler():
"""When to_pandas() is executed on a KeplerLightCurve, it should include
extra columns such as `quality`."""
time, flux, quality = range(3), np.ones(3), np.zeros(3)
lc = KeplerLightCurve(time=time, flux=flux, quality=quality)
try:
df = lc.to_pandas()
assert_allclose(df.quality, quality)
except ImportError:
# pandas is an optional dependency
pass
def test_to_table():
"""Test the `LightCurve.to_table()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time=time, flux=flux, flux_err=flux_err)
tbl = lc.to_table()
assert_allclose(tbl["time"].value, time)
assert_allclose(tbl["flux"], flux)
assert_allclose(tbl["flux_err"], flux_err)
# Looks like `to_pandas` forces the time field to become an ISO datetime;
# it may not be worth fixing this because we may want to deprecate
# this function in favor of `Table.write()`.
@pytest.mark.xfail
def test_to_csv():
"""Test the `LightCurve.to_csv()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
try:
lc = LightCurve(time=time, flux=flux, flux_err=flux_err)
assert (
lc.to_csv(line_terminator="\n")
== "time,flux,flux_err\n0,1.0,0.0\n1,1.0,0.0\n2,1.0,0.0\n"
)
except ImportError:
# pandas is an optional dependency
pass
@pytest.mark.remote_data
def test_to_fits():
"""Test the KeplerLightCurve.to_fits() method"""
lc = KeplerLightCurve.read(TABBY_Q8)
hdu = lc.to_fits()
KeplerLightCurve.read(hdu) # Regression test for #233
assert type(hdu).__name__ == "HDUList"
assert len(hdu) == 2
assert hdu[0].header["EXTNAME"] == "PRIMARY"
assert hdu[1].header["EXTNAME"] == "LIGHTCURVE"
assert hdu[1].header["TTYPE1"] == "TIME"
assert hdu[1].header["TTYPE2"] == "FLUX"
assert hdu[1].header["TTYPE3"] == "FLUX_ERR"
hdu = LightCurve(time=[0, 1, 2, 3, 4], flux=[1, 1, 1, 1, 1]).to_fits()
# Test "round-tripping": can we read-in what we write
lc_new = KeplerLightCurve.read(hdu) # Regression test for #233
assert hdu[0].header["EXTNAME"] == "PRIMARY"
assert hdu[1].header["EXTNAME"] == "LIGHTCURVE"
assert hdu[1].header["TTYPE1"] == "TIME"
assert hdu[1].header["TTYPE2"] == "FLUX"
# Test aperture mask support in to_fits
for tpf in [KeplerTargetPixelFile(TABBY_TPF), TessTargetPixelFile(filename_tess)]:
random_mask = np.random.randint(0, 2, size=tpf.flux[0].shape, dtype=bool)
thresh_mask = tpf.create_threshold_mask(threshold=3)
lc = tpf.to_lightcurve(aperture_mask=random_mask)
lc.to_fits(path=tempfile.NamedTemporaryFile().name, aperture_mask=random_mask)
lc.to_fits(
path=tempfile.NamedTemporaryFile().name,
overwrite=True,
flux_column_name="SAP_FLUX",
)
lc = tpf[0:2].to_lightcurve(aperture_mask=thresh_mask)
lc.to_fits(aperture_mask=thresh_mask, path=tempfile.NamedTemporaryFile().name)
# Test the extra data kwargs
bkg_mask = ~tpf.create_threshold_mask(threshold=0.1)
bkg_lc = tpf.to_lightcurve(aperture_mask=bkg_mask)
lc = tpf.to_lightcurve(aperture_mask=tpf.hdu["APERTURE"].data)
lc = tpf.to_lightcurve(aperture_mask=None)
lc = tpf.to_lightcurve(aperture_mask=thresh_mask)
lc_out = lc - bkg_lc.flux * (thresh_mask.sum() / bkg_mask.sum())
lc_out.to_fits(
aperture_mask=thresh_mask,
path=tempfile.NamedTemporaryFile().name,
overwrite=True,
extra_data={"BKG": bkg_lc.flux},
)
def test_astropy_time_bkjd():
"""Does `KeplerLightCurve` support bkjd?"""
bkjd = np.array([100, 200])
lc = KeplerLightCurve(time=[100, 200])
assert_allclose(lc.time.jd, bkjd + 2454833.0)
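    # (BKJD = Barycentric Kepler Julian Date, i.e. BJD - 2454833.0.)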
def test_lightcurve_repr():
"""Do __str__ and __repr__ work?"""
time, flux = range(3), np.ones(3)
str(LightCurve(time=time, flux=flux))
str(KeplerLightCurve(time=time, flux=flux))
str(TessLightCurve(time=time, flux=flux))
repr(LightCurve(time=time, flux=flux))
repr(KeplerLightCurve(time=time, flux=flux))
repr(TessLightCurve(time=time, flux=flux))
@pytest.mark.remote_data
def test_lightcurvefile_repr():
"""Do __str__ and __repr__ work?"""
lcf = KeplerLightCurve.read(TABBY_Q8)
str(lcf)
repr(lcf)
lcf = TessLightCurve.read(TESS_SIM)
str(lcf)
repr(lcf)
def test_slicing():
"""Does LightCurve.__getitem__() allow slicing?"""
time = np.linspace(0, 10, 10)
flux = np.linspace(100, 200, 10)
flux_err = np.linspace(5, 50, 10)
lc = LightCurve(time=time, flux=flux, flux_err=flux_err)
assert_array_equal(lc[0:5].time.value, time[0:5])
assert_array_equal(lc[2::2].flux, flux[2::2])
assert_array_equal(lc[5:9:-1].flux_err, flux_err[5:9:-1])
# KeplerLightCurves contain additional data arrays that need to be sliced
centroid_col = np.linspace(40, 50, 10)
centroid_row = np.linspace(50, 60, 10)
quality = np.linspace(70, 80, 10)
cadenceno = np.linspace(90, 100, 10)
lc = KeplerLightCurve(
time=time,
flux=flux,
flux_err=flux_err,
centroid_col=centroid_col,
centroid_row=centroid_row,
cadenceno=cadenceno,
quality=quality,
)
assert_array_equal(lc[::3].centroid_col, centroid_col[::3])
assert_array_equal(lc[4:].centroid_row, centroid_row[4:])
assert_array_equal(lc[10:2].quality, quality[10:2])
assert_array_equal(lc[3:6].cadenceno, cadenceno[3:6])
# The same is true for TessLightCurve
lc = TessLightCurve(
time=time,
flux=flux,
flux_err=flux_err,
centroid_col=centroid_col,
centroid_row=centroid_row,
cadenceno=cadenceno,
quality=quality,
)
assert_array_equal(lc[::4].centroid_col, centroid_col[::4])
assert_array_equal(lc[5:].centroid_row, centroid_row[5:])
assert_array_equal(lc[10:3].quality, quality[10:3])
assert_array_equal(lc[4:6].cadenceno, cadenceno[4:6])
def test_boolean_masking():
lc = KeplerLightCurve(
time=[1, 2, 3], flux=[1, 1, 10], quality=[0, 0, 200], cadenceno=[5, 6, 7]
)
assert_array_equal(lc[lc.flux < 5].time.value, [1, 2])
assert_array_equal(lc[lc.flux < 5].flux, [1, 1])
assert_array_equal(lc[lc.flux < 5].quality, [0, 0])
assert_array_equal(lc[lc.flux < 5].cadenceno, [5, 6])
def test_remove_nans():
"""Does LightCurve.__getitem__() allow slicing?"""
time, flux = [1, 2, 3, 4], [100, np.nan, 102, np.nan]
lc = LightCurve(time=time, flux=flux)
lc_clean = lc.remove_nans()
assert_array_equal(lc_clean.time.value, [1, 3])
assert_array_equal(lc_clean.flux, [100, 102])
lc_clean = lc.remove_nans("flux_err")
assert_array_equal(lc_clean.flux, [])
def test_remove_outliers():
# Does `remove_outliers()` remove outliers?
lc = LightCurve(time=[1, 2, 3, 4], flux=[1, 1, 1000, 1])
lc_clean = lc.remove_outliers(sigma=1)
assert_array_equal(lc_clean.time.value, [1, 2, 4])
assert_array_equal(lc_clean.flux, [1, 1, 1])
# It should also be possible to return the outlier mask
lc_clean, outlier_mask = lc.remove_outliers(sigma=1, return_mask=True)
assert len(outlier_mask) == len(lc.flux)
assert outlier_mask.sum() == 1
# Can we set sigma_lower and sigma_upper?
lc = LightCurve(time=[1, 2, 3, 4, 5], flux=[1, 1000, 1, -1000, 1])
lc_clean = lc.remove_outliers(sigma_lower=float("inf"), sigma_upper=1)
assert_array_equal(lc_clean.time.value, [1, 3, 4, 5])
assert_array_equal(lc_clean.flux, [1, 1, -1000, 1])
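# The -1000 value survives because `sigma_lower=float("inf")` disables
# clipping on the faint side; only the bright +1000 outlier is removed.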
@pytest.mark.remote_data
def test_properties(capfd):
"""Test if the describe function produces an output.
The output is 624 characters at the moment, but we might add more properties."""
kplc = KeplerLightCurve.read(TABBY_Q8, flux_column="sap_flux")
kplc.show_properties()
out, _ = capfd.readouterr()
assert len(out) > 500
def test_flatten_with_nans():
"""Flatten should not remove NaNs."""
lc = LightCurve(
time=[1, 2, 3, 4, 5],
flux=[np.nan, 1.1, 1.2, np.nan, 1.4],
flux_err=[1.0, np.nan, 1.2, 1.3, np.nan],
)
flat_lc = lc.flatten(window_length=3)
assert len(flat_lc.time) == 5
assert np.isfinite(flat_lc.flux).sum() == 3
assert np.isfinite(flat_lc.flux_err).sum() == 3
def test_flatten_robustness():
"""Test various special cases for flatten()."""
# flatten should work with integer fluxes
lc = LightCurve(time=[1, 2, 3, 4, 5, 6], flux=[10, 20, 30, 40, 50, 60])
expected_result = np.array([1.0, 1.0, 1.0, 1.0, 1.0, 1.0])
flat_lc = lc.flatten(window_length=3, polyorder=1)
assert_allclose(flat_lc.flux, expected_result)
# flatten should work even if `window_length > len(flux)`
flat_lc = lc.flatten(window_length=7, polyorder=1)
assert_allclose(flat_lc.flux, flat_lc.flux / np.median(flat_lc.flux))
# flatten should work even if `polyorder >= window_length`
flat_lc = lc.flatten(window_length=3, polyorder=3)
assert_allclose(flat_lc.flux, expected_result)
flat_lc = lc.flatten(window_length=3, polyorder=5)
assert_allclose(flat_lc.flux, expected_result)
# flatten should work even if `break_tolerance = None`
flat_lc = lc.flatten(window_length=3, break_tolerance=None)
assert_allclose(flat_lc.flux, expected_result)
flat_lc, trend_lc = lc.flatten(return_trend=True)
assert_allclose(flat_lc.time.value, trend_lc.time.value)
assert_allclose(lc.flux, flat_lc.flux * trend_lc.flux)
def test_flatten_returns_normalized():
"""Ensure returned lightcurves from flatten() can be normalized"""
# Test for https://github.com/lightkurve/lightkurve/issues/838
lc_flux_unit = u.Unit("electron/second")
lc = LightCurve(
time=[1, 2, 3, 4, 5, 6],
flux=[10.1, 20.2, 30.3, 40.4, 50.5, 60.6] * lc_flux_unit,
flux_err=[0.01, 0.02, 0.03, 0.04, 0.05, 0.06] * lc_flux_unit,
)
flat_lc, trend_lc = lc.flatten(window_length=3, polyorder=1, return_trend=True)
assert flat_lc.flux.unit == u.dimensionless_unscaled
assert flat_lc.flux_err.unit == u.dimensionless_unscaled
assert flat_lc.meta["NORMALIZED"]
assert trend_lc.flux.unit is lc_flux_unit
assert trend_lc.flux_err.unit is lc_flux_unit
# once the above assertions pass, the normalize() should work
# but we test it anyway just in case something else goes wrong
flat_lc.normalize(unit="percent")
trend_lc.normalize(unit="percent")
def test_iterative_flatten():
"""Test the iterative sigma clipping in flatten """
# Test a light curve with a single, buried outlier.
x = np.arange(2000)
y = np.sin(x / 200) / 100 + 1
y[250] -= 0.01
lc = LightCurve(time=x, flux=y)
# Flatten it
c, f = lc.flatten(window_length=25, niters=2, sigma=3, return_trend=True)
# Only one outlier should remain.
assert np.isclose(c.flux, 1, rtol=0.00001).sum() == 1999
mask = np.zeros(2000, dtype=bool)
mask[250] = True
# Flatten it using a mask to remove the bad data point.
c, f = lc.flatten(window_length=25, niters=1, sigma=3, mask=mask, return_trend=True)
# Only one outlier should remain.
assert np.isclose(c.flux, 1, rtol=0.00001).sum() == 1999
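# Rationale for the mask variant above: cadences where `mask` is True are
# excluded from the trend fit inside `flatten()` but are kept in the returned
# light curve, which is why exactly one outlier remains in both cases.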
def test_fill_gaps():
lc = LightCurve(time=[1, 2, 3, 4, 6, 7, 8], flux=[1, 1, 1, 1, 1, 1, 1])
nlc = lc.fill_gaps()
assert len(lc.time) < len(nlc.time)
assert np.any(nlc.time.value == 5)
assert np.all(nlc.flux == 1)
lc = LightCurve(time=[1, 2, 3, 4, 6, 7, 8], flux=[1, 1, np.nan, 1, 1, 1, 1])
nlc = lc.fill_gaps()
assert len(lc.time) < len(nlc.time)
assert np.any(nlc.time.value == 5)
assert np.all(nlc.flux == 1)
assert np.all(np.isfinite(nlc.flux))
# Regression test for https://github.com/lightkurve/lightkurve/pull/1172
lc_mask = [False, False, True, False, False, False, False]
lc = LightCurve(
time=[1, 2, 3, 4, 6, 7, 8],
flux=Masked([1, 1, np.nan, 1, 1, 1, 1], mask=lc_mask),
flux_err=Masked([0, 0, np.nan, 0, 0, 0, 0], mask=lc_mask)
)
nlc = lc.fill_gaps()
assert len(lc.time) < len(nlc.time)
assert np.any(nlc.time.value == 5)
assert np.all(nlc.flux == 1)
assert np.all(nlc.flux_err == 0)
assert np.all(np.isfinite(nlc.flux))
# Because fill_gaps() uses pandas, check that it works regardless of endianness
# For details see https://github.com/lightkurve/lightkurve/issues/188
lc = LightCurve(
time=np.array([1, 2, 3, 4, 6, 7, 8], dtype=">f8"),
flux=np.array([1, 1, 1, np.nan, np.nan, 1, 1], dtype=">f8"),
)
lc.fill_gaps()
lc = LightCurve(
time=np.array([1, 2, 3, 4, 6, 7, 8], dtype="<f8"),
flux=np.array([1, 1, 1, np.nan, np.nan, 1, 1], dtype="<f8"),
)
lc.fill_gaps()
def test_targetid():
"""Is a generic targetid available on each type of LighCurve object?"""
lc = LightCurve(time=[], targetid=5)
assert lc.targetid == 5
# Can we assign a new value?
lc.targetid = 99
assert lc.targetid == 99
# Does it work for Kepler?
lc = KeplerLightCurve(time=[], targetid=10)
assert lc.targetid == 10
# Can we assign a new value?
lc.targetid = 99
assert lc.targetid == 99
# Does it work for TESS?
lc = TessLightCurve(time=[], targetid=20)
assert lc.targetid == 20
@pytest.mark.remote_data  # because K2_C08 is a remote data file
def test_regression_346():
"""Regression test for https://github.com/lightkurve/lightkurve/issues/346"""
# This previously triggered an IndexError:
with warnings.catch_warnings(): # KeplerLightCurveFile is deprecated
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
from lightkurve import KeplerLightCurveFile
KeplerLightCurveFile(
K2_C08
).PDCSAP_FLUX.remove_nans().to_corrector().correct().estimate_cdpp()
def test_flux_unit():
"""Checks the use of lc.flux_unit and lc.flux_quantity."""
with warnings.catch_warnings(): # We deprecated `flux_unit` in v2.0
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
unit_obj = u.Unit("electron/second")
# Can we set flux units using a Unit object?
time, flux = range(3), np.ones(3)
lc = LightCurve(time=time, flux=flux, flux_unit=unit_obj)
assert lc.flux.unit == unit_obj
# Can we set flux units using a string?
lc = LightCurve(time=time, flux=flux, flux_unit="electron/second")
assert lc.flux.unit == unit_obj
# Can we pass a quantity to flux?
lc = LightCurve(time=time, flux=flux * unit_obj)
assert lc.flux.unit == unit_obj
# Can we retrieve correct flux quantities?
with warnings.catch_warnings(): # flux_quantity is deprecated
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
assert lc.flux_quantity.unit == unit_obj
assert_array_equal(lc.flux_quantity.value, flux)
# Is invalid user input validated?
with pytest.raises(ValueError) as err:
lc = LightCurve(time=time, flux=flux, flux_unit="blablabla")
assert "not a valid unit" in err.value.args[0]
def test_astropy_time_initialization():
"""Does the `LightCurve` constructor accept Astropy time objects?"""
time = [1, 2, 3]
lc = LightCurve(time=Time(2.454e6 + np.array(time), format="jd", scale="utc"))
assert lc.time.format == "jd"
assert lc.time.scale == "utc"
with warnings.catch_warnings(): # we deprecated `astropy_time` in v2.0
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
assert lc.astropy_time.format == "jd"
assert lc.astropy_time.scale == "utc"
lc = LightCurve(time=time, time_format="bkjd", time_scale="tdb")
assert lc.time.format == "bkjd"
assert lc.time.scale == "tdb"
with warnings.catch_warnings(): # we deprecated `astropy_time` in v2.0
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
assert lc.astropy_time.format == "bkjd"
assert lc.astropy_time.scale == "tdb"
def test_normalize_unit():
"""Can the units of a normalized light curve be set?"""
lc = LightCurve(flux=[1, 2, 3])
for unit in ["percent", "ppt", "ppm"]:
assert lc.normalize(unit=unit).flux.unit.name == unit
@pytest.mark.skip
def test_to_stingray():
"""Test the `LightCurve.to_stingray()` method."""
time, flux, flux_err = range(3), np.ones(3), np.zeros(3)
lc = LightCurve(time=time, flux=flux, flux_err=flux_err)
try:
with warnings.catch_warnings():
# Ignore "UserWarning: Numba not installed" raised by stingray.
warnings.simplefilter("ignore", UserWarning)
sr = lc.to_stingray()
assert_allclose(sr.time, time)
assert_allclose(sr.counts, flux)
assert_allclose(sr.counts_err, flux_err)
except ImportError:
# Requires Stingray
pass
@pytest.mark.skip
def test_from_stingray():
"""Test the `LightCurve.from_stingray()` method."""
try:
from stingray import sampledata
sr = sampledata.sample_data()
lc = LightCurve.from_stingray(sr)
assert_allclose(sr.time, lc.time)
assert_allclose(sr.counts, lc.flux)
assert_allclose(sr.counts_err, lc.flux_err)
except ImportError:
pass # stingray is not a required dependency
def test_river():
lc = LightCurve(
time=np.arange(100),
flux=np.random.normal(1, 0.01, 100),
flux_err=np.random.normal(0, 0.01, 100),
)
lc.plot_river(10, 1)
plt.close()
folded_lc = lc.fold(10, 1)
folded_lc.plot_river()
plt.close()
folded_lc.plot_river(minimum_phase=-0.1, maximum_phase=0.2)
plt.close()
folded_lc.plot_river(method="median", bin_points=5)
plt.close()
folded_lc.plot_river(method="sigma", bin_points=5)
plt.close()
with pytest.warns(LightkurveWarning, match="`bin_points` is too high to plot"):
folded_lc.plot_river(method="median", bin_points=6)
plt.close()
def test_bin_issue705():
"""Regression test for #705: binning failed."""
lc = TessLightCurve(time=np.arange(50), flux=np.ones(50), quality=np.zeros(50))
with warnings.catch_warnings(): # binsize is deprecated
warnings.simplefilter("ignore", LightkurveDeprecationWarning)
lc.bin(binsize=15)
#@pytest.mark.remote_data
@pytest.mark.skip # At time of writing, the SkyBot API yields too many intermittent HTTP Errors
def test_SSOs():
# TESS test
lc = TessTargetPixelFile(asteroid_TPF).to_lightcurve(aperture_mask="all")
lc.meta["MISSION"] = "TESS" # needed to resolve default value for location argument
result = lc.query_solar_system_objects(cadence_mask="all", cache=False)
assert len(result) == 1
result = lc.query_solar_system_objects(cadence_mask=np.asarray([True]), cache=False)
assert len(result) == 1
result = lc.query_solar_system_objects(cadence_mask=[True], cache=False)
assert len(result) == 1
result = lc.query_solar_system_objects(cadence_mask=(True), cache=False)
assert len(result) == 1
result, mask = lc.query_solar_system_objects(
cadence_mask=np.asarray([True]), cache=True, return_mask=True
)
assert len(mask) == len(lc.flux)
try:
result = lc.query_solar_system_objects(
cadence_mask="str-not-supported", cache=False
)
pytest.fail("Unsupported cadence_mask should have thrown Error")
except ValueError:
pass
@pytest.mark.xfail # LightCurveFile was removed in Lightkurve v2.x
def test_get_header():
"""Test the basic functionality of ``tpf.get_header()``"""
lcf = TessLightCurveFile(filename_tess_custom)
assert lcf.get_header()["CREATOR"] == lcf.get_keyword("CREATOR")
assert lcf.get_header(ext=2)["EXTNAME"] == "APERTURE"
# ``tpf.header`` is deprecated
with pytest.warns(LightkurveWarning, match="deprecated"):
lcf.header()
def test_fold_v2():
"""The API of LightCurve.fold() changed in Lightkurve v2.x when we adopted
AstroPy's TimeSeries.fold() method. This test verifies the new API."""
lc = LightCurve(time=np.linspace(0, 10, 100), flux=np.zeros(100) + 1)
# Can period be passed as a float?
fld = lc.fold(period=1)
fld2 = lc.fold(period=1 * u.day)
assert_array_equal(fld.phase, fld2.phase)
assert isinstance(fld.time, TimeDelta)
fld.plot_river()
plt.close()
# Does phase normalization work?
fld = lc.fold(period=1, normalize_phase=True)
assert isinstance(fld.time, u.Quantity)
fld.plot_river()
plt.close()
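# Illustrative sketch (not a test) of the v2.x fold API, which mirrors
# astropy's TimeSeries.fold(); `epoch_time` is assumed to be the optional
# epoch keyword adopted from astropy:
def _fold_api_sketch():
    lc = LightCurve(time=np.linspace(0, 10, 100), flux=np.zeros(100) + 1)
    folded = lc.fold(period=1 * u.day, epoch_time=lc.time[0])
    normalized = lc.fold(period=1, normalize_phase=True)  # phase in [-0.5, 0.5)
    return folded, normalized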
@pytest.mark.remote_data
def test_combine_kepler_tess():
"""Can we append or stitch a TESS light curve to a Kepler light curve?"""
# KIC 11904151: Kepler-10
lc_kplr = search_lightcurve("KIC 11904151", mission="Kepler", author="Kepler")[
0
].download()
lc_tess = search_lightcurve("KIC 11904151", mission="TESS", author="SPOC")[
0
].download()
# Can we use append()?
lc = lc_kplr.append(lc_tess)
assert len(lc) == len(lc_kplr) + len(lc_tess)
# Can we use stitch()?
coll = LightCurveCollection((lc_kplr, lc_tess))
lc = coll.stitch()
assert len(lc) == len(lc_kplr) + len(lc_tess)
# Test initialization with `data`` in various form
# - adapted from: https://github.com/astropy/astropy/blob/v5.0.4/astropy/timeseries/tests/test_sampled.py
# - the goal is not to repeat the tests, but to ensure LightCurve supports the same type variants.
INPUT_TIME = Time(['2016-03-22T12:30:31',
'2015-01-21T12:30:32',
'2016-03-22T12:30:40'])
INPUT_RAW_TIME = [25800000.0, 25800000.1, 25800000.2] # raw time in JD
PLAIN_TABLE = Table([[1, 2, 11], [3, 4, 1], [1, 1, 1]], names=['flux', 'flux_err', 'c'])
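# Illustrative sketch (not a test): the variants exercised below are expected
# to be equivalent ways of supplying extra columns alongside `time`:
def _data_kwarg_sketch():
    lc_from_dict = LightCurve(time=INPUT_TIME, data={"flux": [1, 2, 11]})
    lc_from_table = LightCurve(time=INPUT_TIME, data=Table({"flux": [1, 2, 11]}))
    return lc_from_dict, lc_from_table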
def test_initialization_with_data():
lc = LightCurve(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=['flux', 'flux_err'])
assert_equal(lc.time.isot, INPUT_TIME.isot)
assert_equal(lc['flux'], [10, 2, 3])
assert_equal(lc['flux_err'], [4, 5, 6])
def test_initialization_with_table():
lc = LightCurve(time=INPUT_TIME, data=PLAIN_TABLE)
assert lc.colnames == ['time', 'flux', 'flux_err', 'c']
def test_initialization_with_time_in_data():
data = PLAIN_TABLE.copy()
data['time'] = INPUT_TIME
lc1 = LightCurve(data=data)
assert set(lc1.colnames) == set(['time', 'flux', 'flux_err', 'c'])
assert all(lc1.time == INPUT_TIME)
# flux / flux_err is not required in input, but will be automatically generated
lc2 = LightCurve(data=[[10, 2, 3], INPUT_TIME], names=['a', 'time'])
assert set(lc2.colnames) == set(['time', 'a', 'flux', 'flux_err'])
assert all(lc2.time == INPUT_TIME)
# `LightCurve.__init__()` also needs to support `data` in a list of (Time, Column/Column Mix-ins) without `names`
# used internally by `Table.__getitem__()`:
# https://github.com/astropy/astropy/blob/326435449ad8d859f1abf36800c3fb88d49c27ea/astropy/table/table.py#L1888
# It is not a public API code path, and is implicitly tested in `test_select_columns_as_lightcurve()`.
def test_initialization_with_raw_time_in_data():
"""Variant of `test_initialization_with_time_in_data() that is Lightcurve-specific.
Time can be raw values in default format
"""
lc = LightCurve(data=[[10, 2, 3], [4, 5, 6], INPUT_RAW_TIME], names=['flux', 'flux_err', 'time'])
assert set(lc.colnames) == set(['time', 'flux', 'flux_err'])
assert_array_equal(lc.time, Time(INPUT_RAW_TIME, format=lc.time.format, scale=lc.time.scale))
# case multiple time columns: handled by the base TimeSeries
def test_initialization_with_ndarray():
# test init with ndarray does not exist in astropy `test_sampled.py`, and is added
# for completeness sake
data = np.array([(1.0, 0.2, 0),
(3.0, 0.4, 4),
(5.0, 0.6, 2)],
dtype=[('flux', 'f8'), ('flux_err', 'f8'), ('c', 'i4')])
lc = LightCurve(time=INPUT_TIME, data=data)
assert lc.colnames == ['time', 'flux', 'flux_err', 'c']
def test_initialization_with_time_in_ndarray():
data = np.array([(1.0, 0.2, 0, INPUT_RAW_TIME[0]),
(3.0, 0.4, 4, INPUT_RAW_TIME[1]),
(5.0, 0.6, 2, INPUT_RAW_TIME[2])],
dtype=[('flux', 'f8'), ('flux_err', 'f8'), ('c', 'i4'), ('time', 'f8')])
lc = LightCurve(data=data)
assert lc.colnames == ['time', 'flux', 'flux_err', 'c']
def test_mixed_instantiation():
"""Can a LightCurve be instantianted using a mix of keywords and colums?"""
LightCurve(flux=[4, 5, 6], flux_err=[7, 8, 9], data={"time": [1, 2, 3]})
LightCurve(flux=[4, 5, 6], flux_err=[7, 8, 9], data=Table({"time": [1, 2, 3]}))
LightCurve(time=[1, 2, 3], flux_err=[7, 8, 9], data={"flux": [4, 5, 6]})
LightCurve(time=[1, 2, 3], flux_err=[7, 8, 9], data=Table({"flux": [4, 5, 6]}))
LightCurve(data=Table({"time": [1, 2, 3]}), flux=[4, 5, 6])
LightCurve(data={"time": [1, 2, 3]}, flux=[4, 5, 6])
LightCurve(time=[1, 2, 3], flux=[1, 2, 3], data=Table({"flux_err": [3, 4, 5]}))
LightCurve(time=[1, 2, 3], flux=[1, 2, 3], data={"flux_err": [3, 4, 5]})
def test_assignment_time():
"""Ensure time property can be reassigned"""
lc = KeplerLightCurve(
time=Time([1, 2, 3], scale="tdb", format="bkjd"),
flux=[4, 5, 6],
flux_err=[7, 8, 9],
)
time_adjusted = lc.time - 0.5
lc.time = time_adjusted
assert_array_equal(lc.time, time_adjusted)
# case the input is not given format / scale, ensure default format / scale is applied
time_adjusted_raw = [11.0, 12.0, 13.0]
lc.time = time_adjusted_raw
assert_array_equal(lc.time, Time(time_adjusted_raw, scale="tdb", format="bkjd"))
# case the input is scalar, it'd be broadcasted to the existing time's length
lc.time = 21
assert_array_equal(lc.time, Time([21, 21, 21], scale="tdb", format="bkjd"))
def test_attr_access_columns():
"""Test accessing columns as attributes"""
u_e_s = u.electron / u.second
lc = LightCurve(
time=Time([1, 2, 3], scale="tdb", format="jd"), flux=[4, 5, 6] * u_e_s
)
# Read/Write access of flux: explicitly defined as property
assert_array_equal(lc.flux, lc["flux"])
flux_updated = [7, 8, 9] * u_e_s
lc.flux = flux_updated
assert_array_equal(lc.flux, flux_updated)
# Read/Write access of cadenceno: not an explicit property, but present in most LightCurve objects in practice.
cadenceno_unitless = [101, 102, 103]
lc["cadenceno"] = cadenceno_unitless
assert_array_equal(lc["cadenceno"], cadenceno_unitless)
assert lc.cadenceno is lc["cadenceno"]
# Read/Write access of new column
flux_adjusted = [7.1, 8.1, 9.1] * u_e_s
lc["flux_adjusted"] = flux_adjusted
assert_array_equal(lc["flux_adjusted"], flux_adjusted)
assert lc.flux_adjusted is lc["flux_adjusted"]
# column name is an existing method / attribute: attribute access not available
info_col = [9, 8, 7] * u_e_s
lc["info"] = info_col # .info is a built-in attribute (from base TimeSeries)
assert type(lc.info) is not type(info_col)
bin_col = [5, 6, 7] * u_e_s
lc["bin"] = bin_col # .bin is a built-in method
assert type(lc.bin) is not type(bin_col)
# Create a new column directly as an attribute: only attribute is created, not a column
flux2_unitless = [6, 7, 8]
with pytest.warns(UserWarning, match="new attribute name"):
lc.flux2 = flux2_unitless
with pytest.raises(KeyError):
lc["flux2"]
assert_array_equal(lc.flux2, flux2_unitless)
assert (
type(lc.flux2) is list
) # as it's just an attribute, there is no conversion done to Quantity
# ensure no warning is raised when updating an existing attribute
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
lc.foo = "bar"
with pytest.warns(None) as warn_record:
lc.foo = "bar2"
assert len(warn_record) == 0
u_e_s = u.electron / u.second
@pytest.mark.parametrize(
"new_col_val",
[
([2, 3, 4] * u_e_s), # Quantity
(np.array([2, 3, 4])), # ndarray
([2, 3, 4]), # list
],
)
def test_attr_access_columns_consistent_update(new_col_val):
"""Replace/Update a column: ensure consistent behavior across column API and attribute API"""
lc1 = LightCurve(
time=Time([1, 2, 3], scale="tdb", format="jd"), flux=[4, 5, 6] * u_e_s
)
lc1["flux"] = new_col_val
lc2 = LightCurve(
time=Time([1, 2, 3], scale="tdb", format="jd"), flux=[4, 5, 6] * u_e_s
)
lc2.flux = new_col_val
# ensure the result type is the same,
# irrespective whether the update is done via column API or attribute API
assert isinstance(lc1["flux"], type(lc2["flux"]))
def test_attr_access_meta():
"""Test accessing meta values as attributes"""
u_e_s = u.electron / u.second
lc = LightCurve(
time=Time([1, 2, 3], scale="tdb", format="jd"), flux=[4, 5, 6] * u_e_s
)
# Read/Write access of meta via attribute
lc.meta["SECTOR"] = 14
assert lc.sector == 14  # uppercase meta keys are accessible as lowercase attributes
sector_corrected = 15
lc.sector = sector_corrected
assert lc.sector == sector_corrected
assert lc.sector == lc.meta["SECTOR"]
# meta key is an existing attribute / method: attribute access not available
lc.meta["INFO"] = "Some information" # .info: an existing attribute
assert lc.info != lc.meta["INFO"]
lc.meta["BIN"] = "Some value" # .bin: an existing method
assert lc.bin != lc.meta["BIN"]
# Create an attribute: it is created as an object attribute, rather than a meta entry
attr_value = "bar_value"
with pytest.warns(UserWarning, match="new attribute name"):
lc.foo = attr_value
assert lc.meta.get("foo", None) is None
assert lc.foo == attr_value
# Case meta has 2 keys that only differs in case
lc.meta["KEYCASE"] = "VALUE UPPER"
lc.meta["keycase"] = "value lower"
# they are two different entries (case sensitive)
assert lc.meta["KEYCASE"] == "VALUE UPPER"
assert lc.meta["keycase"] == "value lower"
assert lc.keycase == "value lower" # the meta entry with exact case is retrieved
@pytest.mark.parametrize(
"lc",
[
LightCurve(time=[1, 2, 3], flux=[4, 5, 6], meta={'SECTOR': 5}),
LightCurve(time=[1, 2, 3], flux=[4, 5, 6]),
],
)
def test_meta_assignment(lc):
"""Test edge cases in trying to assign meta (#1046)"""
# ensure lc.meta assignment does not emit any warnings.
meta_new = {'TSTART': 123456789.0}
with pytest.warns(None) as record:
lc.meta = meta_new
if len(record) > 0:
    pytest.fail(f"{len(record)} unexpected warning(s): {record[0]}")
# for the case existing meta is not empty
# ensure the assignment overwrites it
# (rather than just copying the values over to the existing one)
assert lc.meta == meta_new
def test_attr_access_others():
"""Test accessing attributes, misc. boundary cases"""
u_e_s = u.electron / u.second
lc = LightCurve(
time=Time([1, 2, 3], scale="tdb", format="jd"), flux=[4, 5, 6] * u_e_s
)
# case the name is present both as a column name and a meta key: column is returned
val_of_col = [5, 6, 7]
val_of_meta_key = "value"
lc["foo"] = val_of_col
lc.meta["FOO"] = val_of_meta_key
assert_array_equal(lc.foo, val_of_col) # lc.foo refers to the column
val_of_col_updated = [6, 7, 8] * u_e_s
lc.foo = val_of_col_updated # should update the column rather than meta
assert_array_equal(lc.foo, val_of_col_updated)
# case the same name is present as column name, meta key, and actual attribute
with warnings.catch_warnings():
warnings.simplefilter("ignore", UserWarning)
lc.bar = "bar_attr_val"
lc["bar"] = [7, 8, 9]
lc.meta["BAR"] = "bar_meta_val"
assert lc.bar == "bar_attr_val" # actual attribute takes priority
lc.bar = "bar_attr_val_updated"
assert (
lc.bar == "bar_attr_val_updated"
) # the update should be done on actual attribute
def test_create_transit_mask():
"""Test for `LightCurve.create_transit_mask()`."""
# Set planet parameters
period = 2.0
transit_time = Time(2450000.0, format="jd")
duration = 0.1
depth = 0.2
flux_err = 0.01
# Create the synthetic light curve
time = np.arange(0, 100, 0.1)
flux = np.ones_like(time)
transit_mask = (
np.abs((time - transit_time.value + 0.5 * period) % period - 0.5 * period)
< 0.5 * duration
)
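# The expression above phase-folds `time` on `period` about the transit epoch:
# phase = ((t - t0 + P/2) mod P) - P/2 lies in [-P/2, P/2), and a cadence is
# flagged as in-transit when |phase| < duration / 2.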
flux[transit_mask] = 1.0 - depth
flux += flux_err * np.random.randn(len(time))
synthetic_lc = LightCurve(time=time, flux=flux)
# Create planet mask
mask = synthetic_lc.create_transit_mask(
period=period, duration=duration, transit_time=transit_time
)
# Are all masked values out of transit?
assert all(f > 0.9 for f in synthetic_lc[~mask].flux.value)
# Are all unmasked values in transit?
assert all(f < 0.9 for f in synthetic_lc[mask].flux.value)
# Can it handle multi-planet masks?
period_2 = 3.0
transit_time_2 = 0.75
duration_2 = 0.1
transit_mask_2 = (
np.abs((time - transit_time_2 + 0.5 * period_2) % period_2 - 0.5 * period_2)
< 0.5 * duration_2
)
flux[transit_mask_2] = 1.0 - depth
synthetic_lc = LightCurve(time=time, flux=flux)
# Create multi-planet planet mask
mask = synthetic_lc.create_transit_mask(
period=[period, period_2],
duration=[duration, duration_2],
transit_time=[transit_time, transit_time_2],
)
# Are all masked values out of transit?
assert all(f > 0.9 for f in synthetic_lc[~mask].flux.value)
# Are all unmasked values in transit?
assert all(f < 0.9 for f in synthetic_lc[mask].flux.value)
# #1117: Repeat multi-planet planet mask test,
# ensure values specified in Quantity are also accepted.
mask = synthetic_lc.create_transit_mask(
period=u.Quantity([period, period_2], unit="day"), # inputs in Quantity
# ensure:
# 1. implementation does proper conversion
# 2. accept a mix of float and Quantity
duration=[duration, (duration_2 * u.day).to(u.hr)],
transit_time=[transit_time, transit_time_2],
)
# Are all masked values out of transit?
assert all(f > 0.9 for f in synthetic_lc[~mask].flux.value)
# Are all unmasked values in transit?
assert all(f < 0.9 for f in synthetic_lc[mask].flux.value)
def test_row_repr():
"""Regression test for #830: ensure the repr works for a single row."""
lc = LightCurve({"time": [1, 2, 3], "flux": [1.0, 1.0, 1.0]})
lc[0].__repr__()
lc[0]._repr_html_()
def test_fill_gaps_with_cadenceno():
"""Does `fill_gaps` work when a ``cadenceno`` column is present?
This is a regression test for #868."""
lc = LightCurve(
{"time": [1, 2, 4, 5], "flux": [1, 1, 1, 1], "cadenceno": [11, 12, 14, 15]}
)
lc.fill_gaps() # raised a `UnitConversionError` in the past, cf. #868
def test_fill_gaps_after_normalization():
"""Does `fill_gaps` work correctly after normalization?
This is a regression test for #868."""
lc = LightCurve(
{"time": [1, 2, 4, 5], "flux": [1, 1, 1, 1], "flux_err": [0.1, 0.1, 0.1, 0.1]}
)
lc = lc.normalize("ppm")
lc2 = lc.fill_gaps()
assert lc2.time[2].value == 3.0
assert lc2.flux[2].value == 1e6
assert lc2.flux[2].unit == "ppm"
assert lc2.flux_err[2].value == 1e5
assert lc2.flux_err[2].unit == "ppm"
@pytest.mark.parametrize(
"new_col_val",
[
([2, 3, 4] * u_e_s), # Quantity
(np.array([2, 3, 4])), # ndarray
([2, 3, 4]), # list
Column([2, 3, 4]), # Column
MaskedColumn([2, -1, 4], mask=[False, True, False], fill_value=-999),
],
)
def test_columns_have_value_accessor(new_col_val):
"""Ensure resulting column has ``.value`` accessor to raw data, irrespective of type of input.
The test won't be needed once https://github.com/astropy/astropy/pull/10962 is in astropy
release and Lightkurve requires the corresponding astropy release (5.0).
"""
expected_raw_value = new_col_val
if hasattr(new_col_val, "value"):
expected_raw_value = new_col_val.value
elif hasattr(new_col_val, "data"):
expected_raw_value = new_col_val.data
lc = LightCurve(time=[1, 2, 3])
lc["col1"] = new_col_val
assert_array_equal(lc["col1"].value, expected_raw_value)
# additional check for MaskedColumn, to ensure we don't lose its properties
if isinstance(new_col_val, MaskedColumn):
assert_array_equal(lc["col1"].mask, new_col_val.mask)
assert lc["col1"].fill_value == new_col_val.fill_value
def test_support_non_numeric_columns():
lc = LightCurve(time=[1, 2, 3], flux=[2, 3, 4])
lc["col1"] = ["a", "b", "c"]
lc_copy = lc.copy()
assert_array_equal(lc_copy["col1"], lc["col1"])
def test_select_columns_as_lightcurve():
"""Select a subset of columns as a lightcurve object. #1194 """
lc = LightCurve(time=np.arange(0, 12))
lc["flux"] = np.ones_like(lc.time, dtype="f8") - 0.01
lc["flux_err"] = np.ones_like(lc.time, dtype="f8") * 0.0001
lc["col1"] = np.zeros_like(lc.time, dtype="i4")
lc["col2"] = np.zeros_like(lc.time, dtype="i4")
# subset of columns including "time" works
lc_subset = lc['time', 'flux', 'col2']
# columns flux / flux_err are always there as part of a LightCurve object
assert set(lc_subset.colnames) == set(['time', 'flux', 'flux_err', 'col2'])
# the flux_err in the subset, as it was not explicitly requested,
# is filled with `nan` rather than carrying over the original lc.flux_err.
assert np.isnan(lc_subset.flux_err).all()
# the subset should still be an instance of LightCurve (rather than just QTable)
assert(isinstance(lc_subset, type(lc)))
lc_b = lc.bin(time_bin_size=3*u.day)
lc_b_subset = lc_b['time', 'flux', 'flux_err', 'col1']
assert set(lc_b_subset.colnames) == set(['time', 'flux', 'flux_err', 'col1'])
assert(isinstance(lc_b_subset, type(lc_b)))
lc_f = lc.fold(period=3)
lc_f_subset = lc_f['time', 'flux', 'flux_err']
assert set(lc_f_subset.colnames) == set(['time', 'flux', 'flux_err'])
assert(isinstance(lc_f_subset, type(lc_f)))
def test_timedelta():
"""Can the time column be initialized using TimeDelta?"""
td = TimeDelta([-0.5, 0, +0.5])
LightCurve(time=td)
LightCurve(data={"time": td})
def test_issue_916():
"""Regression test for #916: Can we flatten after folding?"""
LightCurve(flux=np.random.randn(100)).fold(period=2.5).flatten()
@pytest.mark.remote_data
def test_search_neighbors():
"""The closest neighbor to Proxima Cen in Sector 11 is TIC 388852407."""
lc = search_lightcurve("Proxima Cen", author="spoc", sector=11).download()
search = lc.search_neighbors(limit=1, radius=300, author="spoc", sector="11")
assert len(search) == 1
assert search.distance.value < 300
assert search.target_name[0] == "388852407"
def test_plot_with_offset():
"""Regression test for #961: `lc.plot(offset=N)` increased `lc.flux` by N."""
lc = LightCurve(flux=[1.0])
ax = lc.plot(offset=1)
plt.close(ax.figure)
assert lc.flux[0].value == 1.0
def test_string_column_with_unit():
"""Regression test for #980."""
# string-typed columns with a unit set were making `_convert_col_for_table` crash
col = Column(data=["a", "b", "c"], unit='unitless')
LightCurve(data={'time': [1, 2, 3], 'x': col})
def test_head_tail_truncate():
"""Simple test for the `head()`, `tail()`, and `truncate()` methods."""
lc = LightCurve({'time': [1, 2, 3, 4, 5], 'flux':[1, 2, 3, 4, 5]})
assert lc.head(1).flux == 1
assert lc.head(n=1).flux == 1
assert lc.tail(1).flux == 5
assert lc.tail(n=1).flux == 5
assert all(lc.truncate(2, 4).flux == [2, 3, 4])
assert lc.truncate(before=2).head(1).flux == 2
assert lc.truncate(after=3).tail(1).flux == 3
# test optional column parameter for truncate()
lc["cadenceno"] = [901, 902, 903, 904, 905]
assert all(lc.truncate(902, 904, column="cadenceno").flux == [2, 3, 4])
# case it is an attribute, not a column; furthermore, it holds plain numbers
with warnings.catch_warnings():
# we do want to create an attribute in this case
warnings.simplefilter("ignore", UserWarning)
lc.cycle = [11, 12, 15, 14, 13]
assert all(lc.truncate(12, 14, column="cycle").flux == [2, 4, 5])
def test_select_flux():
"""Simple test for the `LightCurve.select_flux()` method."""
u_e_s = u.electron / u.second
lc = LightCurve(data={'time': [1,2,3],
'flux': [2, 3, 4] * u_e_s,
'flux_err': [0, 1, 2] * u_e_s,
'newflux': [4, 5, 6] * u_e_s,
'newflux_err': [7, 8, 9] * u_e_s,
'newflux_n1': [0.9, 1, 1.1] * u.dimensionless_unscaled, # normalized, unitless
'newflux_n2': [0.9, 1, 1.1], # normalized, no unit
},
)
# Can we set flux to newflux?
assert all(lc.select_flux("newflux").flux == lc.newflux)
assert lc.select_flux("newflux").meta["FLUX_ORIGIN"] == "newflux"
# Did `select_flux()` return a copy rather than operating in place?
assert not all(lc.flux == lc.newflux)
# Does `select_flux()` set the error column by default?
assert all(lc.select_flux("newflux").flux_err == lc.newflux_err)
# Can a different error column be specified?
assert all(lc.select_flux("newflux", flux_err_column="newflux").flux_err == lc.newflux)
# ensure flux_err in the new lc is nan if the chosen flux column has no matching error column
assert all(np.isnan(lc.select_flux("newflux_n1")["flux_err"]))
# Do invalid column names raise a ValueError?
with pytest.raises(ValueError):
lc.select_flux("doesnotexist")
with pytest.raises(ValueError):
lc.select_flux("newflux", "doesnotexist")
# Test for setting normalized correctly (#1091)
lc_n = lc.normalize(unit="percent")
assert lc_n.meta["NORMALIZED"] # expected behavior of normalize, not the real test
assert lc_n.select_flux("newflux").meta.get("NORMALIZED", False) is False # actual test 1
assert lc.meta.get("NORMALIZED", False) is False # expected behavior, not the real test
assert lc.select_flux("newflux_n1").meta.get("NORMALIZED", False) # actual test 2a, the new column is normalized
assert lc.select_flux("newflux_n2").meta.get("NORMALIZED", False) # actual test 2b, the new column is normalized
def test_transit_mask_with_quantities():
"""Regression test for #1141."""
lc = LightCurve(time=range(10), flux=range(10))
mask_quantity = lc.create_transit_mask(period=2.9*u.day, transit_time=1*u.day, duration=1*u.day)
mask_no_quantity = lc.create_transit_mask(period=2.9, transit_time=1, duration=1)
assert all(mask_quantity == mask_no_quantity)
@pytest.mark.skip # expected to be resolved in AstroPy v5.0.1 via PR #12527
def test_nbins():
"""Regression test for #1162."""
lc = LightCurve(flux=[0, 0, 0])
# This statement raised an IndexError with Astropy v5.0rc2:
lc.bin(bins=2)
def test_river_plot_with_masked_flux():
"""Regression test for #1175."""
flux = Masked(np.random.normal(loc=1, scale=0.1, size=100))
flux_err = Masked(0.1*np.ones(100))
lc = LightCurve(time=np.linspace(1, 100, 100), flux=flux, flux_err=flux_err)
lc.plot_river(period=10.)
| 74,521
| 37.021429
| 117
|
py
|
lightkurve
|
lightkurve-main/tests/test_synthetic_data.py
|
"""Use synthetic data to verify lightkurve detrending and signal recovery.
"""
from __future__ import division, print_function
from astropy.utils.data import get_pkg_data_filename
from astropy.stats.bls import BoxLeastSquares
import numpy as np
import pytest
from scipy import stats
from lightkurve.targetpixelfile import KeplerTargetPixelFile
from lightkurve.correctors import SFFCorrector, PLDCorrector
# See `data/synthetic/README.md` for details about these synthetic test files
filename_synthetic_sine = get_pkg_data_filename(
"data/synthetic/synthetic-k2-sinusoid.targ.fits.gz"
)
filename_synthetic_transit = get_pkg_data_filename(
"data/synthetic/synthetic-k2-planet.targ.fits.gz"
)
filename_synthetic_flat = get_pkg_data_filename(
"data/synthetic/synthetic-k2-flat.targ.fits.gz"
)
def test_sine_sff():
"""Can we recover a synthetic sine curve using SFF and LombScargle?"""
# Retrieve the custom, known signal properties
tpf = KeplerTargetPixelFile(filename_synthetic_sine)
true_period = float(tpf.hdu[3].header["PERIOD"])
true_amplitude = float(tpf.hdu[3].header["SINE_AMP"])
# Run the SFF algorithm
lc = tpf.to_lightcurve()
corrector = SFFCorrector(lc)
cor_lc = corrector.correct(
tpf.pos_corr2,
tpf.pos_corr1,
niters=4,
windows=1,
bins=7,
restore_trend=True,
timescale=0.5,
)
# Verify that we get the period within ~20%
pg = cor_lc.to_periodogram(
method="lombscargle", minimum_period=1, maximum_period=10, oversample_factor=10
)
ret_period = pg.period_at_max_power.value
threshold = 0.2
assert (ret_period > true_period * (1 - threshold)) & (
ret_period < true_period * (1 + threshold)
)
# Verify that we get the amplitude to within 10%
n_cad = len(tpf.time)
design_matrix = np.vstack(
[
np.ones(n_cad),
np.sin(2.0 * np.pi * cor_lc.time.value / ret_period),
np.cos(2.0 * np.pi * cor_lc.time.value / ret_period),
]
).T
ATA = np.dot(design_matrix.T, design_matrix / cor_lc.flux_err[:, None] ** 2)
least_squares_coeffs = np.linalg.solve(
ATA, np.dot(design_matrix.T, cor_lc.flux / cor_lc.flux_err ** 2)
)
const, sin_weight, cos_weight = least_squares_coeffs
fractional_amplitude = (sin_weight ** 2 + cos_weight ** 2) ** (0.5) / const
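# The solve above implements weighted least squares via the normal equations,
# (X^T C^-1 X) beta = X^T C^-1 y, with X = [1, sin, cos] and C = diag(flux_err^2);
# the fractional amplitude is sqrt(sin_weight^2 + cos_weight^2) / const.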
assert (fractional_amplitude > true_amplitude / 1.1) & (
fractional_amplitude < true_amplitude * 1.1
)
def test_transit_sff():
"""Can we recover a synthetic exoplanet signal using SFF and BLS?"""
# Retrieve the custom, known signal properties
tpf = KeplerTargetPixelFile(filename_synthetic_transit)
true_period = float(tpf.hdu[3].header["PERIOD"])
true_rprs = float(tpf.hdu[3].header["RPRS"])
true_transit_lc = tpf.hdu[3].data["NOISELESS_INPUT"]
max_depth = 1 - np.min(true_transit_lc)
# Run the SFF algorithm
lc = tpf.to_lightcurve().normalize()
corrector = SFFCorrector(lc)
cor_lc = corrector.correct(
tpf.pos_corr2,
tpf.pos_corr1,
niters=4,
windows=1,
bins=7,
restore_trend=False,
timescale=0.5,
)
# Verify that we get the transit period within 5%
pg = cor_lc.to_periodogram(
method="bls",
minimum_period=1,
maximum_period=9,
frequency_factor=0.05,
duration=np.arange(0.1, 0.6, 0.1),
)
ret_period = pg.period_at_max_power.value
threshold = 0.05
assert (ret_period > true_period * (1 - threshold)) & (
ret_period < true_period * (1 + threshold)
)
# Verify that we get the transit depth in expected bounds
assert (pg.depth_at_max_power >= true_rprs ** 2) & (
pg.depth_at_max_power < max_depth
)
def test_transit_pld():
"""Can we recover a synthetic exoplanet signal using PLD and BLS?"""
# Retrieve the custom, known signal properties
tpf = KeplerTargetPixelFile(filename_synthetic_transit)
true_period = float(tpf.hdu[3].header["PERIOD"])
true_rprs = float(tpf.hdu[3].header["RPRS"])
true_transit_lc = tpf.hdu[3].data["NOISELESS_INPUT"]
max_depth = 1 - np.min(true_transit_lc)
# Run the PLD algorithm on a first pass
corrector = PLDCorrector(tpf)
cor_lc = corrector.correct()
pg = cor_lc.to_periodogram(
method="bls",
minimum_period=1,
maximum_period=9,
frequency_factor=0.05,
duration=np.arange(0.1, 0.6, 0.1),
)
# Re-do PLD with the suspected transits masked
cor_lc = corrector.correct(cadence_mask=~pg.get_transit_mask()).normalize()
pg = cor_lc.to_periodogram(
method="bls",
minimum_period=1,
maximum_period=9,
frequency_factor=0.05,
duration=np.arange(0.1, 0.6, 0.1),
)
# Verify that we get the period within ~5%
ret_period = pg.period_at_max_power.value
threshold = 0.05
assert (ret_period > true_period * (1 - threshold)) & (
ret_period < true_period * (1 + threshold)
)
# Verify that we get the transit depth in expected bounds
assert (pg.depth_at_max_power >= true_rprs ** 2) & (
pg.depth_at_max_power < max_depth
)
def test_sine_pld():
"""Can we recover a synthetic sine wave using PLD and LombScargle?"""
# Retrieve the custom, known signal properties
tpf = KeplerTargetPixelFile(filename_synthetic_sine)
true_period = float(tpf.hdu[3].header["PERIOD"])
true_amplitude = float(tpf.hdu[3].header["SINE_AMP"])
# Run the PLD algorithm
corrector = tpf.to_corrector("pld")
cor_lc = corrector.correct()
# Verify that we get the period within ~20%
pg = cor_lc.to_periodogram(
method="lombscargle", minimum_period=1, maximum_period=10, oversample_factor=10
)
ret_period = pg.period_at_max_power.value
threshold = 0.2
assert (ret_period > true_period * (1 - threshold)) & (
ret_period < true_period * (1 + threshold)
)
# Verify that we get the amplitude to within 10%
n_cad = len(tpf.time)
design_matrix = np.vstack(
[
np.ones(n_cad),
np.sin(2.0 * np.pi * cor_lc.time.value / ret_period),
np.cos(2.0 * np.pi * cor_lc.time.value / ret_period),
]
).T
ATA = np.dot(design_matrix.T, design_matrix / cor_lc.flux_err[:, None] ** 2)
least_squares_coeffs = np.linalg.solve(
ATA, np.dot(design_matrix.T, cor_lc.flux / cor_lc.flux_err ** 2)
)
const, sin_weight, cos_weight = least_squares_coeffs
fractional_amplitude = (sin_weight ** 2 + cos_weight ** 2) ** (0.5) / const
assert (fractional_amplitude > true_amplitude / 1.1) & (
fractional_amplitude < true_amplitude * 1.1
)
def test_detrending_residuals():
"""Test the detrending residual distributions"""
# Retrieve the custom, known signal properties
tpf = KeplerTargetPixelFile(filename_synthetic_flat)
# Run the SFF algorithm
lc = tpf.to_lightcurve()
corrector = SFFCorrector(lc)
cor_lc = corrector.correct(
tpf.pos_corr2, tpf.pos_corr1, niters=10, windows=5, bins=7, restore_trend=True
)
# Verify that we get a significant reduction in RMS
cdpp_improvement = lc.estimate_cdpp() / cor_lc.estimate_cdpp()
assert cdpp_improvement > 10.0
# The residuals should be Gaussian-"ish"
# Table 4.1 of Ivezic, Connolly, VanderPlas, Gray 2014
anderson_threshold = 1.57
resid_n_sigmas = (cor_lc.flux - np.mean(cor_lc.flux)) / cor_lc.flux_err
A_value, _, _ = stats.anderson(resid_n_sigmas)
assert A_value ** 2 < anderson_threshold
n_sigma = np.std(resid_n_sigmas)
assert n_sigma < 2.0
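# If `flux_err` is well calibrated, residuals expressed in units of their
# errors should have a standard deviation near 1; the factor-of-2 bound leaves
# headroom for imperfect error estimates.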
corrector = tpf.to_corrector("pld")
cor_lc = corrector.correct(restore_trend=False)
cdpp_improvement = lc.estimate_cdpp() / cor_lc.estimate_cdpp()
assert cdpp_improvement > 10.0
resid_n_sigmas = (cor_lc.flux - np.mean(cor_lc.flux)) / cor_lc.flux_err
A_value, crit, sig = stats.anderson(resid_n_sigmas)
assert A_value ** 2 < anderson_threshold
n_sigma = np.std(resid_n_sigmas)
assert n_sigma < 2.0
def test_centroids():
"""Test the estimate centroid method."""
for fn in (
filename_synthetic_sine,
filename_synthetic_transit,
filename_synthetic_flat,
):
tpf = KeplerTargetPixelFile(fn)
xraw, yraw = tpf.estimate_centroids()
xnorm = xraw - np.median(xraw)
ynorm = yraw - np.median(yraw)
xposc = tpf.pos_corr2 - np.median(tpf.pos_corr2)
yposc = tpf.pos_corr1 - np.median(tpf.pos_corr1)
rmax = np.max(np.sqrt((xnorm.value - xposc) ** 2 + (ynorm.value - yposc) ** 2))
# The centroids should agree to within a hundredth of a pixel.
assert rmax < 0.01
| 8,818
| 32.660305
| 87
|
py
|
lightkurve
|
lightkurve-main/tests/test_search.py
|
"""Test features of lightkurve that interact with the data archive at MAST.
Note: if you have the `pytest-remotedata` package installed, then tests flagged
with the `@pytest.mark.remote_data` decorator below will only run if the
`--remote-data` argument is passed to py.test. This allows tests to pass
if no internet connection is available.
"""
import os
import pytest
from numpy.testing import assert_almost_equal, assert_array_equal
import tempfile
from requests import HTTPError
from astropy.coordinates import SkyCoord
import astropy.units as u
from astropy.table import Table
import lightkurve as lk
from lightkurve.utils import LightkurveWarning, LightkurveError
from lightkurve.search import (
search_lightcurve,
search_targetpixelfile,
search_tesscut,
SearchResult,
SearchError,
log,
)
from lightkurve import (
KeplerTargetPixelFile,
TessTargetPixelFile,
TargetPixelFileCollection,
)
from .test_conf import use_custom_config_file, remove_custom_config
@pytest.mark.remote_data
def test_search_targetpixelfile():
# EPIC 210634047 was observed twice in long cadence
assert len(search_targetpixelfile("EPIC 210634047", mission="K2").table) == 2
# ...including Campaign 4
assert (
len(search_targetpixelfile("EPIC 210634047", mission="K2", campaign=4).table)
== 1
)
# KIC 11904151 (Kepler-10) was observed in LC in 15 Quarters
assert (
len(
search_targetpixelfile(
"KIC 11904151", mission="Kepler", cadence="long"
).table
)
== 15
)
# ...including quarter 11 but not 12:
assert (
len(
search_targetpixelfile(
"KIC 11904151", mission="Kepler", cadence="long", quarter=11
).unique_targets
)
== 1
)
assert (
len(
search_targetpixelfile(
"KIC 11904151", mission="Kepler", cadence="long", quarter=12
).table
)
== 0
)
search_targetpixelfile("KIC 11904151", quarter=11, cadence="long").download()
# with mission='TESS', it should return TESS observations
tic = "TIC 273985862" # Has been observed in multiple sectors including 1
assert len(search_targetpixelfile(tic, mission="TESS").table) > 1
assert (
len(search_targetpixelfile(tic, author="SPOC", sector=1, radius=100).table)
== 2
)
search_targetpixelfile(tic, author="SPOC", sector=1).download()
assert len(search_targetpixelfile("pi Mensae", sector=1, author="SPOC").table) == 1
# Issue #445: indexing with -1 should return the last index of the search result
assert len(search_targetpixelfile("pi Men")[-1]) == 1
@pytest.mark.remote_data
def test_search_split_campaigns():
"""Searches should should work for split campaigns.
K2 Campaigns 9, 10, and 11 were split into two halves for various technical
reasons (C91=C9a, C92=C9b, C101=C10a, C102=C10b, C111=C11a, C112=C11b).
We expect most targets from those campaigns to return two TPFs.
"""
campaigns = [9, 10, 11]
ids = ["EPIC 228162462", "EPIC 228726301", "EPIC 202975993"]
for c, idx in zip(campaigns, ids):
search = search_targetpixelfile(idx, campaign=c, cadence="long").table
assert len(search) == 2
@pytest.mark.remote_data
def test_search_lightcurve(caplog):
# We should also be able to resolve it by its name instead of KIC ID
# The name Kepler-10 somehow no longer works on MAST. So we use 2MASS instead:
# https://simbad.cds.unistra.fr/simbad/sim-id?Ident=%405506010&Name=Kepler-10
assert (
len(search_lightcurve("2MASS J19024305+5014286", mission="Kepler", cadence="long").table)
== 15
)
# An invalid KIC/EPIC ID or target name should be dealt with gracefully
search_lightcurve(-999)
assert "Could not resolve" in caplog.text
search_lightcurve("DOES_NOT_EXIST (UNIT TEST)")
assert "Could not resolve" in caplog.text
# If we ask for all cadence types, there should be four Kepler files returned
assert len(search_lightcurve("KIC 4914423", quarter=6, cadence="any").table) == 4
# ...and only one should have long cadence
assert len(search_lightcurve("KIC 4914423", quarter=6, cadence="long").table) == 1
# Should be able to resolve an ra/dec
assert len(search_lightcurve("297.5835, 40.98339", quarter=6).table) == 1
# Should be able to resolve a SkyCoord
c = SkyCoord("297.5835 40.98339", unit=(u.deg, u.deg))
search = search_lightcurve(c, quarter=6)
assert len(search.table) == 1
assert len(search) == 1
# We should be able to download a light curve
search.download()
# The second call to download should use the local cache
caplog.clear()
caplog.set_level("DEBUG")
search.download()
assert "found in local cache" in caplog.text
# with mission='TESS', it should return TESS observations
tic = "TIC 273985862"
assert len(search_lightcurve(tic, mission="TESS").table) > 1
assert (
len(
search_lightcurve(
tic, mission="TESS", author="spoc", sector=1, radius=100
).table
)
== 2
)
search_lightcurve(tic, mission="TESS", author="SPOC", sector=1).download()
assert len(search_lightcurve("pi Mensae", author="SPOC", sector=1).table) == 1
@pytest.mark.remote_data
def test_search_tesscut():
# Cutout by target name
assert len(search_tesscut("pi Mensae", sector=1).table) == 1
assert len(search_tesscut("pi Mensae").table) > 1
# Cutout by TIC ID
assert len(search_tesscut("TIC 206669860", sector=28).table) == 1
# Cutout by RA, dec string
search_string = search_tesscut("30.578761, -83.210593")
# Cutout by SkyCoord
c = SkyCoord("30.578761 -83.210593", unit=(u.deg, u.deg))
search_coords = search_tesscut(c)
# These should be identical
assert len(search_string.table) == len(search_coords.table)
# The coordinates below are beyond the edge of the sector 4 (camera 1-4) FFI
search_edge = search_tesscut("30.578761, 6.210593", sector=4)
assert len(search_edge.table) == 0
@pytest.mark.remote_data
def test_search_tesscut_download(caplog):
"""Can we download TESS cutouts via `search_cutout().download()?"""
try:
ra, dec = 30.578761, -83.210593
search_string = search_tesscut("{}, {}".format(ra, dec), sector=[1, 12])
# Make sure they can be downloaded with default size
tpf = search_string[1].download()
# Ensure the correct object has been returned
assert isinstance(tpf, TessTargetPixelFile)
# Ensure default size is 5x5
assert tpf.flux[0].shape == (5, 5)
assert len(tpf.targetid) > 0 # Regression test #473
assert tpf.sector == 12 # Regression test #696
# Ensure the WCS is valid (#434 regression test)
center_ra, center_dec = tpf.wcs.all_pix2world([[2.5, 2.5]], 1)[0]
assert_almost_equal(ra, center_ra, decimal=1)
assert_almost_equal(dec, center_dec, decimal=1)
# Download with different dimensions
tpfc = search_string.download_all(cutout_size=4, quality_bitmask="hard")
assert isinstance(tpfc, TargetPixelFileCollection)
assert tpfc[0].quality_bitmask == "hard" # Regression test for #494
assert tpfc[0].sector == 1 # Regression test #696
assert tpfc[1].sector == 12 # Regression test #696
# Ensure correct dimensions
assert tpfc[0].flux[0].shape == (4, 4)
# Download with rectangular dimensions?
rect_tpf = search_string[0].download(cutout_size=(3, 5))
assert rect_tpf.flux[0].shape == (3, 5)
# If we ask for the exact same cutout, do we get it from cache?
caplog.clear()
log.setLevel("DEBUG")
tpf_cached = search_string[0].download(cutout_size=(3, 5))
assert "Cached file found." in caplog.text
# test #1063 - ensure when download_dir is specified, there is no error
from tempfile import TemporaryDirectory
with TemporaryDirectory(dir=".", prefix="temp_lk_cache_4test_") as download_dir:
# ensure a relative path works (the bug in #1063)
tpf_w_download_dir = search_string[0].download(cutout_size=(3, 5), download_dir=download_dir)
assert tpf_w_download_dir.flux[0].shape == (3, 5)
tpf_w_download_dir = None # remove the tpf reference so that the underlying file can be deleted on Windows
except HTTPError as exc:
# TESSCut will occasionally return a "504 Gateway Timeout error" when
# it is overloaded. We don't want this to trigger a test failure.
if "504" not in str(exc):
raise exc
@pytest.mark.remote_data
def test_search_with_skycoord():
"""Can we pass both names, SkyCoord objects, and coordinate strings?"""
sr_name = search_targetpixelfile("KIC 11904151", mission="Kepler", cadence="long")
assert (
len(sr_name) == 15
) # Kepler-10 as observed during 15 quarters in long cadence
# Can we search using a SkyCoord objects?
sr_skycoord = search_targetpixelfile(
SkyCoord.from_name("KIC 11904151"), mission="Kepler", cadence="long"
)
assert_array_equal(
sr_name.table["productFilename"], sr_skycoord.table["productFilename"]
)
# Can we search using a string of "ra dec" decimals?
sr_decimal = search_targetpixelfile(
"285.67942179 +50.24130576", mission="Kepler", cadence="long"
)
assert_array_equal(
sr_name.table["productFilename"], sr_decimal.table["productFilename"]
)
# Can we search using a sexagesimal string?
sr_sexagesimal = search_targetpixelfile(
"19:02:43.1 +50:14:28.7", mission="Kepler", cadence="long"
)
assert_array_equal(
sr_name.table["productFilename"], sr_sexagesimal.table["productFilename"]
)
# Can we search using the KIC ID?
sr_kic = search_targetpixelfile("KIC 11904151", mission="Kepler", cadence="long")
assert_array_equal(
sr_name.table["productFilename"], sr_kic.table["productFilename"]
)
@pytest.mark.remote_data
def test_searchresult():
sr = search_lightcurve("KIC 11904151", mission="Kepler")
assert len(sr) == len(sr.table) # Tests SearchResult.__len__
assert len(sr[2:7]) == 5 # Tests SearchResult.__get__
assert len(sr[2]) == 1
assert "kplr" in sr.__repr__()
assert "kplr" in sr._repr_html_()
@pytest.mark.remote_data
def test_month():
# In short cadence, if we specify both quarter and month
sr = search_targetpixelfile("KIC 11904151", quarter=11, month=1, cadence="short")
assert len(sr) == 1
sr = search_targetpixelfile("KIC 11904151", quarter=11, month=[1, 3], cadence="short")
assert len(sr) == 2
@pytest.mark.remote_data
def test_collections():
# TargetPixelFileCollection class
assert (
len(search_targetpixelfile("EPIC 205998445", mission="K2", radius=900).table)
== 4
)
# LightCurveFileCollection class with set targetlimit
assert (
len(
search_lightcurve(
"EPIC 205998445", mission="K2", radius=900, limit=3, author="K2"
).download_all()
)
== 3
)
# if fewer targets are found than targetlimit, should still download all available
assert (
len(
search_targetpixelfile(
"EPIC 205998445", mission="K2", radius=900, limit=6
).table
)
== 4
)
# if download() is used when multiple files are available, should only download 1
with pytest.warns(LightkurveWarning, match="4 files available to download"):
assert isinstance(
search_targetpixelfile(
"EPIC 205998445", mission="K2", radius=900, author="K2"
).download(),
KeplerTargetPixelFile,
)
@pytest.mark.remote_data
def test_properties():
c = SkyCoord("297.5835 40.98339", unit=(u.deg, u.deg))
assert_almost_equal(search_targetpixelfile(c, quarter=6).ra, 297.5835)
assert_almost_equal(search_targetpixelfile(c, quarter=6).dec, 40.98339)
assert len(search_targetpixelfile(c, quarter=6).target_name) == 1
assert len(search_targetpixelfile(c, quarter=6).obsid) == 1
@pytest.mark.remote_data
def test_source_confusion():
# Regression test for issue #148.
# When obtaining the TPF for target 6507433, @benmontet noticed that
# a target 4 arcsec away was returned instead.
# See https://github.com/lightkurve/lightkurve/issues/148
desired_target = "KIC 6507433"
tpf = search_targetpixelfile(desired_target, quarter=8).download()
assert tpf.targetid == 6507433
def test_empty_searchresult():
"""Does an empty SearchResult behave gracefully?"""
sr = SearchResult(Table())
assert len(sr) == 0
str(sr)
with pytest.warns(LightkurveWarning, match="empty search"):
sr.download()
with pytest.warns(LightkurveWarning, match="empty search"):
sr.download_all()
@pytest.mark.remote_data
def test_issue_472():
"""Regression test for https://github.com/lightkurve/lightkurve/issues/472"""
# The line below previously threw an exception because the target was not
# observed in Sector 2; we're always expecting a SearchResult object (empty
# or not) rather than an exception.
# Whether or not this SearchResult is empty has changed over the years,
# because the target is only ~15 pixels beyond the FFI edge and the accuracy
# of the FFI footprint polygons at the MAST portal has changed at times.
search = search_tesscut("TIC41336498", sector=2)
assert isinstance(search, SearchResult)
@pytest.mark.remote_data
def test_corrupt_download_handling_case_empty():
"""When a corrupt file exists in the cache, make sure the user receives
a helpful error message.
This is a regression test for #511 and #1184.
For case the file is truncated, see test_read.py::test_file_corrupted
It cannot be done easily here because on Windows,
a similar test would result in PermissionError when `tempfile`
tries to do cleanup.
Some low-level code (probably astropy.io.fits) still holds a file handle
of the corrupted FITS file.
"""
with tempfile.TemporaryDirectory() as tmpdirname:
# Pretend a corrupt file exists at the expected cache location
expected_dir = os.path.join(
tmpdirname, "mastDownload", "Kepler", "kplr011904151_lc_Q111111110111011101"
)
expected_fn = os.path.join(
expected_dir, "kplr011904151-2010009091648_lpd-targ.fits.gz"
)
os.makedirs(expected_dir)
open(expected_fn, "w").close() # create "corrupt" i.e. empty file
with pytest.raises(LightkurveError) as err:
search_targetpixelfile("KIC 11904151", quarter=4, cadence="long").download(
download_dir=tmpdirname
)
assert "may be corrupt" in err.value.args[0]
assert expected_fn in err.value.args[0]
@pytest.mark.remote_data
def test_mast_http_error_handling(monkeypatch):
"""Regression test for #1211; ensure downloads yields an error when MAST download result in an error."""
from astroquery.mast import Observations
result = search_lightcurve("TIC 273985862", mission="TESS")
remote_url = result.table[0]["dataURL"]
def mock_http_error_response(*args, **kwargs):
"""Mock the `download_product()` response to simulate MAST returns HTTP error"""
print("DBG mock_http_error_response called")
return Table(data={
"Local Path": ["./mastDownload/acme_lc.fits"],
"Status": ["ERROR"],
"Message": ["HTTP Error 500: Internal Server Error"],
"URL": [remote_url],
})
monkeypatch.setattr(Observations, "download_products", mock_http_error_response)
with tempfile.TemporaryDirectory() as tmpdirname:
# ensure we don't hit the cache so that it'll always download from MAST
with pytest.raises(LightkurveError) as excinfo:
result[0].download(download_dir=tmpdirname)
assert "HTTP Error 500" in str(excinfo.value)
assert remote_url in str(excinfo.value)
@pytest.mark.remote_data
def test_indexerror_631():
"""Regression test for #631; avoid IndexError."""
# This previously triggered an exception:
result = search_lightcurve("KIC 8462852", sector=15, radius=1, author="spoc")
assert len(result) == 1
@pytest.mark.skip(
reason="TODO: issue re-appeared on 2020-01-11; needs to be revisited."
)
@pytest.mark.remote_data
def test_name_resolving_regression_764():
"""Due to a bug, MAST resolved "EPIC250105131" to a different position than
"EPIC 250105131". This regression test helps us verify that the bug does
not re-appear. Details: https://github.com/lightkurve/lightkurve/issues/764
"""
from astroquery.mast import MastClass
c1 = MastClass().resolve_object(objectname="EPIC250105131")
c2 = MastClass().resolve_object(objectname="EPIC 250105131")
assert c1.separation(c2).to("arcsec").value < 0.1
@pytest.mark.remote_data
def test_overlapping_targets_718():
"""Regression test for #718."""
# Searching for the following targets without radius should only return
# the requested targets, not their overlapping neighbors.
targets = ["KIC 5112705", "KIC 10058374", "KIC 5385723"]
for target in targets:
search = search_lightcurve(target, quarter=11, author="Kepler")
assert len(search) == 1
assert search.target_name[0] == f"kplr{target[4:].zfill(9)}"
# When using `radius=1` we should also retrieve the overlapping targets
search = search_lightcurve("KIC 5112705", quarter=11, author="Kepler", radius=1 * u.arcsec)
assert len(search) > 1
# Searching by `target_name` should not prevent a KIC identifier from
# working in a TESS data search
search = search_targetpixelfile(
"KIC 8462852", mission="TESS", sector=15, author="spoc"
)
assert len(search) == 1
@pytest.mark.remote_data
def test_tesscut_795():
"""Regression test for #795: make sure the __repr__.of a TESSCut
SearchResult works."""
str(search_tesscut("KIC 8462852")) # This raised a KeyError
@pytest.mark.remote_data
def test_download_flux_column():
"""Can we pass reader keyword arguments to the download method?"""
lc = search_lightcurve("Pi Men", author="SPOC", sector=12).download(
flux_column="sap_flux"
)
assert_array_equal(lc.flux, lc.sap_flux)
@pytest.mark.remote_data
def test_exptime_filtering():
"""Can we pass "fast", "short", exposure time to the cadence argument?"""
# Try `cadence="fast"`
res = search_lightcurve("AU Mic", sector=27, cadence="fast")
assert len(res) == 1
assert res.exptime[0].value == 20
# Try `cadence="short"`
res = search_lightcurve("AU Mic", sector=27, cadence="short")
assert len(res) == 1
assert res.table["t_exptime"][0] == 120
# Try `cadence=20`
res = search_lightcurve("AU Mic", sector=27, cadence=20)
assert len(res) == 1
assert res.table["t_exptime"][0] == 20
assert "fast" in res.table["productFilename"][0]
# Now do the same with the new exptime argument,
# because `cadence` may be deprecated.
# Try `exptime="fast"`
res = search_lightcurve("AU Mic", sector=27, exptime="fast")
assert len(res) == 1
assert res.exptime[0].value == 20
# Try `exptime="SHoRt"` -- mixed lower/uppercase is on purpose
res = search_lightcurve("AU Mic", sector=27, exptime="SHoRt")
assert len(res) == 1
assert res.table["t_exptime"][0] == 120
# Try `exptime=20`
res = search_lightcurve("AU Mic", sector=27, exptime=20)
assert len(res) == 1
assert res.table["t_exptime"][0] == 20
assert "fast" in res.table["productFilename"][0]
@pytest.mark.remote_data
def test_search_slicing_regression():
# Regression test: slicing after calling __repr__ failed.
res = search_lightcurve("AU Mic", exptime=20)
res.__repr__()
res[res.exptime.value < 100]
@pytest.mark.remote_data
def test_ffi_hlsp():
"""Can SPOC, QLP (FFI), and TESS-SPOC (FFI) light curves be accessed?"""
search = search_lightcurve(
"TrES-2b", mission="tess", author="any", sector=26
) # aka TOI 2140.01
assert "QLP" in search.table["author"]
assert "TESS-SPOC" in search.table["author"]
assert "SPOC" in search.table["author"]
# TESS-SPOC also produces TPFs
search = search_targetpixelfile("TrES-2b", mission="tess", author="any", sector=26)
assert "TESS-SPOC" in search.table["author"]
assert "SPOC" in search.table["author"]
@pytest.mark.remote_data
def test_qlp_ffi_lightcurve():
"""Can we search and download an MIT QLP FFI light curve?"""
search = search_lightcurve("TrES-2b", sector=26, author="qlp")
assert len(search) == 1
assert search.author[0] == "QLP"
assert search.exptime[0] == 30 * u.minute # Sector 26 had 30-minute FFIs
lc = search.download()
assert all(lc.flux == lc.kspsap_flux)
@pytest.mark.remote_data
def test_spoc_ffi_lightcurve():
"""Can we search and download a SPOC FFI light curve?"""
search = search_lightcurve("TrES-2b", sector=26, author="tess-spoc")
assert len(search) == 1
assert search.author[0] == "TESS-SPOC"
assert search.exptime[0] == 30 * u.minute # Sector 26 had 30-minute FFIs
lc = search.download()
assert all(lc.flux == lc.pdcsap_flux)
@pytest.mark.remote_data
def test_split_k2_campaigns():
"""Do split K2 campaign sections appear separately in search results?"""
# Campaign 9
search_c09 = search_targetpixelfile("EPIC 228162462", cadence="long", campaign=9)
assert search_c09.table["mission"][0] == "K2 Campaign 09a"
assert search_c09.table["mission"][1] == "K2 Campaign 09b"
# Campaign 10
search_c10 = search_targetpixelfile("EPIC 228725972", cadence="long", campaign=10)
assert search_c10.table["mission"][0] == "K2 Campaign 10a"
assert search_c10.table["mission"][1] == "K2 Campaign 10b"
# Campaign 11
search_c11 = search_targetpixelfile("EPIC 203830112", cadence="long", campaign=11)
assert search_c11.table["mission"][0] == "K2 Campaign 11a"
assert search_c11.table["mission"][1] == "K2 Campaign 11b"
@pytest.mark.remote_data
def test_customize_search_result_display():
search = search_lightcurve("TIC390021728")
# default display does not have proposal id
assert 'proposal_id' not in search.__repr__()
# custom config: has proposal_id in display
try:
use_custom_config_file("data/lightkurve_sr_cols_added.cfg")
# Note: a *different* TIC is used for the search here to avoid the
# complication of caching: if the same TIC were used, the cached result
# would be returned without considering the customization specified.
# The TIC used is in multiple sectors, with some rows having a proposal_id
# and some having none, so it also sanity-tests the actual proposal_id
# display logic.
search = search_lightcurve("TIC298734307")
assert 'proposal_id' in search.__repr__()
finally:
remove_custom_config() # restore default to avoid side effects
# test changing config at runtime
try:
lk.conf.search_result_display_extra_columns = ['sequence_number']
search = search_lightcurve("TIC169175503") # again use a different TIC to avoid caching complication
assert 'sequence_number' in search.__repr__()
finally:
lk.conf.search_result_display_extra_columns = [] # restore default to avoid side effects
# Test per-object customization
search.display_extra_columns = []
assert 'proposal_id' not in search.__repr__()
search.display_extra_columns = ['sequence_number', 'proposal_id'] # also support multiple columns
assert 'proposal_id' in search.__repr__()
assert 'sequence_number' in search.__repr__()
@pytest.mark.remote_data
def test_customize_search_result_display_case_nonexistent_column():
# Ensure that if an extra column specified is not in the search result,
# it is simply not shown (and does not generate an error)
#
# One typical case is that some columns are in the result of
# search_lightcurve() / search_targetpixelfile(), but not in those of search_tesscut()
search = search_lightcurve("TIC390021728")
search.display_extra_columns = ['foo_col']
assert 'foo_col' not in search.__repr__()
| 24,606
| 38.817152
| 119
|
py
|
lightkurve
|
lightkurve-main/tests/test_conf.py
|
from astropy.utils.data import get_pkg_data_filename
import os
from pathlib import Path
import shutil
import tempfile
import lightkurve as lk
def test_read_conf_from_file():
"""Sanity test to ensure lightkurve per-user config is in the expected location."""
# assert the default config
assert [] == lk.conf.search_result_display_extra_columns
# Use a custom config file, and assert the changes are read.
try:
use_custom_config_file("data/lightkurve_sr_cols_added.cfg")
assert ['proposal_id'] == lk.conf.search_result_display_extra_columns
finally:
# cleanup: remove the custom config file and its effect
remove_custom_config()
def use_custom_config_file(cfg_filepath):
"""Copy the config file in the given path (in tests) to the default lightkurve config file """
cfg_dest_path = Path(lk.config.get_config_dir(), 'lightkurve.cfg')
cfg_src_path = get_pkg_data_filename(cfg_filepath)
shutil.copy(cfg_src_path, cfg_dest_path)
lk.conf.reload()
def remove_custom_config():
cfg_dest_path = Path(lk.config.get_config_dir(), 'lightkurve.cfg')
cfg_dest_path.unlink()
lk.conf.reload()
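# Usage sketch for the two helpers above: tests that need a custom config
# should pair them in try/finally so a failure cannot leak config state into
# other tests, e.g.
#     try:
#         use_custom_config_file("data/lightkurve_sr_cols_added.cfg")
#         ...  # assertions against the customized lk.conf
#     finally:
#         remove_custom_config()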
def test_get_cache_dir():
# Sanity test default download dir
# We can't meaningfully assert the location in typical cases,
# because in the test environment it is overridden by the XDG_CACHE_HOME
# env var (typically pointing to some temp location)
actual_dir = lk.config.get_cache_dir()
assert os.path.isdir(actual_dir)
# Test customized default download dir
with tempfile.TemporaryDirectory() as expected_base:
try:
# Test that the implementation creates the dir if it does not exist
expected_dir = os.path.join(expected_base, "some_subdir")
lk.conf.cache_dir = expected_dir
actual_dir = lk.config.get_cache_dir()
assert expected_dir == actual_dir
assert os.path.isdir(actual_dir)
# repeated calls would work
# (e.g., it won't raise errors in attempting to mkdir for an existing dir)
actual_dir = lk.config.get_cache_dir()
assert expected_dir == actual_dir
finally:
lk.conf.cache_dir = None
| 2,226
| 34.349206
| 98
|
py
|
lightkurve
|
lightkurve-main/tests/test_collections.py
|
import warnings
import pytest
from astropy import units as u
from astropy.utils.data import get_pkg_data_filename
from astropy.utils.masked import Masked
import matplotlib.pyplot as plt
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve.lightcurve import LightCurve, KeplerLightCurve, TessLightCurve
from lightkurve.search import search_lightcurve
from lightkurve.targetpixelfile import KeplerTargetPixelFile, TessTargetPixelFile
from lightkurve.collections import LightCurveCollection, TargetPixelFileCollection
from lightkurve.utils import LightkurveWarning
filename_tpf_all_zeros = get_pkg_data_filename("data/test-tpf-all-zeros.fits")
filename_tpf_one_center = get_pkg_data_filename("data/test-tpf-non-zero-center.fits")
def test_collection_init():
lc = LightCurve(
time=np.arange(1, 5), flux=np.arange(1, 5), flux_err=np.arange(1, 5)
)
lc2 = LightCurve(
time=np.arange(10, 15), flux=np.arange(10, 15), flux_err=np.arange(10, 15)
)
lcc = LightCurveCollection([lc, lc2])
assert len(lcc) == 2
assert lcc.data == [lc, lc2]
str(lcc) # Does repr work?
lcc.plot()
plt.close("all")
def test_collection_append():
"""Does Collection.append() work?"""
lc = LightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=500,
)
lc2 = LightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=100,
)
lcc = LightCurveCollection([lc])
lcc.append(lc2)
assert len(lcc) == 2
def test_collection_stitch():
"""Does Collection.stitch() work?"""
lc = LightCurve(time=np.arange(1, 5), flux=np.ones(4))
lc2 = LightCurve(time=np.arange(5, 16), flux=np.ones(11))
lcc = LightCurveCollection([lc, lc2])
lc_stitched = lcc.stitch()
assert len(lc_stitched.flux) == 15
lc_stitched2 = lcc.stitch(corrector_func=lambda x: x * 2)
assert_array_equal(lc_stitched.flux * 2, lc_stitched2.flux)
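# A common real-world corrector_func normalizes each light curve before
# stitching, e.g. lcc.stitch(corrector_func=lambda lc: lc.normalize());
# stitch() applies a normalizing corrector by default.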
def test_collection_stitch_with_masked_values():
"""Test https://github.com/lightkurve/lightkurve/issues/1178 """
lc = LightCurve(time=np.arange(1, 5), flux=np.ones(4))
lc2 = LightCurve(
time=np.arange(5, 9),
flux=Masked([11, 11, np.nan, 11], mask=[False, False, True, False]),
)
lc_stitched = LightCurveCollection([lc, lc2]).stitch()
assert len(lc_stitched.flux) == 8
# ensure order (whether the first lc is masked or not) does not matter
lc3 = LightCurve(time=np.arange(9, 13), flux=np.ones(4))
lc_stitched = LightCurveCollection([lc2, lc3]).stitch()
assert len(lc_stitched.flux) == 8
def test_collection_getitem():
"""Tests Collection.__getitem__"""
lc = LightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc2 = LightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lcc = LightCurveCollection([lc])
lcc.append(lc2)
assert (lcc[0] == lc).all()
assert (lcc[1] == lc2).all()
with pytest.raises(IndexError):
lcc[50]
def test_collection_getitem_by_boolean_array():
"""Tests Collection.__getitem__ , case the argument is a mask, i.e, indexed by boolean array"""
lc0 = LightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc1 = LightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lc2 = LightCurve(
time=np.arange(15, 20),
flux=np.arange(15, 20),
flux_err=np.arange(15, 20),
targetid=23456,
)
lcc = LightCurveCollection([lc0, lc1, lc2])
lcc_f = lcc[[True, False, True]]
assert lcc_f.data == [lc0, lc2]
assert type(lcc_f) is LightCurveCollection
# boundary case: 1 element
lcc_f = lcc[[False, True, False]]
assert lcc_f.data == [lc1]
# boundary case: no element
lcc_f = lcc[[False, False, False]]
assert lcc_f.data == []
# other array-like input: tuple
lcc_f = lcc[(True, False, True)]
assert lcc_f.data == [lc0, lc2]
# other array-like input: ndarray
lcc_f = lcc[np.array([True, False, True])]
assert lcc_f.data == [lc0, lc2]
# boundary case: mask length not matching - shorter
with pytest.raises(IndexError):
lcc[[True, False]]
# boundary case: mask length not matching - longer
with pytest.raises(IndexError):
lcc[[True, False, True, True]]
def test_collection_getitem_by_other_array():
"""Tests Collection.__getitem__ , case the argument an non-boolean array"""
lc0 = LightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc1 = LightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lc2 = LightCurve(
time=np.arange(15, 20),
flux=np.arange(15, 20),
flux_err=np.arange(15, 20),
targetid=23456,
)
lcc = LightCurveCollection([lc0, lc1, lc2])
# case: an int array-like, follow ndarray behavior
lcc_f = lcc[[2, 0]]
assert lcc_f.data == [lc2, lc0]
lcc_f = lcc[np.array([2, 0])]
assert lcc_f.data == [lc2, lc0]
# support other int types in np too
lcc_f = lcc[np.array([np.int64(2), np.uint8(0)])]
assert lcc_f.data == [lc2, lc0]
# boundary condition: True / False are interpreted as 1/0 in a mixed bool/int array-like
lcc_f = lcc[[True, False, 2]]
assert lcc_f.data == [lc1, lc0, lc2]
# boundary condition: some index is out of bound
with pytest.raises(IndexError):
lcc[[2, 99]]
# boundary conditions: array-like of neither bool nor int, following ndarray behavior
with pytest.raises(IndexError):
lcc[["abc", "def"]]
with pytest.raises(IndexError):
lcc[[True, "def"]]
def test_collection_getitem_by_slices():
"""Tests Collection.__getitem__ by slices"""
lc0 = LightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc1 = LightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lc2 = LightCurve(
time=np.arange(15, 20),
flux=np.arange(15, 20),
flux_err=np.arange(15, 20),
targetid=23456,
)
lcc = LightCurveCollection([lc0, lc1, lc2])
assert lcc[:2].data == [lc0, lc1]
assert lcc[1:999].data == [lc1, lc2]
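# Taken together, the __getitem__ tests above show that collections follow
# ndarray-style indexing semantics:
#     lcc[1]                    -> single element
#     lcc[[True, False, True]]  -> boolean mask (length must match)
#     lcc[[2, 0]]               -> integer fancy indexing (order preserved)
#     lcc[1:999]                -> slice (out-of-range bounds are clipped)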
def test_collection_setitem():
"""Tests Collection. __setitem__"""
lc = LightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc2 = LightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lcc = LightCurveCollection([lc])
lcc.append(lc2)
lc3 = LightCurve(time=[1], targetid=55)
lcc[1] = lc3
assert lcc[1].time == lc3.time
lcc.append(lc2)
assert (lcc[2].time == lc2.time).all()
with pytest.raises(IndexError):
lcc[51] = 10
def test_tpfcollection():
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros)
tpf2 = KeplerTargetPixelFile(filename_tpf_one_center)
tpfc = TargetPixelFileCollection([tpf, tpf2])
assert len(tpfc) == 2
assert tpfc.data == [tpf, tpf2]
tpfc.append(tpf2)
assert len(tpfc) == 3
assert tpfc[0] == tpf
assert tpfc[1] == tpf2
assert tpfc[2] == tpf2
with pytest.raises(IndexError):
tpfc[51]
# ensure index by boolean array also works for TPFs
tpfc_f = tpfc[[False, True, True]]
assert tpfc_f.data == [tpf2, tpf2]
assert type(tpfc_f) is TargetPixelFileCollection
# Test __setitem__
tpf3 = KeplerTargetPixelFile(filename_tpf_one_center, targetid=55)
tpfc[1] = tpf3
assert tpfc[1] == tpf3
tpfc.append(tpf2)
assert tpfc[2] == tpf2
str(tpfc) # Regression test for #564
def test_tpfcollection_plot():
tpf = KeplerTargetPixelFile(filename_tpf_all_zeros)
tpf2 = KeplerTargetPixelFile(filename_tpf_one_center)
# Does plotting work with 3 TPFs?
coll = TargetPixelFileCollection([tpf, tpf2, tpf2])
coll.plot()
# Does plotting work with one TPF?
coll = TargetPixelFileCollection([tpf])
coll.plot()
plt.close("all")
@pytest.mark.remote_data
def test_stitch_repr():
"""Regression test for #884."""
lc = search_lightcurve("Pi Men", mission="TESS", author="SPOC", sector=1).download()
# The line below used to raise `ValueError: Unable to parse format string
# "{:10d}" for entry "70445.0" in column "cadenceno"`
LightCurveCollection((lc, lc)).stitch().__repr__()
def test_accessor_tess_sector():
lc0 = TessLightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc0.meta["SECTOR"] = 14
lc1 = TessLightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lc1.meta["SECTOR"] = 26
lcc = LightCurveCollection([lc0, lc1])
assert (lcc.sector == [14, 26]).all()
# The sector accessor can be used to generate a boolean array
# to support filtering the collection by sector
assert ((lcc.sector == 26) == [False, True]).all()
assert ((lcc.sector < 20) == [True, False]).all()
# boundary condition: some lightcurve objects do not have sector
lc2 = LightCurve(
time=np.arange(15, 20),
flux=np.arange(15, 20),
flux_err=np.arange(15, 20),
targetid=23456,
)
lcc.append(lc2)
# expecting [14, 26, np.nan]; two asserts are needed because NaN != NaN
assert (lcc.sector[:-1] == [14, 26]).all()
assert np.isnan(lcc.sector[-1])
# The sector accessor can be used to generate a boolean array
# to support filtering the collection by sector
assert ((lcc.sector == 26) == [False, True, False]).all()
assert ((lcc.sector < 20) == [True, False, False]).all()
# ensure it works for TPFs too.
with warnings.catch_warnings():
warnings.simplefilter("ignore", LightkurveWarning)
# Ignore "A Kepler data product is being opened using the `TessTargetPixelFile` class"
# the test only cares about the SECTOR header that it sets.
tpf = TessTargetPixelFile(filename_tpf_all_zeros)
tpf.hdu[0].header["SECTOR"] = 23
tpf2 = TessTargetPixelFile(filename_tpf_one_center)
# tpf2 has no sector defined
tpf3 = TessTargetPixelFile(filename_tpf_one_center)
tpf3.hdu[0].header["SECTOR"] = 1
tpfc = TargetPixelFileCollection([tpf, tpf2, tpf3])
assert (tpfc.sector == [23, None, 1]).all()
def test_accessor_kepler_quarter():
# scaled down version of tess sector test, as they share the same codepath
lc0 = KeplerLightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc0.meta["QUARTER"] = 2
lc1 = KeplerLightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lc1.meta["QUARTER"] = 1
lcc = LightCurveCollection([lc0, lc1])
assert (lcc.quarter == [2, 1]).all()
# ensure it works for TPFs too.
tpf0 = KeplerTargetPixelFile(filename_tpf_all_zeros)
tpf0.hdu[0].header["QUARTER"] = 2
tpf1 = KeplerTargetPixelFile(filename_tpf_one_center)
tpf1.hdu[0].header["QUARTER"] = 1
tpfc = TargetPixelFileCollection([tpf0, tpf1])
assert (tpfc.quarter == [2, 1]).all()
def test_accessor_k2_campaign():
# scaled down version of tess sector test, as they share the same codepath
lc0 = KeplerLightCurve(
time=np.arange(1, 5),
flux=np.arange(1, 5),
flux_err=np.arange(1, 5),
targetid=50000,
)
lc0.meta["CAMPAIGN"] = 2
lc1 = KeplerLightCurve(
time=np.arange(10, 15),
flux=np.arange(10, 15),
flux_err=np.arange(10, 15),
targetid=120334,
)
lc1.meta["CAMPAIGN"] = 1
lcc = LightCurveCollection([lc0, lc1])
assert (lcc.campaign == [2, 1]).all()
# ensure it works for TPFs too.
tpf0 = KeplerTargetPixelFile(filename_tpf_all_zeros)
tpf0.hdu[0].header["CAMPAIGN"] = 2
tpf1 = KeplerTargetPixelFile(filename_tpf_one_center)
tpf1.hdu[0].header["CAMPAIGN"] = 1
tpfc = TargetPixelFileCollection([tpf0, tpf1])
assert (tpfc.campaign == [2, 1]).all()
def test_unmergeable_columns():
"""Regression test for #954 and #1015."""
lc1 = LightCurve(data={'time': [1,2,3], 'x': [1,2,3]})
lc2 = LightCurve(data={'time': [1,2,3], 'x': [1,2,3]*u.electron/u.second})
with pytest.warns(LightkurveWarning, match="column types are incompatible"):
LightCurveCollection([lc1, lc2]).stitch()
with pytest.warns(LightkurveWarning, match="column types are incompatible"):
lc1.append(lc2)
| 13,286
| 31.328467
| 99
|
py
|
lightkurve
|
lightkurve-main/tests/__init__.py
|
import os
# Where is the test data located?
TESTROOT = os.path.abspath(os.path.dirname(__file__))
TESTDATA = os.path.join(TESTROOT, "data")
| 141
| 22.666667
| 53
|
py
|
lightkurve
|
lightkurve-main/tests/test_time.py
|
from astropy.time import Time
import numpy as np
def test_bkjd():
"""Tests for the Barycentric Kepler Julian Date (BKJD) time format."""
# Sanity checks
t0 = Time(0, format="bkjd")
assert t0.format == "bkjd"
assert t0.scale == "tdb"
assert t0.iso == "2009-01-01 12:00:00.000"
def test_btjd():
"""Tests for the Barycentric TESS Julian Date (BTJD) time format."""
# Sanity checks
t0 = Time(0, format="btjd")
assert t0.format == "btjd"
assert t0.scale == "tdb"
assert t0.iso == "2014-12-08 12:00:00.000"
# The test values below correspond to the header keywords (TSTART, TSTOP, DATE-OBS, DATE-END)
# found in s3://stpubdata/tess/public/ffi/s0031/2020/296/4-3/tess2020296001912-s0031-4-3-0198-s_ffic.fits
tstart, tstop = 2144.513656838462, 2144.520601048349
date_obs, date_end = '2020-10-22 00:18:30.767', '2020-10-22 00:28:30.747'
assert np.isclose(Time(date_obs).btjd, tstart, rtol=1e-10)
assert np.isclose(Time(date_end).btjd, tstop, rtol=1e-10)
assert np.isclose(Time(date_end).btjd, Time(date_end).tdb.btjd, rtol=1e-10)
assert Time(tstart, format="btjd").utc.iso[:22] == date_obs[:22]
assert Time(tstop, format="btjd").utc.iso[:22] == date_end[:22]
| 1,239
| 39
| 109
|
py
|
lightkurve
|
lightkurve-main/tests/test_periodogram.py
|
import sys
import pytest
import numpy as np
import matplotlib.pyplot as plt
from numpy.testing import assert_almost_equal, assert_array_equal
from astropy import units as u
from astropy.time import Time
from astropy.stats.bls import BoxLeastSquares
from astropy.utils.masked import Masked
from lightkurve.lightcurve import LightCurve
from lightkurve.periodogram import Periodogram
from lightkurve.utils import LightkurveWarning
def test_periodogram_basics():
"""Sanity check to verify that periodogram plotting works"""
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
lc = lc.normalize()
pg = lc.to_periodogram()
pg.plot()
plt.close()
pg.plot(view="period")
plt.close()
pg.show_properties()
pg.to_table()
str(pg)
lc[400:500] = np.nan
pg = lc.to_periodogram()
def test_periodogram_normalization():
"""Tests the normalization options"""
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
flux_unit="electron/second",
)
# Test amplitude normalization and correct units
pg = lc.to_periodogram(normalization="amplitude")
assert pg.power.unit == u.electron / u.second
pg = lc.normalize(unit="ppm").to_periodogram(normalization="amplitude")
assert pg.power.unit == u.cds.ppm
# Test PSD normalization and correct units
pg = lc.to_periodogram(freq_unit=u.microhertz, normalization="psd")
assert pg.power.unit == (u.electron / u.second) ** 2 / u.microhertz
pg = lc.normalize(unit="ppm").to_periodogram(
freq_unit=u.microhertz, normalization="psd"
)
assert pg.power.unit == u.cds.ppm ** 2 / u.microhertz
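# Rule of thumb verified above: "amplitude" normalization keeps the flux unit
# (e.g. electron/s or ppm), while "psd" normalization yields a power density
# in flux_unit**2 per frequency unit (e.g. ppm**2 / microhertz).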
def test_periodogram_warnings():
"""Tests if warnings are raised for non-normalized periodogram input"""
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
lc = lc.normalize(unit="ppm")
# Test amplitude normalization and correct units
pg = lc.to_periodogram(normalization="amplitude")
assert pg.power.unit == u.cds.ppm
pg = lc.to_periodogram(freq_unit=u.microhertz, normalization="psd")
assert pg.power.unit == u.cds.ppm ** 2 / u.microhertz
def test_periodogram_units():
"""Tests whether periodogram has correct units"""
# Fake, noisy data
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
flux_unit="electron/second",
)
p = lc.to_periodogram(normalization="amplitude")
# Has units
assert hasattr(p.frequency, "unit")
# Has the correct units
assert p.frequency.unit == 1.0 / u.day
assert p.power.unit == u.electron / u.second
assert p.period.unit == u.day
assert p.frequency_at_max_power.unit == 1.0 / u.day
assert p.max_power.unit == u.electron / u.second
def test_periodogram_can_find_periods():
"""Periodogram should recover the correct period"""
# Light curve that is noisy
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
# Add a 100 day period signal
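# (the argument sweeps 20*pi radians over ~1000 days, i.e. 10 full cycles,
# so the injected period is ~100 days)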
lc.flux += np.sin((lc.time.value / float(lc.time.value.max())) * 20 * np.pi)
lc = lc.normalize()
p = lc.to_periodogram(normalization="amplitude")
assert np.isclose(p.period_at_max_power.value, 100, rtol=1e-3)
def test_periodogram_slicing():
"""Tests whether periodograms can be sliced"""
# Fake, noisy data
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
lc = lc.normalize()
p = lc.to_periodogram()
assert len(p[0:200].frequency) == 200
# Test divide
orig = p.power.sum()
p /= 2
assert np.sum(p.power) == orig / 2
# Test multiplication
p *= 0
assert np.sum(p.power) == 0
# Test addition
p += 100
assert np.all(p.power.value >= 100)
# Test subtraction
p -= 100
assert np.sum(p.power) == 0
def test_assign_periods():
"""Test if you can assign periods and frequencies."""
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
periods = np.arange(1, 100) * u.day
lc = lc.normalize()
p = lc.to_periodogram(period=periods)
# Get around the floating point error
assert np.isclose(np.sum(periods - p.period).value, 0, rtol=1e-14)
frequency = np.arange(1, 100) * u.Hz
p = lc.to_periodogram(frequency=frequency)
assert np.isclose(np.sum(frequency - p.frequency).value, 0, rtol=1e-14)
def test_bin():
"""Test if you can bin the periodogram."""
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
lc = lc.normalize()
p = lc.to_periodogram()
assert len(p.bin(binsize=10, method="mean").frequency) == len(p.frequency) // 10
assert len(p.bin(binsize=10, method="median").frequency) == len(p.frequency) // 10
def test_smooth():
"""Test if you can smooth the periodogram and check any pitfalls"""
np.random.seed(42)
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
lc = lc.normalize()
p = lc.to_periodogram(normalization="psd", freq_unit=u.microhertz)
# Test boxkernel and logmedian methods
assert all(p.smooth(method="boxkernel").frequency == p.frequency)
assert all(p.smooth(method="logmedian").frequency == p.frequency)
# Check output units
assert p.smooth().power.unit == p.power.unit
# Check logmedian smooth that the mean of the smoothed power should
# be consistent with the mean of the power
assert np.isclose(
np.mean(p.smooth(method="logmedian").power.value),
np.mean(p.power.value),
atol=0.05 * np.mean(p.power.value),
)
# Can't pass filter_width below 0.
with pytest.raises(ValueError) as err:
p.smooth(method="boxkernel", filter_width=-5.0)
# Can't pass a filter_width in the wrong units
with pytest.raises(ValueError) as err:
p.smooth(method="boxkernel", filter_width=5.0 * u.day)
assert (
err.value.args[0] == "the `filter_width` parameter must have frequency units."
)
# Can't (yet) smooth a periodogram with non-evenly spaced frequencies
with pytest.raises(ValueError) as err:
p = np.arange(1, 100)
p = lc.to_periodogram(period=p)
p.smooth()
# Check logmedian doesn't work if I give the filter width units
with pytest.raises(ValueError) as err:
p.smooth(method="logmedian", filter_width=5.0 * u.day)
def test_flatten():
npts = 10000
np.random.seed(12069424)
lc = LightCurve(
time=np.arange(npts),
flux=np.random.normal(1, 0.1, npts),
flux_err=np.zeros(npts) + 0.1,
)
lc = lc.normalize()
p = lc.to_periodogram(normalization="psd", freq_unit=1 / u.day)
# Check method returns equal frequency
assert all(p.flatten(method="logmedian").frequency == p.frequency)
assert all(p.flatten(method="boxkernel").frequency == p.frequency)
# Check logmedian flatten of white noise returns mean of ~unity
assert np.isclose(
np.mean(p.flatten(method="logmedian").power.value), 1.0, atol=0.05
)
# Check return trend works
s, b = p.flatten(return_trend=True)
assert all(b.power == p.smooth(method="logmedian", filter_width=0.01).power)
assert all(s.power == p.flatten().power)
str(s)
s.plot()
plt.close()
def test_index():
"""Test if you can mask out periodogram"""
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
lc = lc.normalize()
p = lc.to_periodogram()
mask = (p.frequency > 0.1 * (1 / u.day)) & (p.frequency < 0.2 * (1 / u.day))
assert len(p[mask].frequency) == mask.sum()
def test_bls(caplog):
"""Test that BLS periodogram works and gives reasonable errors"""
lc = LightCurve(
time=np.linspace(0, 10, 200),
flux=np.random.normal(100, 0.1, 200),
flux_err=np.zeros(200) + 0.1,
)
# should be able to make a periodogram
p = lc.to_periodogram(method="bls")
keys = ["period", "power", "duration", "transit_time", "depth", "snr"]
assert np.all([key in dir(p) for key in keys])
p.plot()
plt.close()
# we should be able to specify some keywords
lc.to_periodogram(
method="bls", minimum_period=0.2, duration=0.1, maximum_period=0.5
)
# Ridiculous BLS spectra should break.
with pytest.raises(ValueError) as err:
lc.to_periodogram(method="bls", frequency_factor=0.00001)
assert err.value.args[0] == (
"`period` contains over 72000001 points.Periodogram is too large to evaluate. Consider setting `frequency_factor` to a higher value."
)
# Some warnings should be logged
p.compute_stats()
for record in caplog.records:
assert record.levelname == "WARNING"
assert len(caplog.records) == 3
assert "No period specified." in caplog.text
# No more warnings
stats = p.compute_stats(1, 0.1, 0)
assert len(caplog.records) == 3
assert isinstance(stats, dict)
# Some warnings should be logged
p.get_transit_model()
for record in caplog.records:
assert record.levelname == "WARNING"
assert len(caplog.records) == 6
assert "No period specified." in caplog.text
model = p.get_transit_model(1, 0.1, 0)
# No more warnings
assert len(caplog.records) == 6
# Model is LC
assert isinstance(model, LightCurve)
# Model is otherwise identical to LC
assert np.in1d(model.time, lc.time).all()
assert np.in1d(lc.time, model.time).all()
mask = p.get_transit_mask(1, 0.1, 0)
assert isinstance(mask, np.ndarray)
assert isinstance(mask[0], np.bool_)
assert mask.sum() < (~mask).sum()
assert isinstance(p.period_at_max_power, u.Quantity)
assert isinstance(p.duration_at_max_power, u.Quantity)
assert isinstance(p.transit_time_at_max_power, Time)
assert isinstance(p.depth_at_max_power, u.Quantity)
def test_bls_period_recovery():
"""Can BLS Periodogram recover the period of a synthetic light curve?"""
# Planet parameters
period = 2.0
transit_time = 0.5
duration = 0.1
depth = 0.2
flux_err = 0.01
# Create the synthetic light curve
time = np.arange(0, 20, 0.02)
flux = np.ones_like(time)
transit_mask = (
np.abs((time - transit_time + 0.5 * period) % period - 0.5 * period)
< 0.5 * duration
)
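# The expression above phase-folds the time array: shifting by half a period,
# taking the modulus, and shifting back maps each time to its offset from the
# nearest transit, so points within half a duration of a transit are flagged.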
flux[transit_mask] = 1.0 - depth
flux += flux_err * np.random.randn(len(time))
synthetic_lc = LightCurve(time=time, flux=flux)
# Can BLS recover the period?
bls_period = synthetic_lc.to_periodogram("bls").period_at_max_power
assert_almost_equal(bls_period.value, period, decimal=2)
# Does it work if we inject a sneaky NaN?
synthetic_lc.flux[10] = np.nan
bls_period = synthetic_lc.to_periodogram("bls").period_at_max_power
assert_almost_equal(bls_period.value, period, decimal=2)
# Does it work if all errors are NaNs?
# This is a regression test for issue #428
synthetic_lc.flux_err = np.array([np.nan] * len(time))
bls_period = synthetic_lc.to_periodogram("bls").period_at_max_power
assert_almost_equal(bls_period.value, period, decimal=2)
def test_error_messages():
"""Test periodogram raises reasonable errors"""
# Fake, noisy data
lc = LightCurve(
time=np.arange(1000),
flux=np.random.normal(1, 0.1, 1000),
flux_err=np.zeros(1000) + 0.1,
)
# Can't specify period range and frequency range
with pytest.raises(ValueError) as err:
lc.to_periodogram(maximum_frequency=0.1, minimum_period=10)
# Can't have a minimum frequency > maximum frequency
with pytest.raises(ValueError) as err:
lc.to_periodogram(maximum_frequency=0.1, minimum_frequency=10)
assert (
err.value.args[0] == "minimum_frequency cannot be larger than maximum_frequency"
)
# Can't have a minimum period > maximum period
with pytest.raises(ValueError) as err:
lc.to_periodogram(maximum_period=0.1, minimum_period=10)
assert err.value.args[0] == "minimum_period cannot be larger than maximum_period"
# Can't specify periods and frequencies
with pytest.raises(ValueError) as err:
lc.to_periodogram(frequency=np.arange(10), period=np.arange(10))
# No unitless periodograms
with pytest.raises(ValueError) as err:
Periodogram([0], [1])
assert err.value.args[0] == "frequency must be an `astropy.units.Quantity` object."
# No unitless periodograms
with pytest.raises(ValueError) as err:
Periodogram([0] * u.Hz, [1])
assert err.value.args[0] == "power must be an `astropy.units.Quantity` object."
# No single value periodograms
with pytest.raises(ValueError) as err:
Periodogram([0] * u.Hz, [1] * u.K)
assert err.value.args[0] == "frequency and power must have a length greater than 1."
# No uneven arrays
with pytest.raises(ValueError) as err:
Periodogram([0, 1, 2, 3] * u.Hz, [1, 1] * u.K)
assert err.value.args[0] == "frequency and power must have the same length."
# Bad frequency units
with pytest.raises(ValueError) as err:
Periodogram([0, 1, 2] * u.K, [1, 1, 1] * u.K)
assert err.value.args[0] == "Frequency must be in units of 1/time."
# Bad binning
with pytest.raises(ValueError) as err:
Periodogram([0, 1, 2] * u.Hz, [1, 1, 1] * u.K).bin(binsize=-2)
assert err.value.args[0] == "binsize must be larger than or equal to 1"
# Bad binning method
with pytest.raises(ValueError) as err:
Periodogram([0, 1, 2] * u.Hz, [1, 1, 1] * u.K).bin(method="not-implemented")
assert "method 'not-implemented' is not supported" in err.value.args[0]
# Bad smooth method
with pytest.raises(ValueError) as err:
Periodogram([0, 1, 2] * u.Hz, [1, 1, 1] * u.K).smooth(method="not-implemented")
assert "method 'not-implemented' is not supported" in err.value.args[0]
def test_bls_period():
"""Regression test for #514."""
lc = LightCurve(time=[1, 2, 3], flux=[4, 5, 6])
period = [1, 2, 3, 4, 5]
pg = lc.to_periodogram(method="bls", period=period)
assert_array_equal(pg.period.value, period)
with pytest.raises(ValueError) as err: # NaNs should raise a nice error message
lc.to_periodogram(method="bls", period=[1, 2, 3, np.nan, 4])
assert "period" in err.value.args[0]
def test_masked_flux_nans():
"""Do masked flux NaNs play well with astropy.timeseries.LombScargle?
This is a regression test for
https://github.com/lightkurve/lightkurve/pull/1162#issuecomment-983847177
"""
time = [1, 2, 3, 4]
flux = u.Quantity([1., np.nan, 1., 1.], unit="electron/s")
masked_flux = Masked(flux, mask=[False, True, False, False])
lc = LightCurve(time=time, flux=masked_flux)
pg = lc.to_periodogram()
assert not np.isnan(pg.power).all()
assert (pg.power == 0).all()
| 15,371
| 32.933775
| 145
|
py
|
lightkurve
|
lightkurve-main/tests/test_units.py
|
import pytest
import lightkurve as lk # necessary to enable the units tested below
from astropy import units as u
from .io.test_tasoc import TEST_TIC_ID
def test_custom_units():
"""Are ppt, ppm, and percent enabled AstroPy units?"""
u.Unit("ppt") # custom unit defined in lightkurve.units
u.Unit("ppm") # not enabled by default; enabled in lightkurve.units
u.Unit("percent") # standard AstroPy unit
@pytest.mark.remote_data
def test_tasoc_ppm_units():
"""Regression test for #956."""
lc = lk.search_lightcurve(f"TIC {TEST_TIC_ID}", author='TASOC').download()
assert lc['flux_corr'].unit == "ppm"
assert "Unrecognized" not in repr(lc['flux_corr'].unit)
| 699
| 30.818182
| 78
|
py
|
lightkurve
|
lightkurve-main/tests/test_utils.py
|
import pytest
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from lightkurve.utils import KeplerQualityFlags, TessQualityFlags
from lightkurve.utils import module_output_to_channel, channel_to_module_output
from lightkurve.utils import LightkurveWarning
from lightkurve.utils import running_mean, validate_method
from lightkurve.utils import bkjd_to_astropy_time, btjd_to_astropy_time
from lightkurve.utils import centroid_quadratic
from lightkurve.utils import show_citation_instructions
from lightkurve.lightcurve import LightCurve
def test_channel_to_module_output():
assert channel_to_module_output(1) == (2, 1)
assert channel_to_module_output(42) == (13, 2)
assert channel_to_module_output(84) == (24, 4)
assert channel_to_module_output(33) == (11, 1)
with pytest.raises(ValueError):
channel_to_module_output(0) # Invalid channel
def test_module_output_to_channel():
assert module_output_to_channel(2, 1) == 1
assert module_output_to_channel(13, 2) == 42
assert module_output_to_channel(24, 4) == 84
assert module_output_to_channel(11, 1) == 33
with pytest.raises(ValueError):
module_output_to_channel(0, 1) # Invalid module
with pytest.raises(ValueError):
module_output_to_channel(2, 0) # Invalid output
def test_running_mean():
assert_almost_equal(running_mean([1, 2, 3], window_size=1), [1, 2, 3])
assert_almost_equal(running_mean([1, 2, 3], window_size=2), [1.5, 2.5])
assert_almost_equal(running_mean([2, 2, 2], window_size=3), [2])
assert_almost_equal(running_mean([3, 4, 5], window_size=20), [4])
def test_quality_flag_decoding_kepler():
"""Can the QUALITY flags be parsed correctly?"""
flags = list(KeplerQualityFlags.STRINGS.items())
for key, value in flags:
assert KeplerQualityFlags.decode(key)[0] == value
# Can we recover combinations of flags?
assert KeplerQualityFlags.decode(flags[5][0] + flags[7][0]) == [
flags[5][1],
flags[7][1],
]
assert KeplerQualityFlags.decode(flags[3][0] + flags[4][0] + flags[5][0]) == [
flags[3][1],
flags[4][1],
flags[5][1],
]
def test_quality_flag_decoding_tess():
"""Can the QUALITY flags be parsed correctly?"""
flags = list(TessQualityFlags.STRINGS.items())
for key, value in flags:
assert TessQualityFlags.decode(key)[0] == value
# Can we recover combinations of flags?
assert TessQualityFlags.decode(flags[5][0] + flags[7][0]) == [
flags[5][1],
flags[7][1],
]
assert TessQualityFlags.decode(flags[3][0] + flags[4][0] + flags[5][0]) == [
flags[3][1],
flags[4][1],
flags[5][1],
]
def test_quality_flag_decoding_quantity_object():
"""Can a QUALITY flag that is a astropy quantity object be parsed correctly?
This is a regression test for https://github.com/lightkurve/lightkurve/issues/804
"""
from astropy.units.quantity import Quantity
flags = list(TessQualityFlags.STRINGS.items())
for key, value in flags:
assert TessQualityFlags.decode(Quantity(key, dtype="int32"))[0] == value
# Can we recover combinations of flags?
assert TessQualityFlags.decode(
Quantity(flags[5][0], dtype="int32") + Quantity(flags[7][0], dtype="int32")
) == [flags[5][1], flags[7][1]]
assert TessQualityFlags.decode(
Quantity(flags[3][0], dtype="int32")
+ Quantity(flags[4][0], dtype="int32")
+ Quantity(flags[5][0], dtype="int32")
) == [flags[3][1], flags[4][1], flags[5][1]]
def test_quality_mask():
"""Can we create a quality mask using KeplerQualityFlags?"""
quality = np.array([0, 0, 1])
assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=0))
assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask=None))
assert np.all(KeplerQualityFlags.create_quality_mask(quality, bitmask="none"))
assert (KeplerQualityFlags.create_quality_mask(quality, bitmask=1)).sum() == 2
assert (
KeplerQualityFlags.create_quality_mask(quality, bitmask="hardest")
).sum() == 2
# Do we see a ValueError if an invalid bitmask is passed?
with pytest.raises(ValueError) as err:
KeplerQualityFlags.create_quality_mask(quality, bitmask="invalidoption")
assert "not supported" in err.value.args[0]
@pytest.mark.xfail # Lightkurve v2.x no longer support NaNs in time values
def test_lightkurve_warning():
"""Can we ignore Lightkurve warnings?"""
with warnings.catch_warnings(record=True) as warns:
warnings.simplefilter("ignore", LightkurveWarning)
time = np.array([1, 2, 3, np.nan])
flux = np.array([1, 2, 3, 4])
lc = LightCurve(time=time, flux=flux)
assert len(warns) == 0
def test_validate_method():
assert validate_method("foo", ["foo", "bar"]) == "foo"
assert validate_method("FOO", ["foo", "bar"]) == "foo"
with pytest.raises(ValueError):
validate_method("foo", ["bar"])
def test_import():
"""Regression test for #605; `lk.utils` resolved to `lk.seismology.utils`"""
from lightkurve import utils
assert hasattr(utils, "btjd_to_astropy_time")
def test_btjd_bkjd_input():
"""Regression test for #607: are the bkjd/btjd functions tolerant?"""
# Kepler
assert bkjd_to_astropy_time(0).jd[0] == 2454833.0
for user_input in [[0], np.array([0])]:
assert_array_equal(bkjd_to_astropy_time(user_input).jd, np.array([2454833.0]))
# TESS
assert btjd_to_astropy_time(0).jd[0] == 2457000.0
for user_input in [[0], np.array([0])]:
assert_array_equal(btjd_to_astropy_time(user_input).jd, np.array([2457000.0]))
def test_centroid_quadratic():
"""Test basic operation of the quadratic centroiding function."""
# Single bright pixel in the center
data = np.ones((9, 9))
data[2, 5] = 10
col, row = centroid_quadratic(data)
assert np.isclose(row, 2) & np.isclose(col, 5)
# Two equally-bright pixels side by side
data = np.zeros((9, 9))
data[5, 1] = 5
data[5, 2] = 5
col, row = centroid_quadratic(data)
assert np.isclose(row, 5) & np.isclose(col, 1.5)
def test_centroid_quadratic_robustness():
"""Test quadratic centroids in edge cases; regression test for #610."""
# Brightest pixel in upper left
data = np.zeros((5, 5))
data[0, 0] = 1
centroid_quadratic(data)
# Brightest pixel in bottom right
data = np.zeros((5, 5))
data[-1, -1] = 1
centroid_quadratic(data)
# Data contains a NaN
data = np.zeros((5, 5))
data[0, 0] = np.nan
data[-1, -1] = 10
col, row = centroid_quadratic(data)
assert np.isfinite(col) & np.isfinite(row)
def test_show_citation_instructions():
show_citation_instructions()
| 6,830
| 34.952632
| 86
|
py
|
lightkurve
|
lightkurve-main/tests/test_convenience.py
|
from __future__ import division, print_function
import numpy as np
from numpy.testing import assert_almost_equal
from lightkurve.lightcurve import LightCurve
from lightkurve.convenience import estimate_cdpp
def test_cdpp():
"""Tests the estimate_cdpp() convenience function which wraps
`LightCurve.estimate_cdpp()`"""
flux = np.random.normal(loc=1, scale=100e-6, size=10000)
lc = LightCurve(time=np.arange(10000), flux=flux)
assert_almost_equal(estimate_cdpp(flux), lc.estimate_cdpp())
| 510
| 30.9375
| 65
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_detect.py
|
import os
from astropy.io import fits
from lightkurve import PACKAGEDIR
from lightkurve.io import detect_filetype
from .. import TESTDATA
def test_detect_filetype():
"""Can we detect the correct filetype?"""
k2_path = os.path.join(TESTDATA, "test-tpf-star.fits")
tess_path = os.path.join(TESTDATA, "tess25155310-s01-first-cadences.fits.gz")
assert detect_filetype(fits.open(k2_path)) == "KeplerTargetPixelFile"
assert detect_filetype(fits.open(tess_path)) == "TessTargetPixelFile"
| 506
| 28.823529
| 81
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_everest.py
|
import pytest
from lightkurve import search_lightcurve
@pytest.mark.remote_data
def test_search_everest():
"""Can we search and download an EVEREST light curve?"""
search = search_lightcurve("GJ 9827", author="EVEREST", campaign=12)
assert len(search) == 1
assert search.table["author"][0] == "EVEREST"
lc = search.download()
assert type(lc).__name__ == "KeplerLightCurve"
assert lc.campaign == 12
| 429
| 27.666667
| 72
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_k2sff.py
|
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve.io.k2sff import read_k2sff_lightcurve
from lightkurve import search_lightcurve
@pytest.mark.remote_data
def test_read_k2sff():
"""Can we read K2SFF files?"""
url = "http://archive.stsci.edu/hlsps/k2sff/c16/212100000/00236/hlsp_k2sff_k2_lightcurve_212100236-c16_kepler_v1_llc.fits"
f = fits.open(url)
# Verify different extensions
fluxes = []
for ext in ["BESTAPER", "CIRC_APER9"]:
lc = read_k2sff_lightcurve(url, ext=ext)
assert type(lc).__name__ == "KeplerLightCurve"
# Are `time` and `flux` consistent with the FITS file?
assert_array_equal(f[ext].data["T"], lc.time.value)
assert_array_equal(f[ext].data["FCOR"], lc.flux.value)
fluxes.append(lc.flux)
# Different extensions should show different fluxes
assert not np.array_equal(fluxes[0], fluxes[1])
@pytest.mark.remote_data
def test_search_k2sff():
"""Can we search and download a K2SFF light curve?"""
# Try an early campaign
search = search_lightcurve("K2-18", author="K2SFF", campaign=1)
assert len(search) == 1
assert search.table["author"][0] == "K2SFF"
lc = search.download()
assert type(lc).__name__ == "KeplerLightCurve"
assert lc.campaign == 1
# Try a late campaign
lc = search_lightcurve("GJ 9827", author="K2SFF", campaign=19).download()
assert type(lc).__name__ == "KeplerLightCurve"
assert lc.targetid == 246389858
assert lc.campaign == 19
| 1,568
| 34.659091
| 126
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_kepseismic.py
|
import pytest
from astropy.io import fits
import numpy as np
from lightkurve.io.kepseismic import read_kepseismic_lightcurve
from lightkurve.io.detect import detect_filetype
@pytest.mark.remote_data
def test_detect_kepseismic():
"""Can we detect the correct format for KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
f = fits.open(url)
assert detect_filetype(f) == "KEPSEISMIC"
@pytest.mark.remote_data
def test_read_kepseismic():
"""Can we read KEPSEISMIC files?"""
url = "https://archive.stsci.edu/hlsps/kepseismic/001200000/92147/20d-filter/hlsp_kepseismic_kepler_phot_kplr001292147-20d_kepler_v1_cor-filt-inp.fits"
with fits.open(url, mode="readonly") as hdulist:
fluxes = hdulist[1].data["FLUX"]
lc = read_kepseismic_lightcurve(url)
flux_lc = lc.flux.value
assert np.sum(fluxes) == np.sum(flux_lc)
| 1,014
| 31.741935
| 155
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_cdips.py
|
import pytest
from astropy.io import fits
import numpy as np
from numpy.testing import assert_array_equal
from lightkurve import search_lightcurve
from lightkurve.io.cdips import read_cdips_lightcurve
from lightkurve.io.detect import detect_filetype
TEST_TIC_ID = 104669918
TEST_FIT_URL = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/cdips/s0014/cam1_ccd1/hlsp_cdips_tess_ffi_gaiatwo0002030897830235411200-s0014-cam1-ccd1_tess_v01_llc.fits"
@pytest.mark.remote_data
def test_detect_cdips():
"""Can we detect the correct format for CDIPS files?"""
url = TEST_FIT_URL
f = fits.open(url)
assert detect_filetype(f) == "CDIPS"
@pytest.mark.remote_data
def test_read_cdips():
"""Can we read CDIPS files?"""
url = TEST_FIT_URL
f = fits.open(url)
# Verify different extensions
fluxes = []
# Test instrumental flux and magnitude, and detrended magnitudes
exts = [f'IFL{ap}' for ap in [1,2,3]]
exts.extend([f'IRM{ap}' for ap in [1,2,3]])
exts.extend([f'TFA{ap}' for ap in [1,2,3]])
exts.extend([f'PCA{ap}' for ap in [1,2,3]])
for ext in exts:
lc = read_cdips_lightcurve(url, flux_column=ext)
assert type(lc).__name__ == "TessLightCurve"
assert lc.meta["FLUX_ORIGIN"] == ext.lower()
# Are `time` and `flux` consistent with the FITS file?
assert_array_equal(f[1].data['TMID_BJD'][lc.meta['QUALITY_MASK']],
lc.time.value)
assert_array_equal(f[1].data[ext][lc.meta['QUALITY_MASK']],
lc.flux.value)
fluxes.append(lc.flux)
# Different extensions should show different fluxes
for i in range(11):
assert not np.array_equal(fluxes[i].value, fluxes[i+1].value)
@pytest.mark.remote_data
def test_search_cdips():
"""Can we search and download a cdips light curve?"""
search = search_lightcurve(f"TIC {TEST_TIC_ID}", author="CDIPS")
assert len(search) >= 1
assert search.table["author"][0] == "CDIPS"
lc = search.download()
assert type(lc).__name__ == "TessLightCurve"
assert hasattr(lc, "sector")
| 2,117
| 33.16129
| 182
|
py
|
lightkurve
|
lightkurve-main/tests/io/test_tglc.py
|
import numpy as np
import pytest
from astropy.io import fits
from numpy.testing import assert_array_equal
from lightkurve import search_lightcurve
from lightkurve.io.detect import detect_filetype
from lightkurve.io.tglc import read_tglc_lightcurve
@pytest.mark.remote_data
def test_tglc():
"""Can we read in TGLC light curves?"""
url = "https://mast.stsci.edu/api/v0.1/Download/file?uri=mast:HLSP/tglc/s0001/cam4-ccd2/0046/2474/2688/9442/hlsp_tglc_tess_ffi_gaiaid-4624742688944261376-s0001-cam4-ccd2_tess_v1_llc.fits"
with fits.open(url, mode="readonly") as hdulist:
# Can we auto-detect a TGLC file?
assert detect_filetype(hdulist) == "TGLC"
# Are the correct fluxes read in?
lc = read_tglc_lightcurve(url, quality_bitmask=0)
assert lc.meta["AUTHOR"] == "TGLC"
assert lc.meta["FLUX_ORIGIN"] == "cal_psf_flux"
assert_array_equal(lc.flux.value, hdulist[1].data["cal_psf_flux"])
assert np.issubdtype(lc["cadenceno"].dtype, np.integer)
@pytest.mark.remote_data
def test_search_tglc():
"""Can we search and download a TGLC light curve?"""
# Try an early sector
search = search_lightcurve("TIC 140898436", author="TGLC", sector=1, mission="TESS")
assert len(search) == 1
assert search.table["author"][0] == "TGLC"
lc = search.download()
assert type(lc).__name__ == "TessLightCurve"
assert lc.targetid == 140898436
assert lc.sector == 1
assert lc.camera == 4
assert lc.ccd == 2
| 1,499
| 37.461538
| 191
|
py
|