repo stringlengths 2 99 | file stringlengths 14 239 | code stringlengths 20 3.99M | file_length int64 20 3.99M | avg_line_length float64 9.73 128 | max_line_length int64 11 86.4k | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/loading.py | from pathlib import Path
import mmcv
import numpy as np
from mmcv.fileio import FileClient
from mmedit.core.mask import (bbox2mask, brush_stroke_mask, get_irregular_mask,
random_bbox)
from ..registry import PIPELINES
@PIPELINES.register_module()
class LoadImageFromFile:
    """Load image from file.

    Args:
        io_backend (str): io backend where images are store. Default: 'disk'.
        key (str): Keys in results to find corresponding path. Default: 'gt'.
        flag (str): Loading flag for images. Default: 'color'.
        channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
            Default: 'bgr'.
        convert_to (str | None): The color space of the output image. If None,
            no conversion is conducted. Default: None.
        save_original_img (bool): If True, maintain a copy of the image in
            `results` dict with name of `f'ori_{key}'`. Default: False.
        use_cache (bool): If True, keep every decoded image in an in-memory
            cache keyed by file path. Default: False.
        backend (str): The image loading backend type. Options are `cv2`,
            `pillow`, and 'turbojpeg'. Default: None.
        kwargs (dict): Args for file client.
    """

    def __init__(self,
                 io_backend='disk',
                 key='gt',
                 flag='color',
                 channel_order='bgr',
                 convert_to=None,
                 save_original_img=False,
                 use_cache=False,
                 backend=None,
                 **kwargs):
        self.io_backend = io_backend
        self.key = key
        self.flag = flag
        self.save_original_img = save_original_img
        self.channel_order = channel_order
        self.convert_to = convert_to
        self.kwargs = kwargs
        # The FileClient is created lazily on first use.
        self.file_client = None
        self.use_cache = use_cache
        self.cache = None
        self.backend = backend

    def _read_image(self, filepath):
        """Read bytes from ``filepath`` and decode them into an HWC image."""
        img_bytes = self.file_client.get(filepath)
        return mmcv.imfrombytes(
            img_bytes,
            flag=self.flag,
            channel_order=self.channel_order,
            backend=self.backend)  # HWC

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        filepath = str(results[f'{self.key}_path'])
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)

        if self.use_cache:
            if self.cache is None:
                self.cache = dict()
            if filepath in self.cache:
                img = self.cache[filepath]
            else:
                img = self._read_image(filepath)
                self.cache[filepath] = img
        else:
            img = self._read_image(filepath)

        if self.convert_to is not None:
            # Only conversion to the Y (luma) channel is supported.
            if self.channel_order == 'bgr' and self.convert_to.lower() == 'y':
                img = mmcv.bgr2ycbcr(img, y_only=True)
            elif self.channel_order == 'rgb':
                img = mmcv.rgb2ycbcr(img, y_only=True)
            else:
                raise ValueError('Currently support only "bgr2ycbcr" or '
                                 '"rgb2ycbcr".')
            if img.ndim == 2:
                # Keep a trailing channel axis so downstream transforms can
                # always assume HWC layout.
                img = np.expand_dims(img, axis=2)

        results[self.key] = img
        results[f'{self.key}_path'] = filepath
        results[f'{self.key}_ori_shape'] = img.shape
        if self.save_original_img:
            results[f'ori_{self.key}'] = img.copy()

        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (
            f'(io_backend={self.io_backend}, key={self.key}, '
            f'flag={self.flag}, save_original_img={self.save_original_img}, '
            f'channel_order={self.channel_order}, use_cache={self.use_cache})')
        return repr_str
@PIPELINES.register_module()
class LoadImageFromFileList(LoadImageFromFile):
    """Load image from file list.

    It accepts a list of path and read each frame from each path. A list
    of frames will be returned.

    Args:
        io_backend (str): io backend where images are store. Default: 'disk'.
        key (str): Keys in results to find corresponding path. Default: 'gt'.
        flag (str): Loading flag for images. Default: 'color'.
        channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
            Default: 'bgr'.
        convert_to (str | None): The color space of the output image. If None,
            no conversion is conducted. Default: None.
        save_original_img (bool): If True, maintain a copy of the image in
            `results` dict with name of `f'ori_{key}'`. Default: False.
        kwargs (dict): Args for file client.
    """

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        filepaths = results[f'{self.key}_path']
        if not isinstance(filepaths, list):
            raise TypeError(
                f'filepath should be list, but got {type(filepaths)}')
        filepaths = [str(v) for v in filepaths]

        imgs = []
        shapes = []
        if self.save_original_img:
            ori_imgs = []
        for filepath in filepaths:
            img_bytes = self.file_client.get(filepath)
            img = mmcv.imfrombytes(
                img_bytes, flag=self.flag,
                channel_order=self.channel_order)  # HWC

            # convert to y-channel, if specified
            if self.convert_to is not None:
                if self.channel_order == 'bgr' and self.convert_to.lower(
                ) == 'y':
                    img = mmcv.bgr2ycbcr(img, y_only=True)
                elif self.channel_order == 'rgb':
                    img = mmcv.rgb2ycbcr(img, y_only=True)
                else:
                    # the second supported conversion is rgb2ycbcr
                    raise ValueError('Currently support only "bgr2ycbcr" or '
                                     '"rgb2ycbcr".')
                if img.ndim == 2:
                    img = np.expand_dims(img, axis=2)

            imgs.append(img)
            shapes.append(img.shape)
            if self.save_original_img:
                ori_imgs.append(img.copy())

        results[self.key] = imgs
        results[f'{self.key}_path'] = filepaths
        results[f'{self.key}_ori_shape'] = shapes
        if self.save_original_img:
            results[f'ori_{self.key}'] = ori_imgs

        return results
@PIPELINES.register_module()
class RandomLoadResizeBg:
    """Randomly load a background image and resize it.

    Required key is "fg", added key is "bg".

    Args:
        bg_dir (str): Path of directory to load background images from.
        io_backend (str): io backend where images are store. Default: 'disk'.
        flag (str): Loading flag for images. Default: 'color'.
        channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
            Default: 'bgr'.
        kwargs (dict): Args for file client.
    """

    def __init__(self,
                 bg_dir,
                 io_backend='disk',
                 flag='color',
                 channel_order='bgr',
                 **kwargs):
        self.bg_dir = bg_dir
        # snapshot the directory listing once, at construction time
        self.bg_list = list(mmcv.scandir(bg_dir))
        self.io_backend = io_backend
        self.flag = flag
        self.channel_order = channel_order
        self.kwargs = kwargs
        self.file_client = None

    def __call__(self, results):
        """Pick a random background and resize it to the foreground size.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        target_h, target_w = results['fg'].shape[:2]
        choice = np.random.randint(len(self.bg_list))
        bg_path = Path(self.bg_dir).joinpath(self.bg_list[choice])
        raw_bytes = self.file_client.get(bg_path)
        loaded = mmcv.imfrombytes(
            raw_bytes, flag=self.flag,
            channel_order=self.channel_order)  # HWC
        results['bg'] = mmcv.imresize(
            loaded, (target_w, target_h), interpolation='bicubic')
        return results

    def __repr__(self):
        return self.__class__.__name__ + f"(bg_dir='{self.bg_dir}')"
@PIPELINES.register_module()
class LoadMask:
    """Load Mask for multiple types.

    For different types of mask, users need to provide the corresponding
    config dict.

    Example config for bbox:

    .. code-block:: python

        config = dict(img_shape=(256, 256), max_bbox_shape=128)

    Example config for irregular:

    .. code-block:: python

        config = dict(
            img_shape=(256, 256),
            num_vertices=(4, 12),
            max_angle=4.,
            length_range=(10, 100),
            brush_width=(10, 40),
            area_ratio_range=(0.15, 0.5))

    Example config for ff:

    .. code-block:: python

        config = dict(
            img_shape=(256, 256),
            num_vertices=(4, 12),
            mean_angle=1.2,
            angle_range=0.4,
            brush_width=(12, 40))

    Example config for set:

    .. code-block:: python

        config = dict(
            mask_list_file='xxx/xxx/ooxx.txt',
            prefix='/xxx/xxx/ooxx/',
            io_backend='disk',
            flag='unchanged',
            file_client_kwargs=dict()
        )

    The mask_list_file contains the list of mask file name like this:
        test1.jpeg
        test2.jpeg
        ...

    The prefix gives the data path.

    Args:
        mask_mode (str): Mask mode in ['bbox', 'irregular', 'ff', 'set',
            'file'].
            * bbox: square bounding box masks.
            * irregular: irregular holes.
            * ff: free-form holes from DeepFillv2.
            * set: randomly get a mask from a mask set.
            * file: get mask from 'mask_path' in results.
        mask_config (dict): Params for creating masks. Each type of mask needs
            different configs.
    """

    def __init__(self, mask_mode='bbox', mask_config=None):
        self.mask_mode = mask_mode
        self.mask_config = dict() if mask_config is None else mask_config
        assert isinstance(self.mask_config, dict)
        # set init info if needed in some modes
        self._init_info()

    def _init_info(self):
        """Prepare mode-specific state; 'set' and 'file' need a file client."""
        if self.mask_mode == 'set':
            # get mask list information: one file name per line; only the
            # first whitespace-separated token of each line is used
            self.mask_list = []
            mask_list_file = self.mask_config['mask_list_file']
            with open(mask_list_file, 'r') as f:
                for line in f:
                    line_split = line.strip().split(' ')
                    mask_name = line_split[0]
                    self.mask_list.append(
                        Path(self.mask_config['prefix']).joinpath(mask_name))
            self.mask_set_size = len(self.mask_list)
            self.io_backend = self.mask_config['io_backend']
            self.flag = self.mask_config['flag']
            self.file_client_kwargs = self.mask_config['file_client_kwargs']
            self.file_client = None
        elif self.mask_mode == 'file':
            # 'file' mode always reads from local disk without decoding flags
            self.io_backend = 'disk'
            self.flag = 'unchanged'
            self.file_client_kwargs = dict()
            self.file_client = None

    def _get_random_mask_from_set(self):
        """Randomly pick one mask from the preloaded set and binarize it."""
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend,
                                          **self.file_client_kwargs)
        # np.random.randint's upper bound is exclusive, so the index is
        # always within range
        mask_idx = np.random.randint(0, self.mask_set_size)
        mask_bytes = self.file_client.get(self.mask_list[mask_idx])
        mask = mmcv.imfrombytes(mask_bytes, flag=self.flag)  # HWC, BGR
        if mask.ndim == 2:
            mask = np.expand_dims(mask, axis=2)
        else:
            # keep only the first channel so the mask is always HW1
            mask = mask[:, :, 0:1]
        # binarize: any nonzero pixel counts as masked
        mask[mask > 0] = 1.
        return mask

    def _get_mask_from_file(self, path):
        """Load the mask stored at ``path`` and binarize it to 0/1."""
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend,
                                          **self.file_client_kwargs)
        mask_bytes = self.file_client.get(path)
        mask = mmcv.imfrombytes(mask_bytes, flag=self.flag)  # HWC, BGR
        if mask.ndim == 2:
            mask = np.expand_dims(mask, axis=2)
        else:
            # keep only the first channel so the mask is always HW1
            mask = mask[:, :, 0:1]
        mask[mask > 0] = 1.
        return mask

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        if self.mask_mode == 'bbox':
            mask_bbox = random_bbox(**self.mask_config)
            mask = bbox2mask(self.mask_config['img_shape'], mask_bbox)
            # bbox mode additionally exposes the sampled bbox for later
            # transforms (e.g. GetSpatialDiscountMask)
            results['mask_bbox'] = mask_bbox
        elif self.mask_mode == 'irregular':
            mask = get_irregular_mask(**self.mask_config)
        elif self.mask_mode == 'set':
            mask = self._get_random_mask_from_set()
        elif self.mask_mode == 'ff':
            mask = brush_stroke_mask(**self.mask_config)
        elif self.mask_mode == 'file':
            mask = self._get_mask_from_file(results['mask_path'])
        else:
            raise NotImplementedError(
                f'Mask mode {self.mask_mode} has not been implemented.')
        results['mask'] = mask
        return results

    def __repr__(self):
        return self.__class__.__name__ + f"(mask_mode='{self.mask_mode}')"
@PIPELINES.register_module()
class GetSpatialDiscountMask:
    """Get spatial discounting mask constant.

    Spatial discounting mask is first introduced in:
    Generative Image Inpainting with Contextual Attention.

    Args:
        gamma (float, optional): Gamma for computing spatial discounting.
            Defaults to 0.99.
        beta (float, optional): Beta for computing spatial discounting.
            Defaults to 1.5.
    """

    def __init__(self, gamma=0.99, beta=1.5):
        self.gamma = gamma
        self.beta = beta

    def spatial_discount_mask(self, mask_width, mask_height):
        """Generate spatial discounting mask constant.

        Args:
            mask_width (int): The width of bbox hole.
            mask_height (int): The height of bbox height.

        Returns:
            np.ndarray: Spatial discounting mask of shape (h, w, 1).
        """
        # Per-pixel (row, col) coordinates inside the hole.
        cols, rows = np.meshgrid(np.arange(mask_width), np.arange(mask_height))
        coords = np.stack([rows, cols], axis=2)
        # Distance to the nearest border along each axis.
        border_dist = np.minimum(
            coords, [mask_height - 1, mask_width - 1] - coords)
        # gamma ** (beta * distance), keeping the larger of the two axes.
        return (self.gamma**(border_dist * self.beta)).max(
            axis=2, keepdims=True)

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        bbox = results['mask_bbox']
        mask = results['mask']
        height, width = bbox[-2:]
        hole_weights = self.spatial_discount_mask(width, height)
        discount_mask = np.zeros_like(mask)
        discount_mask[bbox[0]:bbox[0] + height, bbox[1]:bbox[1] + width,
                      ...] = hole_weights
        results['discount_mask'] = discount_mask
        return results

    def __repr__(self):
        return self.__class__.__name__ + (f'(gamma={self.gamma}, '
                                          f'beta={self.beta})')
@PIPELINES.register_module()
class LoadPairedImageFromFile(LoadImageFromFile):
    """Load a pair of images from file.

    Each sample contains a pair of images, which are concatenated in the w
    dimension (a|b). This is a special loading class for generation paired
    dataset. It loads a pair of images as the common loader does and crops
    it into two images with the same shape in different domains.

    Required key is "pair_path". Added or modified keys are "pair",
    "pair_ori_shape", "ori_pair", "img_a", "img_b", "img_a_path",
    "img_b_path", "img_a_ori_shape", "img_b_ori_shape", "ori_img_a" and
    "ori_img_b".

    Args:
        io_backend (str): io backend where images are store. Default: 'disk'.
        key (str): Keys in results to find corresponding path. Default: 'gt'.
        flag (str): Loading flag for images. Default: 'color'.
        channel_order (str): Order of channel, candidates are 'bgr' and 'rgb'.
            Default: 'bgr'.
        save_original_img (bool): If True, maintain a copy of the image in
            `results` dict with name of `f'ori_{key}'`. Default: False.
        kwargs (dict): Args for file client.
    """

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        filepath = str(results[f'{self.key}_path'])
        raw_bytes = self.file_client.get(filepath)
        pair = mmcv.imfrombytes(
            raw_bytes, flag=self.flag,
            channel_order=self.channel_order)  # HWC
        if pair.ndim == 2:
            pair = np.expand_dims(pair, axis=2)

        results[self.key] = pair
        results[f'{self.key}_path'] = filepath
        results[f'{self.key}_ori_shape'] = pair.shape
        if self.save_original_img:
            results[f'ori_{self.key}'] = pair.copy()

        # Split the concatenated (a|b) image down the middle.
        w = pair.shape[1]
        if w % 2 != 0:
            raise ValueError(
                f'The width of image pair must be even number, but got {w}.')
        half = w // 2
        img_a = pair[:, :half, :]
        img_b = pair[:, half:, :]
        results['img_a'] = img_a
        results['img_b'] = img_b
        results['img_a_path'] = filepath
        results['img_b_path'] = filepath
        results['img_a_ori_shape'] = img_a.shape
        results['img_b_ori_shape'] = img_b.shape
        if self.save_original_img:
            results['ori_img_a'] = img_a.copy()
            results['ori_img_b'] = img_b.copy()
        return results
| 19,292 | 34.206204 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/crop.py | import math
import random
import mmcv
import numpy as np
from torch.nn.modules.utils import _pair
from ..registry import PIPELINES
from .utils import random_choose_unknown
@PIPELINES.register_module()
class Crop:
    """Crop data to specific size for training.

    Args:
        keys (Sequence[str]): The images to be cropped.
        crop_size (Tuple[int]): Target spatial size (h, w).
        random_crop (bool): If set to True, it will random crop
            image. Otherwise, it will work as center crop.
        is_pad_zeros (bool, optional): Whether to pad the image with 0 if
            crop_size is greater than image size. Default: False.
    """

    def __init__(self, keys, crop_size, random_crop=True, is_pad_zeros=False):
        if not mmcv.is_tuple_of(crop_size, int):
            raise TypeError(
                'Elements of crop_size must be int and crop_size must be'
                f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')

        self.keys = keys
        self.crop_size = crop_size
        self.random_crop = random_crop
        self.is_pad_zeros = is_pad_zeros

    def _crop(self, data):
        """Crop a single image or every image in a list.

        Args:
            data (np.ndarray | list[np.ndarray]): Image(s) to crop.

        Returns:
            tuple: The cropped image(s) and the corresponding crop bbox(es)
                ``[x_offset, y_offset, crop_w, crop_h]``. A non-list input
                produces non-list outputs.
        """
        if not isinstance(data, list):
            data_list = [data]
        else:
            data_list = data

        crop_bbox_list = []
        data_list_ = []

        for item in data_list:
            data_h, data_w = item.shape[:2]
            crop_h, crop_w = self.crop_size

            if self.is_pad_zeros:
                # Zero-pad symmetrically so the image becomes at least as
                # large as the crop size along each dimension.
                crop_y_offset, crop_x_offset = 0, 0

                if crop_h > data_h:
                    crop_y_offset = (crop_h - data_h) // 2
                if crop_w > data_w:
                    crop_x_offset = (crop_w - data_w) // 2

                if crop_y_offset > 0 or crop_x_offset > 0:
                    pad_width = [(2 * crop_y_offset, 2 * crop_y_offset),
                                 (2 * crop_x_offset, 2 * crop_x_offset)]
                    if item.ndim == 3:
                        # never pad the channel dimension
                        pad_width.append((0, 0))
                    item = np.pad(
                        item,
                        tuple(pad_width),
                        mode='constant',
                        constant_values=0)

                # refresh sizes after padding
                data_h, data_w = item.shape[:2]

            # never crop more than the (possibly padded) image provides
            crop_h = min(data_h, crop_h)
            crop_w = min(data_w, crop_w)

            if self.random_crop:
                x_offset = np.random.randint(0, data_w - crop_w + 1)
                y_offset = np.random.randint(0, data_h - crop_h + 1)
            else:
                # center crop
                x_offset = max(0, (data_w - crop_w)) // 2
                y_offset = max(0, (data_h - crop_h)) // 2

            crop_bbox = [x_offset, y_offset, crop_w, crop_h]
            item_ = item[y_offset:y_offset + crop_h,
                         x_offset:x_offset + crop_w, ...]

            crop_bbox_list.append(crop_bbox)
            data_list_.append(item_)

        if not isinstance(data, list):
            return data_list_[0], crop_bbox_list[0]
        return data_list_, crop_bbox_list

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for k in self.keys:
            data_, crop_bbox = self._crop(results[k])
            results[k] = data_
            results[k + '_crop_bbox'] = crop_bbox
        results['crop_size'] = self.crop_size
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
                     f'random_crop={self.random_crop}')
        return repr_str
@PIPELINES.register_module()
class RandomResizedCrop(object):
    """Crop data to random size and aspect ratio.

    A crop of a random proportion of the original image
    and a random aspect ratio of the original aspect ratio is made.
    The cropped image is finally resized to a given size specified
    by 'crop_size'. Modified keys are the attributes specified in "keys".

    This code is partially adopted from
    torchvision.transforms.RandomResizedCrop:
    [https://pytorch.org/vision/stable/_modules/torchvision/transforms/\
transforms.html#RandomResizedCrop].

    Args:
        keys (list[str]): The images to be resized and random-cropped.
        crop_size (int | tuple[int]): Target spatial size (h, w).
        scale (tuple[float], optional): Range of the proportion of the
            original image to be cropped. Default: (0.08, 1.0).
        ratio (tuple[float], optional): Range of aspect ratio of the crop.
            Default: (3. / 4., 4. / 3.).
        interpolation (str, optional): Algorithm used for interpolation.
            It can be only either one of the following:
            "nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
            Default: "bilinear".
    """

    def __init__(self,
                 keys,
                 crop_size,
                 scale=(0.08, 1.0),
                 ratio=(3. / 4., 4. / 3.),
                 interpolation='bilinear'):
        assert keys, 'Keys should not be empty.'
        if isinstance(crop_size, int):
            crop_size = (crop_size, crop_size)
        elif not mmcv.is_tuple_of(crop_size, int):
            raise TypeError('"crop_size" must be an integer '
                            'or a tuple of integers, but got '
                            f'{type(crop_size)}')
        if not mmcv.is_tuple_of(scale, float):
            raise TypeError('"scale" must be a tuple of float, '
                            f'but got {type(scale)}')
        if not mmcv.is_tuple_of(ratio, float):
            raise TypeError('"ratio" must be a tuple of float, '
                            f'but got {type(ratio)}')

        self.keys = keys
        self.crop_size = crop_size
        self.scale = scale
        self.ratio = ratio
        self.interpolation = interpolation

    def get_params(self, data):
        """Get parameters for a random sized crop.

        Args:
            data (np.ndarray): Image of type numpy array to be cropped.

        Returns:
            A tuple containing the coordinates of the top left corner
            and the chosen crop size.
        """
        data_h, data_w = data.shape[:2]
        area = data_h * data_w

        # Rejection-sample a crop whose area and aspect ratio fall within
        # the configured ranges; give up after 10 attempts.
        for _ in range(10):
            target_area = random.uniform(*self.scale) * area
            log_ratio = (math.log(self.ratio[0]), math.log(self.ratio[1]))
            # aspect ratio is sampled log-uniformly, as torchvision does
            aspect_ratio = math.exp(random.uniform(*log_ratio))

            crop_w = int(round(math.sqrt(target_area * aspect_ratio)))
            crop_h = int(round(math.sqrt(target_area / aspect_ratio)))

            if 0 < crop_w <= data_w and 0 < crop_h <= data_h:
                top = random.randint(0, data_h - crop_h)
                left = random.randint(0, data_w - crop_w)
                return top, left, crop_h, crop_w

        # Fall back to center crop, clamping the aspect ratio into range.
        in_ratio = float(data_w) / float(data_h)
        if (in_ratio < min(self.ratio)):
            crop_w = data_w
            crop_h = int(round(crop_w / min(self.ratio)))
        elif (in_ratio > max(self.ratio)):
            crop_h = data_h
            crop_w = int(round(crop_h * max(self.ratio)))
        else:  # whole image
            crop_w = data_w
            crop_h = data_h
        top = (data_h - crop_h) // 2
        left = (data_w - crop_w) // 2
        return top, left, crop_h, crop_w

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for k in self.keys:
            top, left, crop_h, crop_w = self.get_params(results[k])
            # NOTE: the stored bbox is (top, left, w, h), unlike `Crop`,
            # which stores (x_offset, y_offset, w, h).
            crop_bbox = [top, left, crop_w, crop_h]
            results[k] = results[k][top:top + crop_h, left:left + crop_w, ...]
            results[k] = mmcv.imresize(
                results[k],
                self.crop_size,
                return_scale=False,
                interpolation=self.interpolation)
            results[k + '_crop_bbox'] = crop_bbox
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(keys={self.keys}, crop_size={self.crop_size}, '
                     f'scale={self.scale}, ratio={self.ratio}, '
                     f'interpolation={self.interpolation})')
        return repr_str
@PIPELINES.register_module()
class FixedCrop:
    """Crop paired data (at a specific position) to specific size for training.

    Args:
        keys (Sequence[str]): The images to be cropped.
        crop_size (Tuple[int]): Target spatial size (h, w).
        crop_pos (Tuple[int]): Specific position (x, y). If set to None,
            random initialize the position to crop paired data batch.
    """

    def __init__(self, keys, crop_size, crop_pos=None):
        if not mmcv.is_tuple_of(crop_size, int):
            raise TypeError(
                'Elements of crop_size must be int and crop_size must be'
                f' tuple, but got {type(crop_size[0])} in {type(crop_size)}')
        if not mmcv.is_tuple_of(crop_pos, int) and (crop_pos is not None):
            raise TypeError(
                'Elements of crop_pos must be int and crop_pos must be'
                f' tuple or None, but got {type(crop_pos[0])} in '
                f'{type(crop_pos)}')

        self.keys = keys
        self.crop_size = crop_size
        self.crop_pos = crop_pos

    def _crop(self, data, x_offset, y_offset, crop_w, crop_h):
        """Crop one image at the given offsets; return it with its bbox."""
        crop_bbox = [x_offset, y_offset, crop_w, crop_h]
        data_ = data[y_offset:y_offset + crop_h, x_offset:x_offset + crop_w,
                     ...]
        return data_, crop_bbox

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        # The crop window is derived once from the first key so that every
        # key is cropped at the same location.
        if isinstance(results[self.keys[0]], list):
            data_h, data_w = results[self.keys[0]][0].shape[:2]
        else:
            data_h, data_w = results[self.keys[0]].shape[:2]

        crop_h, crop_w = self.crop_size
        crop_h = min(data_h, crop_h)
        crop_w = min(data_w, crop_w)

        if self.crop_pos is None:
            x_offset = np.random.randint(0, data_w - crop_w + 1)
            y_offset = np.random.randint(0, data_h - crop_h + 1)
        else:
            x_offset, y_offset = self.crop_pos
            # clip the crop window so that it stays inside the image
            crop_w = min(data_w - x_offset, crop_w)
            crop_h = min(data_h - y_offset, crop_h)

        for k in self.keys:
            images = results[k]
            is_list = isinstance(images, list)
            if not is_list:
                images = [images]
            cropped_images = []
            crop_bbox = None
            for image in images:
                # In fixed crop for paired images, sizes should be the same
                if (image.shape[0] != data_h or image.shape[1] != data_w):
                    raise ValueError(
                        'The sizes of paired images should be the same. '
                        f'Expected ({data_h}, {data_w}), '
                        f'but got ({image.shape[0]}, '
                        f'{image.shape[1]}).')
                data_, crop_bbox = self._crop(image, x_offset, y_offset,
                                              crop_w, crop_h)
                cropped_images.append(data_)
            results[k + '_crop_bbox'] = crop_bbox
            if not is_list:
                cropped_images = cropped_images[0]
            results[k] = cropped_images
        results['crop_size'] = self.crop_size
        results['crop_pos'] = self.crop_pos
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'keys={self.keys}, crop_size={self.crop_size}, '
                     f'crop_pos={self.crop_pos}')
        return repr_str
@PIPELINES.register_module()
class PairedRandomCrop:
    """Paried random crop.

    It crops a pair of lq and gt images with corresponding locations.
    It also supports accepting lq list and gt list.
    Required keys are "scale", "lq", and "gt",
    added or modified keys are "lq" and "gt".

    Args:
        gt_patch_size (int): cropped gt patch size.
    """

    def __init__(self, gt_patch_size):
        self.gt_patch_size = gt_patch_size

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        scale = results['scale']
        lq_patch_size = self.gt_patch_size // scale

        lq_is_list = isinstance(results['lq'], list)
        if not lq_is_list:
            results['lq'] = [results['lq']]
        gt_is_list = isinstance(results['gt'], list)
        if not gt_is_list:
            results['gt'] = [results['gt']]

        h_lq, w_lq, _ = results['lq'][0].shape
        h_gt, w_gt, _ = results['gt'][0].shape

        if h_gt != h_lq * scale or w_gt != w_lq * scale:
            # The message parts are concatenated into one string; passing
            # several arguments to ValueError would render as a tuple.
            raise ValueError(
                f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x '
                f'multiplication of LQ ({h_lq}, {w_lq}).')
        if h_lq < lq_patch_size or w_lq < lq_patch_size:
            raise ValueError(
                f'LQ ({h_lq}, {w_lq}) is smaller than patch size '
                f'({lq_patch_size}, {lq_patch_size}). Please check '
                f'{results["lq_path"][0]} and {results["gt_path"][0]}.')

        # randomly choose top and left coordinates for lq patch
        top = np.random.randint(h_lq - lq_patch_size + 1)
        left = np.random.randint(w_lq - lq_patch_size + 1)
        # crop lq patch
        results['lq'] = [
            v[top:top + lq_patch_size, left:left + lq_patch_size, ...]
            for v in results['lq']
        ]
        # crop corresponding gt patch at the scaled location
        top_gt, left_gt = int(top * scale), int(left * scale)
        results['gt'] = [
            v[top_gt:top_gt + self.gt_patch_size,
              left_gt:left_gt + self.gt_patch_size, ...] for v in results['gt']
        ]

        if not lq_is_list:
            results['lq'] = results['lq'][0]
        if not gt_is_list:
            results['gt'] = results['gt'][0]
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(gt_patch_size={self.gt_patch_size})'
        return repr_str
@PIPELINES.register_module()
class CropAroundCenter:
    """Randomly crop the images around unknown area in the center 1/4 images.

    This cropping strategy is adopted in GCA matting. The `unknown area` is
    the same as `semi-transparent area`.
    https://arxiv.org/pdf/2001.04069.pdf

    It retains the center 1/4 images and resizes the images to 'crop_size'.
    Required keys are "fg", "bg", "trimap" and "alpha", added or modified
    keys are "crop_bbox", "fg", "bg", "trimap" and "alpha".

    Args:
        crop_size (int | tuple): Desired output size. If int, square crop is
            applied.
    """

    def __init__(self, crop_size):
        if mmcv.is_tuple_of(crop_size, int):
            assert len(crop_size) == 2, 'length of crop_size must be 2.'
        elif not isinstance(crop_size, int):
            raise TypeError('crop_size must be int or a tuple of int, but got '
                            f'{type(crop_size)}')
        # _pair turns an int into (int, int) and keeps a 2-tuple unchanged
        self.crop_size = _pair(crop_size)

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        fg = results['fg']
        alpha = results['alpha']
        trimap = results['trimap']
        bg = results['bg']
        h, w = fg.shape[:2]
        assert bg.shape == fg.shape, (f'shape of bg {bg.shape} should be the '
                                      f'same as fg {fg.shape}.')

        crop_h, crop_w = self.crop_size
        # Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
        rescale_ratio = max(crop_h / h, crop_w / w)
        if rescale_ratio > 1:
            new_h = max(int(h * rescale_ratio), crop_h)
            new_w = max(int(w * rescale_ratio), crop_w)
            # nearest interpolation keeps label-like values in trimap/alpha
            # from being blended
            fg = mmcv.imresize(fg, (new_w, new_h), interpolation='nearest')
            alpha = mmcv.imresize(
                alpha, (new_w, new_h), interpolation='nearest')
            trimap = mmcv.imresize(
                trimap, (new_w, new_h), interpolation='nearest')
            bg = mmcv.imresize(bg, (new_w, new_h), interpolation='bicubic')
            h, w = new_h, new_w

        # resize to 1/4 to ignore small unknown patches
        small_trimap = mmcv.imresize(
            trimap, (w // 4, h // 4), interpolation='nearest')
        # find unknown area (trimap value 128) in center 1/4 region
        margin_h, margin_w = crop_h // 2, crop_w // 2
        sample_area = small_trimap[margin_h // 4:(h - margin_h) // 4,
                                   margin_w // 4:(w - margin_w) // 4]
        # NOTE: np.where returns (row, col) index arrays, so despite the
        # names, `unknown_xs` holds vertical (row) indices and `unknown_ys`
        # horizontal (col) indices; they are used consistently below.
        unknown_xs, unknown_ys = np.where(sample_area == 128)
        unknown_num = len(unknown_xs)
        if unknown_num < 10:
            # too few unknown area in the center, crop from the whole image
            top = np.random.randint(0, h - crop_h + 1)
            left = np.random.randint(0, w - crop_w + 1)
        else:
            idx = np.random.randint(unknown_num)
            # multiply by 4 to map back from the 1/4-scale trimap
            top = unknown_xs[idx] * 4
            left = unknown_ys[idx] * 4
        bottom = top + crop_h
        right = left + crop_w

        results['fg'] = fg[top:bottom, left:right]
        results['alpha'] = alpha[top:bottom, left:right]
        results['trimap'] = trimap[top:bottom, left:right]
        results['bg'] = bg[top:bottom, left:right]
        results['crop_bbox'] = (left, top, right, bottom)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(crop_size={self.crop_size})'
@PIPELINES.register_module()
class CropAroundUnknown:
    """Crop around unknown area with a randomly selected scale.

    Randomly select the w and h from a list of (w, h).
    Required keys are the keys in argument `keys`, added or
    modified keys are "crop_bbox" and the keys in argument `keys`.
    This class assumes value of "alpha" ranges from 0 to 255.

    Args:
        keys (Sequence[str]): The images to be cropped. It must contain
            'alpha'. If unknown_source is set to 'trimap', then it must also
            contain 'trimap'.
        crop_sizes (list[int | tuple[int]]): List of (w, h) to be selected.
        unknown_source (str, optional): Unknown area to select from. It must
            be 'alpha' or 'trimap'. Default to 'alpha'.
        interpolations (str | list[str], optional): Interpolation method of
            mmcv.imresize. The interpolation operation will be applied when
            image size is smaller than the crop_size. If given as a list of
            str, it should have the same length as `keys`. Or if given as a
            str all the keys will be resized with the same method.
            Default to 'bilinear'.
    """

    def __init__(self,
                 keys,
                 crop_sizes,
                 unknown_source='alpha',
                 interpolations='bilinear'):
        if 'alpha' not in keys:
            raise ValueError(f'"alpha" must be in keys, but got {keys}')
        self.keys = keys

        if not isinstance(crop_sizes, list):
            raise TypeError(
                f'Crop sizes must be list, but got {type(crop_sizes)}.')
        # normalize every entry to a pair via _pair (int -> (int, int))
        self.crop_sizes = [_pair(crop_size) for crop_size in crop_sizes]
        if not mmcv.is_tuple_of(self.crop_sizes[0], int):
            raise TypeError('Elements of crop_sizes must be int or tuple of '
                            f'int, but got {type(self.crop_sizes[0][0])}.')

        if unknown_source not in ['alpha', 'trimap']:
            raise ValueError('unknown_source must be "alpha" or "trimap", '
                             f'but got {unknown_source}')
        if unknown_source not in keys:
            # it could only be trimap, since alpha is checked before
            raise ValueError(
                'if unknown_source is "trimap", it must also be set in keys')
        self.unknown_source = unknown_source

        if isinstance(interpolations, str):
            # broadcast a single method to every key
            self.interpolations = [interpolations] * len(self.keys)
        elif mmcv.is_list_of(interpolations,
                             str) and len(interpolations) == len(self.keys):
            self.interpolations = interpolations
        else:
            raise TypeError(
                'interpolations must be a str or list of str with '
                f'the same length as keys, but got {interpolations}')

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        h, w = results[self.keys[0]].shape[:2]

        rand_ind = np.random.randint(len(self.crop_sizes))
        crop_h, crop_w = self.crop_sizes[rand_ind]

        # Make sure h >= crop_h, w >= crop_w. If not, rescale imgs
        rescale_ratio = max(crop_h / h, crop_w / w)
        if rescale_ratio > 1:
            h = max(int(h * rescale_ratio), crop_h)
            w = max(int(w * rescale_ratio), crop_w)
            for key, interpolation in zip(self.keys, self.interpolations):
                results[key] = mmcv.imresize(
                    results[key], (w, h), interpolation=interpolation)

        # Select the cropping top-left point which is an unknown pixel
        if self.unknown_source == 'alpha':
            # semi-transparent pixels (0 < alpha < 255) count as "unknown"
            unknown = (results['alpha'] > 0) & (results['alpha'] < 255)
        else:
            unknown = results['trimap'] == 128
        top, left = random_choose_unknown(unknown.squeeze(), (crop_h, crop_w))

        bottom = top + crop_h
        right = left + crop_w

        # all keys are cropped at the same location
        for key in self.keys:
            results[key] = results[key][top:bottom, left:right]
        results['crop_bbox'] = (left, top, right, bottom)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(keys={self.keys}, crop_sizes={self.crop_sizes}, '
                     f"unknown_source='{self.unknown_source}', "
                     f'interpolations={self.interpolations})')
        return repr_str
@PIPELINES.register_module()
class CropAroundFg:
    """Crop around the whole foreground in the segmentation mask.

    Required keys are "seg" and the keys in argument `keys`.
    Meanwhile, "seg" must be in argument `keys`. Added or modified keys are
    "crop_bbox" and the keys in argument `keys`.

    Args:
        keys (Sequence[str]): The images to be cropped. It must contain
            'seg'.
        bd_ratio_range (tuple, optional): The range of the boundary (bd) ratio
            to select from. The boundary ratio is the ratio of the boundary to
            the minimal bbox that contains the whole foreground given by
            segmentation. Default to (0.1, 0.4).
        test_mode (bool): Whether use test mode. In test mode, the tight crop
            area of foreground will be extended to the a square.
            Default to False.
    """

    def __init__(self, keys, bd_ratio_range=(0.1, 0.4), test_mode=False):
        if 'seg' not in keys:
            raise ValueError(f'"seg" must be in keys, but got {keys}')
        if (not mmcv.is_tuple_of(bd_ratio_range, float)
                or len(bd_ratio_range) != 2):
            raise TypeError('bd_ratio_range must be a tuple of 2 int, but got '
                            f'{bd_ratio_range}')
        self.keys = keys
        self.bd_ratio_range = bd_ratio_range
        self.test_mode = test_mode

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        seg = results['seg']
        height, width = seg.shape[:2]
        # Tight bbox around the non-zero (foreground) region.
        fg_rows, fg_cols = np.where(seg)
        top, bottom = np.amin(fg_rows), np.amax(fg_rows)
        left, right = np.amin(fg_cols), np.amax(fg_cols)
        long_side = max(bottom - top, right - left)
        if self.test_mode:
            # Extend the tight bbox to a square in test mode.
            bottom = top + long_side
            right = left + long_side
        # Enlarge the bbox by a randomly-chosen boundary margin.
        ratio = np.random.uniform(*self.bd_ratio_range)
        margin = int(np.round(ratio * long_side))
        # The bbox is tracked as its four corners (left, top, right, bottom),
        # clamped to the image; this may differ by 1 pixel from a
        # (top, left, height, width) representation.
        top = max(top - margin, 0)
        left = max(left - margin, 0)
        bottom = min(bottom + margin, height)
        right = min(right + margin, width)
        for key in self.keys:
            results[key] = results[key][top:bottom, left:right]
        results['crop_bbox'] = (left, top, right, bottom)
        return results
@PIPELINES.register_module()
class ModCrop:
    """Mod crop gt images, used during testing.

    Crops the "gt" image so that its height and width are exactly divisible
    by the scale factor. Required keys are "scale" and "gt", added or
    modified keys are "gt".
    """

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        img = results['gt'].copy()
        scale = results['scale']
        if img.ndim not in (2, 3):
            raise ValueError(f'Wrong img ndim: {img.ndim}.')
        height, width = img.shape[:2]
        # Drop the remainder rows/columns so both dims divide by `scale`.
        cropped_h = height - height % scale
        cropped_w = width - width % scale
        results['gt'] = img[:cropped_h, :cropped_w, ...]
        return results
@PIPELINES.register_module()
class CropLike:
    """Crop/pad the image in the target_key according to the size of image
    in the reference_key .

    Args:
        target_key (str): The key needs to be cropped.
        reference_key (str | None): The reference key, need its size.
            Default: None.
    """

    def __init__(self, target_key, reference_key=None):
        assert reference_key and target_key
        self.target_key = target_key
        self.reference_key = reference_key

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.
                Require self.target_key and self.reference_key.

        Returns:
            dict: A dict containing the processed data and information.
                Modify self.target_key.
        """
        ref_shape = results[self.reference_key].shape
        target = results[self.target_key]
        # Spatial size comes from the reference; any extra dims (e.g. the
        # channel axis) come from the target itself.
        out = np.zeros(ref_shape[:2] + target.shape[2:], dtype=target.dtype)
        copy_h = min(target.shape[0], ref_shape[0])
        copy_w = min(target.shape[1], ref_shape[1])
        # Copy the overlapping top-left region; the rest stays zero-padded.
        out[:copy_h, :copy_w] = target[:copy_h, :copy_w]
        results[self.target_key] = out
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__} target_key={self.target_key}, '
                f'reference_key={self.reference_key}')
| 28,291 | 36.722667 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/augmentation.py | import copy
import math
import numbers
import os
import os.path as osp
import random
import cv2
import mmcv
import numpy as np
import torchvision.transforms as transforms
from PIL import Image
from ..registry import PIPELINES
@PIPELINES.register_module()
class Resize:
    """Resize data to a specific size for training or resize the images to fit
    the network input regulation for testing.

    When used for resizing images to fit network input regulation, the case is
    that a network may have several downsample and then upsample operation,
    then the input height and width should be divisible by the downsample
    factor of the network.
    For example, the network would downsample the input for 5 times with
    stride 2, then the downsample factor is 2^5 = 32 and the height
    and width should be divisible by 32.

    Required keys are the keys in attribute "keys", added or modified keys are
    "keep_ratio", "scale_factor", "interpolation" and the
    keys in attribute "keys".

    All keys in "keys" should have the same shape. "test_trans" is used to
    record the test transformation to align the input's shape.

    Args:
        keys (list[str]): The images to be resized.
        scale (float | tuple[int]): If scale is tuple[int], target spatial
            size (h, w). Otherwise, target spatial size is scaled by input
            size.
            Note that when it is used, `size_factor` and `max_size` are
            useless. Default: None
        keep_ratio (bool): If set to True, images will be resized without
            changing the aspect ratio. Otherwise, it will resize images to a
            given size. Default: False.
            Note that it is used togher with `scale`.
        size_factor (int): Let the output shape be a multiple of size_factor.
            Default:None.
            Note that when it is used, `scale` should be set to None and
            `keep_ratio` should be set to False.
        max_size (int): The maximum size of the longest side of the output.
            Default:None.
            Note that it is used togher with `size_factor`.
        interpolation (str): Algorithm used for interpolation:
            "nearest" | "bilinear" | "bicubic" | "area" | "lanczos".
            Default: "bilinear".
        backend (str | None): The image resize backend type. Options are `cv2`,
            `pillow`, `None`. If backend is None, the global imread_backend
            specified by ``mmcv.use_backend()`` will be used.
            Default: None.
        output_keys (list[str] | None): The resized images. Default: None
            Note that if it is not `None`, its length should be equal to keys.
    """

    def __init__(self,
                 keys,
                 scale=None,
                 keep_ratio=False,
                 size_factor=None,
                 max_size=None,
                 interpolation='bilinear',
                 backend=None,
                 output_keys=None):
        assert keys, 'Keys should not be empty.'
        if output_keys:
            assert len(output_keys) == len(keys)
        else:
            # Resize in place when no separate output keys are given.
            output_keys = keys
        # `scale` and `size_factor` are mutually exclusive ways of
        # specifying the output size.
        if size_factor:
            assert scale is None, ('When size_factor is used, scale should ',
                                   f'be None. But received {scale}.')
            assert keep_ratio is False, ('When size_factor is used, '
                                         'keep_ratio should be False.')
        if max_size:
            assert size_factor is not None, (
                'When max_size is used, '
                f'size_factor should also be set. But received {size_factor}.')
        if isinstance(scale, float):
            if scale <= 0:
                raise ValueError(f'Invalid scale {scale}, must be positive.')
        elif mmcv.is_tuple_of(scale, int):
            max_long_edge = max(scale)
            max_short_edge = min(scale)
            if max_short_edge == -1:
                # assign np.inf to long edge for rescaling short edge later.
                scale = (np.inf, max_long_edge)
        elif scale is not None:
            raise TypeError(
                f'Scale must be None, float or tuple of int, but got '
                f'{type(scale)}.')
        self.keys = keys
        self.output_keys = output_keys
        self.scale = scale
        self.size_factor = size_factor
        self.max_size = max_size
        self.keep_ratio = keep_ratio
        self.interpolation = interpolation
        self.backend = backend

    def _resize(self, img):
        # Resize a single image. NOTE: records the actual scale factor in
        # `self.scale_factor` as a side effect (written to `results` later).
        if self.keep_ratio:
            img, self.scale_factor = mmcv.imrescale(
                img,
                self.scale,
                return_scale=True,
                interpolation=self.interpolation,
                backend=self.backend)
        else:
            img, w_scale, h_scale = mmcv.imresize(
                img,
                self.scale,
                return_scale=True,
                interpolation=self.interpolation,
                backend=self.backend)
            self.scale_factor = np.array((w_scale, h_scale), dtype=np.float32)
        return img

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        if self.size_factor:
            # Round (h, w) down to multiples of `size_factor`, optionally
            # capped by `max_size` (itself rounded down to a multiple).
            h, w = results[self.keys[0]].shape[:2]
            new_h = h - (h % self.size_factor)
            new_w = w - (w % self.size_factor)
            if self.max_size:
                new_h = min(self.max_size - (self.max_size % self.size_factor),
                            new_h)
                new_w = min(self.max_size - (self.max_size % self.size_factor),
                            new_w)
            self.scale = (new_w, new_h)
        for key, out_key in zip(self.keys, self.output_keys):
            results[out_key] = self._resize(results[key])
            if len(results[out_key].shape) == 2:
                # Keep a trailing channel axis for grayscale images.
                results[out_key] = np.expand_dims(results[out_key], axis=2)
        results['scale_factor'] = self.scale_factor
        results['keep_ratio'] = self.keep_ratio
        results['interpolation'] = self.interpolation
        results['backend'] = self.backend
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (
            f'(keys={self.keys}, output_keys={self.output_keys}, '
            f'scale={self.scale}, '
            f'keep_ratio={self.keep_ratio}, size_factor={self.size_factor}, '
            f'max_size={self.max_size}, interpolation={self.interpolation})')
        return repr_str
@PIPELINES.register_module()
class RandomRotation:
"""Rotate the image by a randomly-chosen angle, measured in degree.
Args:
keys (list[str]): The images to be rotated.
degrees (tuple[float] | tuple[int] | float | int): If it is a tuple,
it represents a range (min, max). If it is a float or int,
the range is constructed as (-degrees, degrees).
"""
def __init__(self, keys, degrees):
if isinstance(degrees, (int, float)):
if degrees < 0.0:
raise ValueError('Degrees must be positive if it is a number.')
else:
degrees = (-degrees, degrees)
elif not mmcv.is_tuple_of(degrees, (int, float)):
raise TypeError(f'Degrees must be float | int or tuple of float | '
'int, but got '
f'{type(degrees)}.')
self.keys = keys
self.degrees = degrees
def __call__(self, results):
angle = random.uniform(self.degrees[0], self.degrees[1])
for k in self.keys:
results[k] = mmcv.imrotate(results[k], angle)
if results[k].ndim == 2:
results[k] = np.expand_dims(results[k], axis=2)
results['degrees'] = self.degrees
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f'(keys={self.keys}, degrees={self.degrees})')
return repr_str
@PIPELINES.register_module()
class Flip:
    """Flip the input data with a probability.

    Reverse the order of elements in the given data with a specific direction.
    The shape of the data is preserved, but the elements are reordered.
    Required keys are the keys in attributes "keys", added or modified keys
    are "flip", "flip_direction" and the keys in attributes "keys".
    It also supports flipping a list of images with the same flip.

    Args:
        keys (list[str]): The images to be flipped.
        flip_ratio (float): The propability to flip the images.
        direction (str): Flip images horizontally or vertically. Options are
            "horizontal" | "vertical". Default: "horizontal".
    """

    _directions = ['horizontal', 'vertical']

    def __init__(self, keys, flip_ratio=0.5, direction='horizontal'):
        if direction not in self._directions:
            raise ValueError(f'Direction {direction} is not supported.'
                             f'Currently support ones are {self._directions}')
        self.keys = keys
        self.flip_ratio = flip_ratio
        self.direction = direction

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        do_flip = np.random.random() < self.flip_ratio
        if do_flip:
            for key in self.keys:
                target = results[key]
                if isinstance(target, list):
                    # Flip every frame of a sequence in place.
                    for frame in target:
                        mmcv.imflip_(frame, self.direction)
                else:
                    mmcv.imflip_(target, self.direction)
        results['flip'] = do_flip
        results['flip_direction'] = self.direction
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(keys={self.keys}, '
                f'flip_ratio={self.flip_ratio}, direction={self.direction})')
@PIPELINES.register_module()
class Pad:
    """Pad the images to align with network downsample factor for testing.

    See `Reshape` for more explanation. `numpy.pad` is used for the pad
    operation. Required keys are the keys in attribute "keys", added or
    modified keys are "test_trans" and the keys in attribute "keys". All keys
    in "keys" should have the same shape. "test_trans" is used to record the
    test transformation to align the input's shape.

    Args:
        keys (list[str]): The images to be padded.
        ds_factor (int): Downsample factor of the network. The height and
            weight will be padded to a multiple of ds_factor. Default: 32.
        kwargs (option): any keyword arguments to be passed to `numpy.pad`.
    """

    def __init__(self, keys, ds_factor=32, **kwargs):
        self.keys = keys
        self.ds_factor = ds_factor
        self.kwargs = kwargs

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        h, w = results[self.keys[0]].shape[:2]
        # Smallest multiples of `ds_factor` that are >= (h, w).
        target_h = (h + self.ds_factor - 1) // self.ds_factor * self.ds_factor
        target_w = (w + self.ds_factor - 1) // self.ds_factor * self.ds_factor
        pad_h, pad_w = target_h - h, target_w - w
        if pad_h or pad_w:
            # Pad at the bottom/right only; channels are never padded.
            padding = ((0, pad_h), (0, pad_w), (0, 0))
            for key in self.keys:
                img = results[key]
                results[key] = np.pad(img, padding[:img.ndim], **self.kwargs)
        results['pad'] = (pad_h, pad_w)
        return results

    def __repr__(self):
        extra = ', '.join([f'{k}={v}' for k, v in self.kwargs.items()])
        return (f'{self.__class__.__name__}(keys={self.keys}, '
                f'ds_factor={self.ds_factor}, {extra})')
@PIPELINES.register_module()
class RandomAffine:
    """Apply random affine to input images.

    This class is adopted from
    https://github.com/pytorch/vision/blob/v0.5.0/torchvision/transforms/
    transforms.py#L1015
    It should be noted that in
    https://github.com/Yaoyi-Li/GCA-Matting/blob/master/dataloader/
    data_generator.py#L70
    random flip is added. See explanation of `flip_ratio` below.
    Required keys are the keys in attribute "keys", modified keys
    are keys in attribute "keys".

    Args:
        keys (Sequence[str]): The images to be affined.
        degrees (float | tuple[float]): Range of degrees to select from. If it
            is a float instead of a tuple like (min, max), the range of degrees
            will be (-degrees, +degrees). Set to 0 to deactivate rotations.
        translate (tuple, optional): Tuple of maximum absolute fraction for
            horizontal and vertical translations. For example translate=(a, b),
            then horizontal shift is randomly sampled in the range
            -img_width * a < dx < img_width * a and vertical shift is randomly
            sampled in the range -img_height * b < dy < img_height * b.
            Default: None.
        scale (tuple, optional): Scaling factor interval, e.g (a, b), then
            scale is randomly sampled from the range a <= scale <= b.
            Default: None.
        shear (float | tuple[float], optional): Range of shear degrees to
            select from. If shear is a float, a shear parallel to the x axis
            and a shear parallel to the y axis in the range (-shear, +shear)
            will be applied. Else if shear is a tuple of 2 values, a x-axis
            shear and a y-axis shear in (shear[0], shear[1]) will be applied.
            Default: None.
        flip_ratio (float, optional): Probability of the image being flipped.
            The flips in horizontal direction and vertical direction are
            independent. The image may be flipped in both directions.
            Default: None.
    """

    def __init__(self,
                 keys,
                 degrees,
                 translate=None,
                 scale=None,
                 shear=None,
                 flip_ratio=None):
        self.keys = keys
        # Normalize scalar `degrees` to a symmetric (min, max) range.
        if isinstance(degrees, numbers.Number):
            assert degrees >= 0, ('If degrees is a single number, '
                                  'it must be positive.')
            self.degrees = (-degrees, degrees)
        else:
            assert isinstance(degrees, tuple) and len(degrees) == 2, \
                'degrees should be a tuple and it must be of length 2.'
            self.degrees = degrees
        if translate is not None:
            assert isinstance(translate, tuple) and len(translate) == 2, \
                'translate should be a tuple and it must be of length 2.'
            for t in translate:
                assert 0.0 <= t <= 1.0, ('translation values should be '
                                         'between 0 and 1.')
        self.translate = translate
        if scale is not None:
            assert isinstance(scale, tuple) and len(scale) == 2, \
                'scale should be a tuple and it must be of length 2.'
            for s in scale:
                assert s > 0, 'scale values should be positive.'
        self.scale = scale
        if shear is not None:
            # Normalize scalar `shear` to a symmetric (min, max) range.
            if isinstance(shear, numbers.Number):
                assert shear >= 0, ('If shear is a single number, '
                                    'it must be positive.')
                self.shear = (-shear, shear)
            else:
                assert isinstance(shear, tuple) and len(shear) == 2, \
                    'shear should be a tuple and it must be of length 2.'
                # X-Axis and Y-Axis shear with (min, max)
                self.shear = shear
        else:
            self.shear = shear
        if flip_ratio is not None:
            assert isinstance(flip_ratio,
                              float), 'flip_ratio should be a float.'
            self.flip_ratio = flip_ratio
        else:
            # No flipping by default.
            self.flip_ratio = 0

    @staticmethod
    def _get_params(degrees, translate, scale_ranges, shears, flip_ratio,
                    img_size):
        """Get parameters for affine transformation.

        Returns:
            paras (tuple): Params to be passed to the affine transformation.
        """
        angle = np.random.uniform(degrees[0], degrees[1])
        if translate is not None:
            # Translation fractions are scaled by the image size to pixels.
            max_dx = translate[0] * img_size[0]
            max_dy = translate[1] * img_size[1]
            translations = (np.round(np.random.uniform(-max_dx, max_dx)),
                            np.round(np.random.uniform(-max_dy, max_dy)))
        else:
            translations = (0, 0)
        if scale_ranges is not None:
            # x and y are scaled independently.
            scale = (np.random.uniform(scale_ranges[0], scale_ranges[1]),
                     np.random.uniform(scale_ranges[0], scale_ranges[1]))
        else:
            scale = (1.0, 1.0)
        if shears is not None:
            shear = np.random.uniform(shears[0], shears[1])
        else:
            shear = 0.0
        # Because `flip` is used as a sign multiplier on the scale factors in
        # `_get_inverse_affine_matrix`, -1 stands for flip and 1 stands for
        # no flip. Thus `flip` is an 'inverse' flag of the comparison result.
        flip = (np.random.rand(2) > flip_ratio).astype(np.int32) * 2 - 1
        return angle, translations, scale, shear, flip

    @staticmethod
    def _get_inverse_affine_matrix(center, angle, translate, scale, shear,
                                   flip):
        """Helper method to compute inverse matrix for affine transformation.

        As it is explained in PIL.Image.rotate, we need compute INVERSE of
        affine transformation matrix: M = T * C * RSS * C^-1 where
        T is translation matrix:
            [1, 0, tx | 0, 1, ty | 0, 0, 1];
        C is translation matrix to keep center:
            [1, 0, cx | 0, 1, cy | 0, 0, 1];
        RSS is rotation with scale and shear matrix.

        It is different from the original function in torchvision.
        1. The order are changed to flip -> scale -> rotation -> shear.
        2. x and y have different scale factors.
        RSS(shear, a, scale, f) =
            [ cos(a + shear)*scale_x*f -sin(a + shear)*scale_y     0]
            [ sin(a)*scale_x*f          cos(a)*scale_y             0]
            [ 0                         0                          1]
        Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1.
        """
        angle = math.radians(angle)
        shear = math.radians(shear)
        # Flip is folded into the scale factors as a sign (+1 / -1).
        scale_x = 1.0 / scale[0] * flip[0]
        scale_y = 1.0 / scale[1] * flip[1]
        # Inverted rotation matrix with scale and shear
        # `d` is the determinant used to invert the 2x2 rotation/shear part.
        d = math.cos(angle + shear) * math.cos(angle) + math.sin(
            angle + shear) * math.sin(angle)
        matrix = [
            math.cos(angle) * scale_x,
            math.sin(angle + shear) * scale_x, 0, -math.sin(angle) * scale_y,
            math.cos(angle + shear) * scale_y, 0
        ]
        matrix = [m / d for m in matrix]
        # Apply inverse of translation and of center translation:
        # RSS^-1 * C^-1 * T^-1
        matrix[2] += matrix[0] * (-center[0] - translate[0]) + matrix[1] * (
            -center[1] - translate[1])
        matrix[5] += matrix[3] * (-center[0] - translate[0]) + matrix[4] * (
            -center[1] - translate[1])
        # Apply center translation: C * RSS^-1 * C^-1 * T^-1
        matrix[2] += center[0]
        matrix[5] += center[1]
        return matrix

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        h, w = results[self.keys[0]].shape[:2]
        # if image is too small (longer side < 1024), set degree to 0 to
        # reduce introduced dark area
        if np.maximum(h, w) < 1024:
            params = self._get_params((0, 0), self.translate, self.scale,
                                      self.shear, self.flip_ratio, (h, w))
        else:
            params = self._get_params(self.degrees, self.translate, self.scale,
                                      self.shear, self.flip_ratio, (h, w))
        center = (w * 0.5 - 0.5, h * 0.5 - 0.5)
        M = self._get_inverse_affine_matrix(center, *params)
        M = np.array(M).reshape((2, 3))
        # WARP_INVERSE_MAP: `M` maps output -> input, matching the inverse
        # matrix computed above; nearest interpolation keeps label values.
        for key in self.keys:
            results[key] = cv2.warpAffine(
                results[key],
                M, (w, h),
                flags=cv2.INTER_NEAREST + cv2.WARP_INVERSE_MAP)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(keys={self.keys}, degrees={self.degrees}, '
                     f'translate={self.translate}, scale={self.scale}, '
                     f'shear={self.shear}, flip_ratio={self.flip_ratio})')
        return repr_str
@PIPELINES.register_module()
class RandomJitter:
    """Randomly jitter the foreground in hsv space.

    The jitter range of hue is adjustable while the jitter ranges of
    saturation and value are adaptive to the images. Side effect: the "fg"
    image will be converted to `np.float32`.
    Required keys are "fg" and "alpha", modified key is "fg".

    Args:
        hue_range (float | tuple[float]): Range of hue jittering. If it is a
            float instead of a tuple like (min, max), the range of hue
            jittering will be (-hue_range, +hue_range). Default: 40.
    """

    def __init__(self, hue_range=40):
        if isinstance(hue_range, numbers.Number):
            assert hue_range >= 0, ('If hue_range is a single number, '
                                    'it must be positive.')
            self.hue_range = (-hue_range, hue_range)
        else:
            assert isinstance(hue_range, tuple) and len(hue_range) == 2, \
                'hue_range should be a tuple and it must be of length 2.'
            self.hue_range = hue_range

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        fg, alpha = results['fg'], results['alpha']
        # convert to HSV space;
        # convert to float32 image to keep precision during space conversion.
        # NOTE(review): assumes `fg` is a BGR image with values in
        # [0, 255] — confirm at the caller.
        fg = mmcv.bgr2hsv(fg.astype(np.float32) / 255)
        # Hue noise
        hue_jitter = np.random.randint(self.hue_range[0], self.hue_range[1])
        # Hue channel is in degrees; wrap around 360 after jittering.
        fg[:, :, 0] = np.remainder(fg[:, :, 0] + hue_jitter, 360)
        # Saturation noise
        # Statistics are taken over foreground pixels only (alpha > 0).
        sat_mean = fg[:, :, 1][alpha > 0].mean()
        # jitter saturation within range (1.1 - sat_mean) * [-0.1, 0.1]
        sat_jitter = (1.1 - sat_mean) * (np.random.rand() * 0.2 - 0.1)
        sat = fg[:, :, 1]
        sat = np.abs(sat + sat_jitter)
        # Reflect values that exceed 1 back into [0, 1].
        sat[sat > 1] = 2 - sat[sat > 1]
        fg[:, :, 1] = sat
        # Value noise
        val_mean = fg[:, :, 2][alpha > 0].mean()
        # jitter value within range (1.1 - val_mean) * [-0.1, 0.1]
        val_jitter = (1.1 - val_mean) * (np.random.rand() * 0.2 - 0.1)
        val = fg[:, :, 2]
        val = np.abs(val + val_jitter)
        val[val > 1] = 2 - val[val > 1]
        fg[:, :, 2] = val
        # convert back to BGR space
        fg = mmcv.hsv2bgr(fg)
        results['fg'] = fg * 255
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'hue_range={self.hue_range}'
@PIPELINES.register_module()
class ColorJitter:
    """An interface for torch color jitter so that it can be invoked in
    mmediting pipeline.

    Randomly change the brightness, contrast and saturation of an image.
    Modified keys are the attributes specified in "keys".

    Args:
        keys (list[str]): The images to be resized.
        to_rgb (bool): Whether to convert channels from BGR to RGB.
            Default: False.
        kwargs (dict): Any keyword arguments to be passed to
            ``torchvision.transforms.ColorJitter``.
    """

    def __init__(self, keys, to_rgb=False, **kwargs):
        assert keys, 'Keys should not be empty.'
        self.keys = keys
        self.to_rgb = to_rgb
        self.transform = transforms.ColorJitter(**kwargs)

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for k in self.keys:
            if self.to_rgb:
                # torchvision operates on RGB; convert from BGR first.
                results[k] = results[k][..., ::-1]
            results[k] = Image.fromarray(results[k])
            results[k] = self.transform(results[k])
            results[k] = np.asarray(results[k])
            if self.to_rgb:
                # Convert back to the original BGR order. Previously this
                # flip was applied unconditionally, which silently reversed
                # the channel order of inputs when `to_rgb` was False.
                results[k] = results[k][..., ::-1]
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(keys={self.keys}, to_rgb={self.to_rgb})')
        return repr_str
class BinarizeImage:
    """Binarize image.

    Pixels whose value is strictly greater than ``binary_thr`` become 1 and
    all others become 0.

    Args:
        keys (Sequence[str]): The images to be binarized.
        binary_thr (float): Threshold for binarization.
        to_int (bool): If True, return image as int32, otherwise
            return image as float32.
    """

    def __init__(self, keys, binary_thr, to_int=False):
        self.keys = keys
        self.binary_thr = binary_thr
        self.to_int = to_int

    def _binarize(self, img):
        out_dtype = np.int32 if self.to_int else np.float32
        return (img > self.binary_thr).astype(out_dtype)

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            results[key] = self._binarize(results[key])
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(keys={self.keys}, '
                f'binary_thr={self.binary_thr}, to_int={self.to_int})')
@PIPELINES.register_module()
class RandomMaskDilation:
    """Randomly dilate binary masks.

    Args:
        keys (Sequence[str]): The images to be resized.
        get_binary (bool): If True, according to binary_thr, reset final
            output as binary mask. Otherwise, return masks directly.
        binary_thr (float): Threshold for obtaining binary mask.
        kernel_min (int): Min size of dilation kernel.
        kernel_max (int): Max size of dilation kernel.
    """

    def __init__(self, keys, binary_thr=0., kernel_min=9, kernel_max=49):
        self.keys = keys
        self.kernel_min = kernel_min
        self.kernel_max = kernel_max
        self.binary_thr = binary_thr

    def _random_dilate(self, img):
        # Pick a square kernel of random size in [kernel_min, kernel_max].
        kernel_size = np.random.randint(self.kernel_min, self.kernel_max + 1)
        kernel = np.ones((kernel_size, kernel_size), dtype=np.uint8)
        dilated = cv2.dilate(img, kernel, iterations=1)
        # Re-binarize after dilation.
        dilated = (dilated > self.binary_thr).astype(np.float32)
        return dilated, kernel_size

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            dilated, kernel_size = self._random_dilate(results[key])
            if dilated.ndim == 2:
                # Keep a trailing channel axis.
                dilated = np.expand_dims(dilated, axis=2)
            results[key] = dilated
            results[key + '_dilate_kernel_size'] = kernel_size
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(keys={self.keys}, '
                f'kernel_min={self.kernel_min}, kernel_max={self.kernel_max})')
@PIPELINES.register_module()
class RandomTransposeHW:
    """Randomly transpose images in H and W dimensions with a probability.

    (TransposeHW = horizontal flip + anti-clockwise rotatation by 90 degrees)
    When used with horizontal/vertical flips, it serves as a way of rotation
    augmentation. It also supports randomly transposing a list of images.
    Required keys are the keys in attributes "keys", added or modified keys
    are "transpose" and the keys in attributes "keys".

    Args:
        keys (list[str]): The images to be transposed.
        transpose_ratio (float): The propability to transpose the images.
    """

    def __init__(self, keys, transpose_ratio=0.5):
        self.keys = keys
        self.transpose_ratio = transpose_ratio

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        do_transpose = np.random.random() < self.transpose_ratio
        if do_transpose:
            for key in self.keys:
                value = results[key]
                if isinstance(value, list):
                    # Transpose every frame of a sequence with the same flag.
                    results[key] = [img.transpose(1, 0, 2) for img in value]
                else:
                    results[key] = value.transpose(1, 0, 2)
        results['transpose'] = do_transpose
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}(keys={self.keys}, '
                f'transpose_ratio={self.transpose_ratio})')
@PIPELINES.register_module()
class GenerateFrameIndiceswithPadding:
    """Generate frame index with padding for REDS dataset and Vid4 dataset
    during testing.

    Required keys: lq_path, gt_path, key, num_input_frames, max_frame_num
    Added or modified keys: lq_path, gt_path

    Args:
        padding (str): padding mode, one of
            'replicate' | 'reflection' | 'reflection_circle' | 'circle'.
            Examples: current_idx = 0, num_input_frames = 5
            The generated frame indices under different padding mode:
                replicate: [0, 0, 0, 1, 2]
                reflection: [2, 1, 0, 1, 2]
                reflection_circle: [4, 3, 0, 1, 2]
                circle: [3, 4, 0, 1, 2]
        filename_tmpl (str): Template for file name. Default: '{:08d}'.
    """

    def __init__(self, padding, filename_tmpl='{:08d}'):
        if padding not in ('replicate', 'reflection', 'reflection_circle',
                           'circle'):
            raise ValueError(f'Wrong padding mode {padding}.'
                             'Should be "replicate", "reflection", '
                             '"reflection_circle", "circle"')
        self.padding = padding
        self.filename_tmpl = filename_tmpl

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        # 'key' looks like '<clip_name><os.sep><frame_name>', e.g.
        # '000/00000000'.
        clip_name, frame_name = results['key'].split(os.sep)
        current_idx = int(frame_name)
        max_frame_num = results['max_frame_num'] - 1  # start from 0
        num_input_frames = results['num_input_frames']
        num_pad = num_input_frames // 2
        frame_list = []
        # Collect a window of `num_input_frames` indices centred at
        # `current_idx`; out-of-range indices are remapped according to the
        # configured padding mode (see class docstring for examples).
        for i in range(current_idx - num_pad, current_idx + num_pad + 1):
            if i < 0:
                if self.padding == 'replicate':
                    pad_idx = 0
                elif self.padding == 'reflection':
                    pad_idx = -i
                elif self.padding == 'reflection_circle':
                    pad_idx = current_idx + num_pad - i
                else:
                    # 'circle' wraps around within the window length.
                    pad_idx = num_input_frames + i
            elif i > max_frame_num:
                if self.padding == 'replicate':
                    pad_idx = max_frame_num
                elif self.padding == 'reflection':
                    pad_idx = max_frame_num * 2 - i
                elif self.padding == 'reflection_circle':
                    pad_idx = (current_idx - num_pad) - (i - max_frame_num)
                else:
                    pad_idx = i - num_input_frames
            else:
                pad_idx = i
            frame_list.append(pad_idx)
        lq_path_root = results['lq_path']
        gt_path_root = results['gt_path']
        lq_paths = [
            osp.join(lq_path_root, clip_name,
                     f'{self.filename_tmpl.format(idx)}.png')
            for idx in frame_list
        ]
        # Only the center (current) frame is used as ground truth.
        gt_paths = [osp.join(gt_path_root, clip_name, f'{frame_name}.png')]
        results['lq_path'] = lq_paths
        results['gt_path'] = gt_paths
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__ + f"(padding='{self.padding}')"
        return repr_str
@PIPELINES.register_module()
class GenerateFrameIndices:
    """Generate frame index for REDS datasets. It also performs
    temporal augmention with random interval.

    Required keys: lq_path, gt_path, key, num_input_frames
    Added or modified keys: lq_path, gt_path, interval, reverse

    Args:
        interval_list (list[int]): Interval list for temporal augmentation.
            It will randomly pick an interval from interval_list and sample
            frame index with the interval.
        frames_per_clip(int): Number of frames per clips. Default: 99 for
            REDS dataset.
    """

    def __init__(self, interval_list, frames_per_clip=99):
        self.interval_list = interval_list
        self.frames_per_clip = frames_per_clip

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        clip_name, frame_name = results['key'].split(
            os.sep)  # key example: 000/00000000
        center_frame_idx = int(frame_name)
        num_half_frames = results['num_input_frames'] // 2
        max_frame_num = results.get('max_frame_num', self.frames_per_clip + 1)
        frames_per_clip = min(self.frames_per_clip, max_frame_num - 1)
        # Temporal augmentation: sample neighbors with a random stride.
        interval = np.random.choice(self.interval_list)
        # ensure not exceeding the borders
        start_frame_idx = center_frame_idx - num_half_frames * interval
        end_frame_idx = center_frame_idx + num_half_frames * interval
        # Resample the center frame until the whole window fits in the clip.
        while (start_frame_idx < 0) or (end_frame_idx > frames_per_clip):
            center_frame_idx = np.random.randint(0, frames_per_clip + 1)
            start_frame_idx = center_frame_idx - num_half_frames * interval
            end_frame_idx = center_frame_idx + num_half_frames * interval
        frame_name = f'{center_frame_idx:08d}'
        neighbor_list = list(
            range(center_frame_idx - num_half_frames * interval,
                  center_frame_idx + num_half_frames * interval + 1, interval))
        lq_path_root = results['lq_path']
        gt_path_root = results['gt_path']
        lq_path = [
            osp.join(lq_path_root, clip_name, f'{v:08d}.png')
            for v in neighbor_list
        ]
        # Only the (possibly resampled) center frame is the ground truth.
        gt_path = [osp.join(gt_path_root, clip_name, f'{frame_name}.png')]
        results['lq_path'] = lq_path
        results['gt_path'] = gt_path
        results['interval'] = interval
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(interval_list={self.interval_list}, '
                     f'frames_per_clip={self.frames_per_clip})')
        return repr_str
@PIPELINES.register_module()
class TemporalReverse:
    """Randomly reverse frame lists for temporal augmentation.

    Required keys are those listed in ``keys``; added or modified keys are
    the entries of ``keys`` plus "reverse".

    Args:
        keys (list[str]): The frame lists to be reversed.
        reverse_ratio (float): Probability of reversing the frame lists.
            Default: 0.5.
    """

    def __init__(self, keys, reverse_ratio=0.5):
        self.reverse_ratio = reverse_ratio
        self.keys = keys

    def __call__(self, results):
        """Reverse the configured frame lists in place with probability
        ``reverse_ratio``.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information,
                with the boolean flag "reverse" recording the decision.
        """
        do_reverse = np.random.random() < self.reverse_ratio
        if do_reverse:
            for k in self.keys:
                results[k].reverse()
        results['reverse'] = do_reverse
        return results

    def __repr__(self):
        return (self.__class__.__name__ +
                f'(keys={self.keys}, reverse_ratio={self.reverse_ratio})')
@PIPELINES.register_module()
class GenerateSegmentIndices:
    """Generate frame indices for a segment, with random-interval temporal
    augmentation.

    Required keys: lq_path, gt_path, key, num_input_frames, sequence_length
    Added or modified keys: lq_path, gt_path, interval, reverse

    Args:
        interval_list (list[int]): Candidate intervals; one is drawn at
            random per call and used to stride the frame indices.
        start_idx (int): The index corresponding to the first frame in the
            sequence. Default: 0.
        filename_tmpl (str): Template for file name. Default: '{:08d}.png'.
    """

    def __init__(self, interval_list, start_idx=0, filename_tmpl='{:08d}.png'):
        self.interval_list = interval_list
        self.start_idx = start_idx
        self.filename_tmpl = filename_tmpl

    def __call__(self, results):
        """Sample a starting frame and build the strided index list.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict with 'lq_path' and 'gt_path' replaced by per-frame
                path lists and 'interval' recording the drawn interval.

        Raises:
            ValueError: If the sequence is too short for the requested
                number of frames at the drawn interval.
        """
        # key example: '000', 'calendar' (sequence name)
        clip_name = results['key']
        interval = np.random.choice(self.interval_list)
        self.sequence_length = results['sequence_length']
        num_frames = results.get('num_input_frames', self.sequence_length)
        span = num_frames * interval
        if self.sequence_length < span:
            raise ValueError('The input sequence is not long enough to '
                             'support the current choice of [interval] or '
                             '[num_input_frames].')
        # Uniformly choose a valid starting frame, then stride by interval.
        first = np.random.randint(0, self.sequence_length - span + 1)
        neighbor_list = [
            idx + self.start_idx for idx in range(first, first + span, interval)
        ]

        # Expand the directory roots into per-frame file paths.
        lq_root = results['lq_path']
        gt_root = results['gt_path']
        results['lq_path'] = [
            osp.join(lq_root, clip_name, self.filename_tmpl.format(idx))
            for idx in neighbor_list
        ]
        results['gt_path'] = [
            osp.join(gt_root, clip_name, self.filename_tmpl.format(idx))
            for idx in neighbor_list
        ]
        results['interval'] = interval
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(interval_list={self.interval_list})'
@PIPELINES.register_module()
class MirrorSequence:
    """Extend short sequences (e.g. Vimeo-90K) by mirroring them.

    Given a sequence with N frames (x1, ..., xN), the output is
    (x1, ..., xN, xN, ..., x1), i.e. the sequence followed by its reverse.

    Args:
        keys (list[str]): The frame lists to be extended.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Mirror-extend each of the configured frame lists.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.

        Raises:
            TypeError: If any of the configured values is not a list.
        """
        for k in self.keys:
            seq = results[k]
            if not isinstance(seq, list):
                raise TypeError('The input must be of class list[nparray]. '
                                f'Got {type(seq)}.')
            results[k] = seq + seq[::-1]
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
@PIPELINES.register_module()
class CopyValues:
    """Copy the value of a source key to a destination key.

    It does the following: results[dst_key] = results[src_key] for
    (src_key, dst_key) in zip(src_keys, dst_keys).

    Added keys are the keys in the attribute "dst_keys".

    Args:
        src_keys (list[str]): The source keys.
        dst_keys (list[str]): The destination keys.

    Raises:
        AssertionError: If "src_keys" or "dst_keys" is not a list.
        ValueError: If "src_keys" and "dst_keys" have different lengths.
    """

    def __init__(self, src_keys, dst_keys):
        # NOTE: raising AssertionError (rather than TypeError) is kept for
        # backward compatibility with existing callers.
        if not isinstance(src_keys, list) or not isinstance(dst_keys, list):
            raise AssertionError('"src_keys" and "dst_keys" must be lists.')
        if len(src_keys) != len(dst_keys):
            # Bug fix: the two fragments previously concatenated to
            # "the samenumber of elements" (missing space).
            raise ValueError('"src_keys" and "dst_keys" should have the same '
                             'number of elements.')
        self.src_keys = src_keys
        self.dst_keys = dst_keys

    def __call__(self, results):
        """Copy each source value to its destination key.

        A deep copy is made so that later in-place modification of the
        destination does not affect the source.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict with the destination keys added/modified.
        """
        for (src_key, dst_key) in zip(self.src_keys, self.dst_keys):
            results[dst_key] = copy.deepcopy(results[src_key])
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(src_keys={self.src_keys})')
        repr_str += (f'(dst_keys={self.dst_keys})')
        return repr_str
@PIPELINES.register_module()
class Quantize:
    """Quantize and clip the image(s) to [0, 1].

    The input is assumed to lie in [0, 1]. Each value is snapped to the
    nearest multiple of 1/255 (simulating an 8-bit image) and clipped.

    Modified keys are the attributes specified in "keys".

    Args:
        keys (list[str]): The keys whose values are quantized and clipped.
    """

    def __init__(self, keys):
        self.keys = keys

    def _quantize_clip(self, input_):
        # Accept either a single image or a list of images; remember which
        # so the output mirrors the input structure.
        single = isinstance(input_, np.ndarray)
        images = [input_] if single else input_
        processed = [
            np.clip((img * 255.0).round(), 0, 255) / 255. for img in images
        ]
        return processed[0] if single else processed

    def __call__(self, results):
        """Quantize and clip the values of the configured keys.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict with the values of the specified keys rounded and
                clipped.
        """
        for k in self.keys:
            results[k] = self._quantize_clip(results[k])
        return results

    def __repr__(self):
        return self.__class__.__name__
@PIPELINES.register_module()
class UnsharpMasking:
    """Apply unsharp masking to an image or a sequence of images.

    Images are assumed to be normalized to [0, 1] (outputs are clipped to
    that range).

    Args:
        kernel_size (int): The kernel_size of the Gaussian kernel; must be
            odd.
        sigma (float): The standard deviation of the Gaussian.
        weight (float): The weight of the "details" in the final output.
        threshold (float): Pixel differences larger than this value (on a
            0-255 scale) are regarded as "details".
        keys (list[str]): The keys whose values are processed.

    Added keys are "xxx_unsharp", where "xxx" are the attributes specified
    in "keys".
    """
    def __init__(self, kernel_size, sigma, weight, threshold, keys):
        if kernel_size % 2 == 0:
            raise ValueError('kernel_size must be an odd number, but '
                             f'got {kernel_size}.')
        self.kernel_size = kernel_size
        self.sigma = sigma
        self.weight = weight
        self.threshold = threshold
        self.keys = keys
        # 2D Gaussian kernel built as the outer product of two 1D kernels.
        kernel = cv2.getGaussianKernel(kernel_size, sigma)
        self.kernel = np.matmul(kernel, kernel.transpose())
    def _unsharp_masking(self, imgs):
        # Accept a single image or a list; remember which so the output
        # mirrors the input structure.
        is_single_image = False
        if isinstance(imgs, np.ndarray):
            is_single_image = True
            imgs = [imgs]
        outputs = []
        for img in imgs:
            # "Details" = difference between the image and its blurred copy.
            residue = img - cv2.filter2D(img, -1, self.kernel)
            # Threshold operates on a 0-255 scale, hence the * 255.
            mask = np.float32(np.abs(residue) * 255 > self.threshold)
            # Blur the binary mask so the sharpened/original blend has no
            # hard seams.
            soft_mask = cv2.filter2D(mask, -1, self.kernel)
            sharpened = np.clip(img + self.weight * residue, 0, 1)
            outputs.append(soft_mask * sharpened + (1 - soft_mask) * img)
        if is_single_image:
            outputs = outputs[0]
        return outputs
    def __call__(self, results):
        """Add an "{key}_unsharp" entry for every configured key; the
        original entries are left untouched."""
        for key in self.keys:
            results[f'{key}_unsharp'] = self._unsharp_masking(results[key])
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(keys={self.keys}, kernel_size={self.kernel_size}, '
                     f'sigma={self.sigma}, weight={self.weight}, '
                     f'threshold={self.threshold})')
        return repr_str
| 46,436 | 35.081585 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/matting_aug.py | import os.path as osp
import random
import cv2
import mmcv
import numpy as np
from mmcv.fileio import FileClient
from ..registry import PIPELINES
from .utils import adjust_gamma, random_choose_unknown
def add_gaussian_noise(img, mu, sigma):
    """Add i.i.d. Gaussian noise N(mu, sigma^2) to an image.

    Args:
        img (np.ndarray): Input image; converted to float32 before adding
            the noise.
        mu (float): Mean of the Gaussian noise.
        sigma (float): Standard deviation of the Gaussian noise.

    Returns:
        np.ndarray: Noisy image, clipped to [0, 255].
    """
    noise = np.random.normal(mu, sigma, img.shape)
    return np.clip(img.astype(np.float32) + noise, 0, 255)
@PIPELINES.register_module()
class MergeFgAndBg:
    """Composite a foreground over a background using the alpha matte.

    Required keys are "alpha" (uint8, 0-255), "fg" and "bg"; added key is
    "merged".
    """

    def __call__(self, results):
        """Alpha-blend: merged = alpha * fg + (1 - alpha) * bg.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        # Normalize alpha to [0, 1] and add a channel axis for broadcasting.
        weight = results['alpha'][..., None].astype(np.float32) / 255.
        composite = results['fg'] * weight + (1. - weight) * results['bg']
        results['merged'] = composite
        return results

    def __repr__(self) -> str:
        return self.__class__.__name__
@PIPELINES.register_module()
class GenerateTrimap:
    """Using random erode/dilate to generate trimap from alpha matte.

    Required key is "alpha", added key is "trimap" (float32; 0 = definite
    background, 128 = unknown, 255 = definite foreground).

    Args:
        kernel_size (int | tuple[int]): The range of random kernel_size of
            erode/dilate; int indicates a fixed kernel_size. If `random` is set
            to False and kernel_size is a tuple of length 2, then it will be
            interpreted as (erode kernel_size, dilate kernel_size). It should
            be noted that the kernel of the erosion and dilation has the same
            height and width.
        iterations (int | tuple[int], optional): The range of random iterations
            of erode/dilate; int indicates a fixed iterations. If `random` is
            set to False and iterations is a tuple of length 2, then it will be
            interpreted as (erode iterations, dilate iterations). Default to 1.
        random (bool, optional): Whether use random kernel_size and iterations
            when generating trimap. See `kernel_size` and `iterations` for more
            information.
    """
    def __init__(self, kernel_size, iterations=1, random=True):
        # An int becomes a half-open range of width 1 so the random path
        # below can treat both cases uniformly.
        if isinstance(kernel_size, int):
            kernel_size = kernel_size, kernel_size + 1
        elif not mmcv.is_tuple_of(kernel_size, int) or len(kernel_size) != 2:
            raise ValueError('kernel_size must be an int or a tuple of 2 int, '
                             f'but got {kernel_size}')
        if isinstance(iterations, int):
            iterations = iterations, iterations + 1
        elif not mmcv.is_tuple_of(iterations, int) or len(iterations) != 2:
            raise ValueError('iterations must be an int or a tuple of 2 int, '
                             f'but got {iterations}')
        self.random = random
        if self.random:
            # Pre-build one elliptical kernel per size in the range; a size
            # is picked at random for every __call__.
            min_kernel, max_kernel = kernel_size
            self.iterations = iterations
            self.kernels = [
                cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (size, size))
                for size in range(min_kernel, max_kernel)
            ]
        else:
            # Fixed mode: kernels[0] is the erode kernel, kernels[1] the
            # dilate kernel.
            erode_ksize, dilate_ksize = kernel_size
            self.iterations = iterations
            self.kernels = [
                cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                          (erode_ksize, erode_ksize)),
                cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                          (dilate_ksize, dilate_ksize))
            ]
    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        alpha = results['alpha']
        if self.random:
            kernel_num = len(self.kernels)
            erode_kernel_idx = np.random.randint(kernel_num)
            dilate_kernel_idx = np.random.randint(kernel_num)
            min_iter, max_iter = self.iterations
            erode_iter = np.random.randint(min_iter, max_iter)
            dilate_iter = np.random.randint(min_iter, max_iter)
        else:
            erode_kernel_idx, dilate_kernel_idx = 0, 1
            erode_iter, dilate_iter = self.iterations
        eroded = cv2.erode(
            alpha, self.kernels[erode_kernel_idx], iterations=erode_iter)
        dilated = cv2.dilate(
            alpha, self.kernels[dilate_kernel_idx], iterations=dilate_iter)
        # Start from all-unknown (128); pixels that stay fully opaque after
        # erosion are definite foreground, pixels that stay fully
        # transparent after dilation are definite background.
        trimap = np.zeros_like(alpha)
        trimap.fill(128)
        trimap[eroded >= 255] = 255
        trimap[dilated <= 0] = 0
        results['trimap'] = trimap.astype(np.float32)
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(kernels={self.kernels}, iterations={self.iterations}, '
                     f'random={self.random})')
        return repr_str
@PIPELINES.register_module()
class GenerateTrimapWithDistTransform:
    """Generate trimap with distance transform function.

    Required key is "alpha", added key is "trimap" (uint8).

    Args:
        dist_thr (int, optional): Distance threshold. Area with alpha value
            between (0, 255) will be considered as initial unknown area. Then
            area with distance to unknown area smaller than the distance
            threshold will also be consider as unknown area. Defaults to 20.
        random (bool, optional): If True, use random distance threshold from
            [1, dist_thr). If False, use `dist_thr` as the distance threshold
            directly. Defaults to True.
    """

    def __init__(self, dist_thr=20, random=True):
        if not (isinstance(dist_thr, int) and dist_thr >= 1):
            # Bug fix: the message used to say "greater than 1" although
            # dist_thr == 1 is accepted; the actual constraint is >= 1.
            raise ValueError('dist_thr must be an int that is at least 1, '
                             f'but got {dist_thr}')
        self.dist_thr = dist_thr
        self.random = random

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        alpha = results['alpha']
        # Pixels fully background (0) or fully foreground (255) are "known";
        # everything in between forms the initial unknown region.
        known = (alpha == 0) | (alpha == 255)
        # image dilation implemented by Euclidean distance transform:
        # distance of each known pixel to the nearest unknown pixel.
        dist_to_unknown = cv2.distanceTransform(
            known.astype(np.uint8), cv2.DIST_L2, cv2.DIST_MASK_PRECISE)
        # NOTE(review): when random=True and dist_thr == 1, randint(1, 1)
        # raises ValueError (empty range) — confirm callers never use that
        # combination.
        dist_thr = np.random.randint(
            1, self.dist_thr) if self.random else self.dist_thr
        unknown = dist_to_unknown <= dist_thr
        # Definite foreground first, then widen the unknown band.
        trimap = (alpha == 255) * 255
        trimap[unknown] = 128
        results['trimap'] = trimap.astype(np.uint8)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += f'(dist_thr={self.dist_thr}, random={self.random})'
        return repr_str
@PIPELINES.register_module()
class CompositeFg:
    """Composite foreground with a random foreground.

    This class composites the current training sample with additional data
    randomly (could be from the same dataset). With probability 0.5, the sample
    will be composited with a random sample from the specified directory.
    The composition is performed as:

    .. math::
        fg_{new} = \\alpha_1 * fg_1 + (1 - \\alpha_1) * fg_2

        \\alpha_{new} = 1 - (1 - \\alpha_1) * (1 - \\alpha_2)

    where :math:`(fg_1, \\alpha_1)` is from the current sample and
    :math:`(fg_2, \\alpha_2)` is the randomly loaded sample. With the above
    composition, :math:`\\alpha_{new}` is still in `[0, 1]`.

    Required keys are "alpha" and "fg". Modified keys are "alpha" and "fg".

    Args:
        fg_dirs (str | list[str]): Path of directories to load foreground
            images from.
        alpha_dirs (str | list[str]): Path of directories to load alpha mattes
            from.
        interpolation (str): Interpolation method of `mmcv.imresize` to resize
            the randomly loaded images. Default: 'nearest'.
        io_backend (str): io backend where images are stored. Default: 'disk'.
        kwargs (dict): Args for the file client.
    """

    def __init__(self,
                 fg_dirs,
                 alpha_dirs,
                 interpolation='nearest',
                 io_backend='disk',
                 **kwargs):
        self.fg_dirs = fg_dirs if isinstance(fg_dirs, list) else [fg_dirs]
        self.alpha_dirs = alpha_dirs if isinstance(alpha_dirs,
                                                   list) else [alpha_dirs]
        self.interpolation = interpolation
        # Scan the directories once up front; actual image loading is
        # deferred to __call__ via the lazily created file client.
        self.fg_list, self.alpha_list = self._get_file_list(
            self.fg_dirs, self.alpha_dirs)
        self.io_backend = io_backend
        self.file_client = None
        self.kwargs = kwargs

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        if self.file_client is None:
            self.file_client = FileClient(self.io_backend, **self.kwargs)
        fg = results['fg']
        alpha = results['alpha'].astype(np.float32) / 255.
        h, w = results['fg'].shape[:2]
        # randomly select fg
        if np.random.rand() < 0.5:
            idx = np.random.randint(len(self.fg_list))
            fg2_bytes = self.file_client.get(self.fg_list[idx])
            fg2 = mmcv.imfrombytes(fg2_bytes)
            alpha2_bytes = self.file_client.get(self.alpha_list[idx])
            alpha2 = mmcv.imfrombytes(alpha2_bytes, flag='grayscale')
            alpha2 = alpha2.astype(np.float32) / 255.
            # Resize the extra sample to match the current one.
            fg2 = mmcv.imresize(fg2, (w, h), interpolation=self.interpolation)
            alpha2 = mmcv.imresize(
                alpha2, (w, h), interpolation=self.interpolation)
            # the overlap of two 50% transparency will be 75%
            alpha_tmp = 1 - (1 - alpha) * (1 - alpha2)
            # if the result alpha is all-one, then we avoid composition
            if np.any(alpha_tmp < 1):
                # composite fg with fg2
                fg = fg.astype(np.float32) * alpha[..., None] \
                    + fg2.astype(np.float32) * (1 - alpha[..., None])
                alpha = alpha_tmp
                # Bug fix: this used to be a bare `fg.astype(np.uint8)`
                # whose return value was discarded (ndarray.astype returns a
                # new array, it does not convert in place), so the composited
                # float32 image leaked into `results`. Bind the result.
                fg = fg.astype(np.uint8)
        results['fg'] = fg
        results['alpha'] = (alpha * 255).astype(np.uint8)
        return results

    @staticmethod
    def _get_file_list(fg_dirs, alpha_dirs):
        # Collect matching (fg, alpha) path pairs from each directory pair.
        all_fg_list = list()
        all_alpha_list = list()
        for fg_dir, alpha_dir in zip(fg_dirs, alpha_dirs):
            fg_list = sorted(mmcv.scandir(fg_dir))
            alpha_list = sorted(mmcv.scandir(alpha_dir))
            # we assume the file names for fg and alpha are the same
            assert len(fg_list) == len(alpha_list), (
                f'{fg_dir} and {alpha_dir} should have the same number of '
                f'images ({len(fg_list)} differs from ({len(alpha_list)})')
            fg_list = [osp.join(fg_dir, fg) for fg in fg_list]
            alpha_list = [osp.join(alpha_dir, alpha) for alpha in alpha_list]
            all_fg_list.extend(fg_list)
            all_alpha_list.extend(alpha_list)
        return all_fg_list, all_alpha_list

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(fg_dirs={self.fg_dirs}, alpha_dirs={self.alpha_dirs}, '
                     f"interpolation='{self.interpolation}')")
        return repr_str
@PIPELINES.register_module()
class GenerateSeg:
    """Generate segmentation mask from alpha matte.

    Required keys are "alpha" and "trimap"; added keys are "seg" and
    "num_holes".

    Args:
        kernel_size (int, optional): Kernel size for both erosion and
            dilation. The kernel will have the same height and width.
            Defaults to 5.
        erode_iter_range (tuple, optional): Iteration of erosion.
            Defaults to (10, 20).
        dilate_iter_range (tuple, optional): Iteration of dilation.
            Defaults to (15, 30).
        num_holes_range (tuple, optional): Range of number of holes to
            randomly select from. Defaults to (0, 3).
        hole_sizes (list, optional): List of (h, w) to be selected as the
            size of the rectangle hole.
            Defaults to [(15, 15), (25, 25), (35, 35), (45, 45)].
        blur_ksizes (list, optional): List of (h, w) to be selected as the
            kernel_size of the gaussian blur.
            Defaults to [(21, 21), (31, 31), (41, 41)].
    """

    # NOTE: the list defaults below are shared across instances (mutable
    # default arguments); they are never mutated here, so this is safe, but
    # callers must not modify them in place.
    def __init__(self,
                 kernel_size=5,
                 erode_iter_range=(10, 20),
                 dilate_iter_range=(15, 30),
                 num_holes_range=(0, 3),
                 hole_sizes=[(15, 15), (25, 25), (35, 35), (45, 45)],
                 blur_ksizes=[(21, 21), (31, 31), (41, 41)]):
        self.kernel_size = kernel_size
        self.erode_iter_range = erode_iter_range
        self.dilate_iter_range = dilate_iter_range
        self.num_holes_range = num_holes_range
        self.hole_sizes = hole_sizes
        self.blur_ksizes = blur_ksizes

    @staticmethod
    def _crop_hole(img, start_point, hole_size):
        """Create an all-zero rectangle hole in the image.

        Args:
            img (np.ndarray): Source image.
            start_point (tuple[int]): The top-left point of the rectangle.
            hole_size (tuple[int]): The height and width of the rectangle
                hole.

        Return:
            np.ndarray: The cropped image (modified in place).
        """
        top, left = start_point
        bottom = top + hole_size[0]
        right = left + hole_size[1]
        height, weight = img.shape[:2]
        if top < 0 or bottom > height or left < 0 or right > weight:
            raise ValueError(f'crop area {(left, top, right, bottom)} exceeds '
                             f'image size {(height, weight)}')
        img[top:bottom, left:right] = 0
        return img

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        alpha = results['alpha']
        trimap = results['trimap']
        # generate segmentation mask
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                           (self.kernel_size,
                                            self.kernel_size))
        # NOTE(review): 0.5 threshold assumes alpha is normalized to [0, 1]
        # at this point in the pipeline — confirm against the caller, since
        # other transforms in this file use a 0-255 alpha.
        seg = (alpha > 0.5).astype(np.float32)
        seg = cv2.erode(
            seg, kernel, iterations=np.random.randint(*self.erode_iter_range))
        seg = cv2.dilate(
            seg, kernel, iterations=np.random.randint(*self.dilate_iter_range))
        # generate some holes in segmentation mask
        num_holes = np.random.randint(*self.num_holes_range)
        for _ in range(num_holes):
            hole_size = random.choice(self.hole_sizes)
            unknown = trimap == 128
            start_point = random_choose_unknown(unknown, hole_size)
            # The hole is punched in both the seg mask and the trimap.
            seg = self._crop_hole(seg, start_point, hole_size)
            trimap = self._crop_hole(trimap, start_point, hole_size)
        # perform gaussian blur to segmentation mask
        seg = cv2.GaussianBlur(seg, random.choice(self.blur_ksizes), 0)
        results['seg'] = seg.astype(np.uint8)
        results['num_holes'] = num_holes
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        # Bug fix: the representation used to be missing its closing ')'.
        repr_str += (
            f'(kernel_size={self.kernel_size}, '
            f'erode_iter_range={self.erode_iter_range}, '
            f'dilate_iter_range={self.dilate_iter_range}, '
            f'num_holes_range={self.num_holes_range}, '
            f'hole_sizes={self.hole_sizes}, blur_ksizes={self.blur_ksizes})')
        return repr_str
@PIPELINES.register_module()
class PerturbBg:
    """Randomly perturb the background with noise or a gamma change.

    Required key is "bg"; added key is "noisy_bg".

    Args:
        gamma_ratio (float, optional): Probability of applying gamma
            correction instead of Gaussian noise. Defaults to 0.6.
    """

    def __init__(self, gamma_ratio=0.6):
        if gamma_ratio < 0 or gamma_ratio > 1:
            raise ValueError('gamma_ratio must be a float between [0, 1], '
                             f'but got {gamma_ratio}')
        self.gamma_ratio = gamma_ratio

    def __call__(self, results):
        """Write a perturbed copy of the background into "noisy_bg".

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        draw = np.random.rand()
        if draw < self.gamma_ratio:
            # Gamma correction with gamma ~ N(1, 0.12).
            gamma = np.random.normal(1, 0.12)
            results['noisy_bg'] = adjust_gamma(results['bg'], gamma)
        else:
            # Gaussian noise with mu ~ U[-7, 7) and sigma ~ U[2, 6).
            mu = np.random.randint(-7, 7)
            sigma = np.random.randint(2, 6)
            results['noisy_bg'] = add_gaussian_noise(results['bg'], mu, sigma)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(gamma_ratio={self.gamma_ratio})'
@PIPELINES.register_module()
class GenerateSoftSeg:
    """Generate soft segmentation mask from input segmentation mask.

    Required key is "seg" (uint8, 0-255), added key is "soft_seg" (uint8).

    Args:
        fg_thr (float, optional): Threshold of the foreground in the
            normalized input segmentation mask. Defaults to 0.2.
        border_width (int, optional): Width of border to be padded to the
            bottom of the mask. Defaults to 25.
        erode_ksize (int, optional): Fixed kernel size of the erosion.
            Defaults to 3.
        dilate_ksize (int, optional): Fixed kernel size of the dilation.
            Defaults to 5.
        erode_iter_range (tuple, optional): Iteration of erosion.
            Defaults to (10, 20).
        dilate_iter_range (tuple, optional): Iteration of dilation.
            Defaults to (3, 7).
        blur_ksizes (list, optional): List of (h, w) to be selected as the
            kernel_size of the gaussian blur.
            Defaults to [(21, 21), (31, 31), (41, 41)].
    """
    def __init__(self,
                 fg_thr=0.2,
                 border_width=25,
                 erode_ksize=3,
                 dilate_ksize=5,
                 erode_iter_range=(10, 20),
                 dilate_iter_range=(3, 7),
                 blur_ksizes=[(21, 21), (31, 31), (41, 41)]):
        # Validate all parameters eagerly so misconfiguration fails at
        # pipeline construction rather than mid-training.
        if not isinstance(fg_thr, float):
            raise TypeError(f'fg_thr must be a float, but got {type(fg_thr)}')
        if not isinstance(border_width, int):
            raise TypeError(
                f'border_width must be an int, but got {type(border_width)}')
        if not isinstance(erode_ksize, int):
            raise TypeError(
                f'erode_ksize must be an int, but got {type(erode_ksize)}')
        if not isinstance(dilate_ksize, int):
            raise TypeError(
                f'dilate_ksize must be an int, but got {type(dilate_ksize)}')
        if (not mmcv.is_tuple_of(erode_iter_range, int)
                or len(erode_iter_range) != 2):
            raise TypeError('erode_iter_range must be a tuple of 2 int, '
                            f'but got {erode_iter_range}')
        if (not mmcv.is_tuple_of(dilate_iter_range, int)
                or len(dilate_iter_range) != 2):
            raise TypeError('dilate_iter_range must be a tuple of 2 int, '
                            f'but got {dilate_iter_range}')
        if not mmcv.is_list_of(blur_ksizes, tuple):
            raise TypeError(
                f'blur_ksizes must be a list of tuple, but got {blur_ksizes}')
        self.fg_thr = fg_thr
        self.border_width = border_width
        self.erode_ksize = erode_ksize
        self.dilate_ksize = dilate_ksize
        self.erode_iter_range = erode_iter_range
        self.dilate_iter_range = dilate_iter_range
        self.blur_ksizes = blur_ksizes
    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        # Normalize to [0, 1] and binarize the foreground.
        seg = results['seg'].astype(np.float32) / 255
        height, _ = seg.shape[:2]
        seg[seg > self.fg_thr] = 1
        # to align with the original repo, pad the bottom of the mask
        seg = cv2.copyMakeBorder(seg, 0, self.border_width, 0, 0,
                                 cv2.BORDER_REPLICATE)
        # erode/dilate segmentation mask with random iteration counts
        erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                 (self.erode_ksize,
                                                  self.erode_ksize))
        dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                  (self.dilate_ksize,
                                                   self.dilate_ksize))
        seg = cv2.erode(
            seg,
            erode_kernel,
            iterations=np.random.randint(*self.erode_iter_range))
        seg = cv2.dilate(
            seg,
            dilate_kernel,
            iterations=np.random.randint(*self.dilate_iter_range))
        # perform gaussian blur to segmentation mask to soften its edges
        seg = cv2.GaussianBlur(seg, random.choice(self.blur_ksizes), 0)
        # remove the padded rows and return to the 0-255 uint8 range
        seg = (seg * 255).astype(np.uint8)
        seg = np.delete(seg, range(height, height + self.border_width), 0)
        results['soft_seg'] = seg
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(fg_thr={self.fg_thr}, '
                     f'border_width={self.border_width}, '
                     f'erode_ksize={self.erode_ksize}, '
                     f'dilate_ksize={self.dilate_ksize}, '
                     f'erode_iter_range={self.erode_iter_range}, '
                     f'dilate_iter_range={self.dilate_iter_range}, '
                     f'blur_ksizes={self.blur_ksizes})')
        return repr_str
@PIPELINES.register_module()
class TransformTrimap:
    """Transform trimap into two-channel and six-channel representations.

    This class will generate a two-channel trimap composed of definite
    foreground and background masks and encode it into a six-channel trimap
    using Gaussian blurs of the generated two-channel trimap at three
    different scales. The transformed trimap has 6 channels.

    Required key is "trimap" (uint8; 0 = bg, 128 = unknown, 255 = fg),
    added keys are "transformed_trimap" (float32, HxWx6) and
    "two_channel_trimap" (uint8, HxWx2).

    Adopted from the following repository:
    https://github.com/MarcoForte/FBA_Matting/blob/master/networks/transforms.py.
    """
    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        trimap = results['trimap']
        assert len(trimap.shape) == 2
        h, w = trimap.shape[:2]
        # generate two-channel trimap: channel 0 marks definite background,
        # channel 1 marks definite foreground (one-hot, 0/255)
        trimap2 = np.zeros((h, w, 2), dtype=np.uint8)
        trimap2[trimap == 0, 0] = 255
        trimap2[trimap == 255, 1] = 255
        trimap_trans = np.zeros((h, w, 6), dtype=np.float32)
        # Three Gaussian scales per mask; sigma for each output channel is
        # factor * L (see below).
        factor = np.array([[[0.02, 0.08, 0.16]]], dtype=np.float32)
        for k in range(2):
            if np.any(trimap2[:, :, k]):
                # Negative squared Euclidean distance to the mask region,
                # so that exp() below yields a Gaussian falloff with
                # distance (value 1 on the mask itself).
                dt_mask = -cv2.distanceTransform(255 - trimap2[:, :, k],
                                                 cv2.DIST_L2, 0)**2
                dt_mask = dt_mask[..., None]
                # NOTE(review): L = 320 appears to be a reference image size
                # inherited from the FBA_Matting repo — confirm before
                # changing.
                L = 320
                trimap_trans[..., 3 * k:3 * k +
                             3] = np.exp(dt_mask / (2 * ((factor * L)**2)))
        results['transformed_trimap'] = trimap_trans
        results['two_channel_trimap'] = trimap2
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        return repr_str
| 24,538 | 37.766193 | 81 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/compose.py | from collections.abc import Sequence
from mmcv.utils import build_from_cfg
from ..registry import PIPELINES
@PIPELINES.register_module()
class Compose:
    """Compose a data pipeline from a sequence of transforms.

    Args:
        transforms (list[dict | callable]):
            Either config dicts of transforms or transform objects.
    """

    def __init__(self, transforms):
        assert isinstance(transforms, Sequence)
        self.transforms = []
        for item in transforms:
            if callable(item):
                self.transforms.append(item)
            elif isinstance(item, dict):
                # Config dicts are instantiated through the registry.
                self.transforms.append(build_from_cfg(item, PIPELINES))
            else:
                raise TypeError(f'transform must be callable or a dict, '
                                f'but got {type(item)}')

    def __call__(self, data):
        """Apply each transform in order, stopping early if one returns
        None.

        Args:
            data (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict | None: The processed data, or None if a transform
                dropped the sample.
        """
        for transform in self.transforms:
            data = transform(data)
            if data is None:
                return None
        return data

    def __repr__(self):
        body = [f'    {transform}' for transform in self.transforms]
        return '\n'.join([self.__class__.__name__ + '('] + body) + '\n)'
| 1,620 | 29.018519 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/random_degradations.py | import io
import logging
import random
import cv2
import numpy as np
from mmedit.datasets.pipelines import blur_kernels as blur_kernels
from ..registry import PIPELINES
try:
import av
has_av = True
except ImportError:
has_av = False
@PIPELINES.register_module()
class RandomBlur:
    """Apply random blur to the input.

    Modified keys are the attributes specified in "keys".

    Args:
        params (dict): A dictionary specifying the degradation settings.
        keys (list[str]): A list specifying the keys whose values are
            modified.
    """
    def __init__(self, params, keys):
        self.keys = keys
        self.params = params
    def get_kernel(self, num_kernels):
        """Sample ``num_kernels`` blur kernels.

        The kernel family and initial parameters are drawn once; for each
        subsequent kernel the parameters take a random-walk step (the
        ``*_step`` settings) and are clipped back into their ranges, giving
        smoothly varying per-frame kernels for video degradation.

        Args:
            num_kernels (int): Number of kernels to generate (one per
                frame).

        Returns:
            list: The generated blur kernels.
        """
        kernel_type = np.random.choice(
            self.params['kernel_list'], p=self.params['kernel_prob'])
        kernel_size = random.choice(self.params['kernel_size'])
        # Initial parameter values, each drawn uniformly from its range;
        # the *_step values control the per-kernel random walk below.
        sigma_x_range = self.params.get('sigma_x', [0, 0])
        sigma_x = np.random.uniform(sigma_x_range[0], sigma_x_range[1])
        sigma_x_step = self.params.get('sigma_x_step', 0)
        sigma_y_range = self.params.get('sigma_y', [0, 0])
        sigma_y = np.random.uniform(sigma_y_range[0], sigma_y_range[1])
        sigma_y_step = self.params.get('sigma_y_step', 0)
        rotate_angle_range = self.params.get('rotate_angle', [-np.pi, np.pi])
        rotate_angle = np.random.uniform(rotate_angle_range[0],
                                         rotate_angle_range[1])
        rotate_angle_step = self.params.get('rotate_angle_step', 0)
        beta_gau_range = self.params.get('beta_gaussian', [0.5, 4])
        beta_gau = np.random.uniform(beta_gau_range[0], beta_gau_range[1])
        beta_gau_step = self.params.get('beta_gaussian_step', 0)
        beta_pla_range = self.params.get('beta_plateau', [1, 2])
        beta_pla = np.random.uniform(beta_pla_range[0], beta_pla_range[1])
        beta_pla_step = self.params.get('beta_plateau_step', 0)
        omega_range = self.params.get('omega', None)
        omega_step = self.params.get('omega_step', 0)
        if omega_range is None:  # follow Real-ESRGAN settings if not specified
            if kernel_size < 13:
                omega_range = [np.pi / 3., np.pi]
            else:
                omega_range = [np.pi / 5., np.pi]
        omega = np.random.uniform(omega_range[0], omega_range[1])
        # determine blurring kernel
        kernels = []
        for _ in range(0, num_kernels):
            # Each [v, v] pair is a degenerate range so random_mixed_kernels
            # uses exactly the current walked value.
            kernel = blur_kernels.random_mixed_kernels(
                [kernel_type],
                [1],
                kernel_size,
                [sigma_x, sigma_x],
                [sigma_y, sigma_y],
                [rotate_angle, rotate_angle],
                [beta_gau, beta_gau],
                [beta_pla, beta_pla],
                [omega, omega],
                None,
            )
            kernels.append(kernel)
            # update kernel parameters: random-walk step, then clip back
            # into the configured range
            sigma_x += np.random.uniform(-sigma_x_step, sigma_x_step)
            sigma_y += np.random.uniform(-sigma_y_step, sigma_y_step)
            rotate_angle += np.random.uniform(-rotate_angle_step,
                                              rotate_angle_step)
            beta_gau += np.random.uniform(-beta_gau_step, beta_gau_step)
            beta_pla += np.random.uniform(-beta_pla_step, beta_pla_step)
            omega += np.random.uniform(-omega_step, omega_step)
            sigma_x = np.clip(sigma_x, sigma_x_range[0], sigma_x_range[1])
            sigma_y = np.clip(sigma_y, sigma_y_range[0], sigma_y_range[1])
            rotate_angle = np.clip(rotate_angle, rotate_angle_range[0],
                                   rotate_angle_range[1])
            beta_gau = np.clip(beta_gau, beta_gau_range[0], beta_gau_range[1])
            beta_pla = np.clip(beta_pla, beta_pla_range[0], beta_pla_range[1])
            omega = np.clip(omega, omega_range[0], omega_range[1])
        return kernels
    def _apply_random_blur(self, imgs):
        # Accept a single image or a list; remember which so the output
        # mirrors the input structure.
        is_single_image = False
        if isinstance(imgs, np.ndarray):
            is_single_image = True
            imgs = [imgs]
        # get one kernel per frame and blur the input
        kernels = self.get_kernel(num_kernels=len(imgs))
        imgs = [
            cv2.filter2D(img, -1, kernel)
            for img, kernel in zip(imgs, kernels)
        ]
        if is_single_image:
            imgs = imgs[0]
        return imgs
    def __call__(self, results):
        # Skip the whole degradation with probability 1 - prob.
        if np.random.uniform() > self.params.get('prob', 1):
            return results
        for key in self.keys:
            results[key] = self._apply_random_blur(results[key])
        return results
    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(params={self.params}, keys={self.keys})')
        return repr_str
@PIPELINES.register_module()
class RandomResize:
    """Randomly resize the input.

    Modified keys are the attributes specified in "keys".

    Args:
        params (dict): A dictionary specifying the degradation settings.
            Recognized entries include 'resize_opt', 'resize_prob',
            'resize_step', 'target_size', 'resize_mode_prob',
            'resize_scale', 'is_size_even' and 'prob' (see the inline
            comments in ``_random_resize``).
        keys (list[str]): A list specifying the keys whose values are
            modified.
    """

    def __init__(self, params, keys):
        self.keys = keys
        self.params = params
        # map config strings to OpenCV interpolation flags
        self.resize_dict = dict(
            bilinear=cv2.INTER_LINEAR,
            bicubic=cv2.INTER_CUBIC,
            area=cv2.INTER_AREA,
            lanczos=cv2.INTER_LANCZOS4)

    def _random_resize(self, imgs):
        """Resize a single image or a list of images with random settings.

        A single interpolation method is drawn per call; the target size is
        either taken from ``params['target_size']`` or sampled from the
        configured scale range. With a non-zero ``resize_step`` the scale
        performs a clipped random walk across frames.
        """
        is_single_image = False
        if isinstance(imgs, np.ndarray):
            is_single_image = True
            imgs = [imgs]
        h, w = imgs[0].shape[:2]
        # pick one interpolation method for this call
        resize_opt = self.params['resize_opt']
        resize_prob = self.params['resize_prob']
        resize_opt = np.random.choice(resize_opt, p=resize_prob).lower()
        if resize_opt not in self.resize_dict:
            raise NotImplementedError(f'resize_opt [{resize_opt}] is not '
                                      'implemented')
        resize_opt = self.resize_dict[resize_opt]
        resize_step = self.params.get('resize_step', 0)
        # determine the target size, if not provided
        target_size = self.params.get('target_size', None)
        if target_size is None:
            # sample whether to upscale, downscale or keep the size
            resize_mode = np.random.choice(['up', 'down', 'keep'],
                                           p=self.params['resize_mode_prob'])
            resize_scale = self.params['resize_scale']
            if resize_mode == 'up':
                scale_factor = np.random.uniform(1, resize_scale[1])
            elif resize_mode == 'down':
                scale_factor = np.random.uniform(resize_scale[0], 1)
            else:
                scale_factor = 1

            # determine output size
            h_out, w_out = h * scale_factor, w * scale_factor
            if self.params.get('is_size_even', False):
                # force even dimensions (e.g. for chroma-subsampled codecs)
                h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)
            target_size = (int(h_out), int(w_out))
        else:
            # a fixed target size disables the per-frame random walk
            resize_step = 0

        # resize the input
        if resize_step == 0:  # same target_size for all input images
            outputs = [
                cv2.resize(img, target_size[::-1], interpolation=resize_opt)
                for img in imgs
            ]
        else:  # different target_size for each input image
            outputs = []
            for img in imgs:
                # note: cv2.resize expects (w, h), hence the [::-1]
                img = cv2.resize(
                    img, target_size[::-1], interpolation=resize_opt)
                outputs.append(img)

                # update scale (random walk clipped to resize_scale)
                scale_factor += np.random.uniform(-resize_step, resize_step)
                scale_factor = np.clip(scale_factor, resize_scale[0],
                                       resize_scale[1])

                # determine output size
                h_out, w_out = h * scale_factor, w * scale_factor
                if self.params.get('is_size_even', False):
                    h_out, w_out = 2 * (h_out // 2), 2 * (w_out // 2)
                target_size = (int(h_out), int(w_out))

        if is_single_image:
            outputs = outputs[0]

        return outputs

    def __call__(self, results):
        # skip the whole transform with probability 1 - params['prob']
        if np.random.uniform() > self.params.get('prob', 1):
            return results
        for key in self.keys:
            results[key] = self._random_resize(results[key])
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(params={self.params}, keys={self.keys})')
        return repr_str
@PIPELINES.register_module()
class RandomNoise:
    """Apply random noise to the input.

    Currently support Gaussian noise and Poisson noise.

    Modified keys are the attributes specified in "keys".

    Args:
        params (dict): A dictionary specifying the degradation settings.
        keys (list[str]): A list specifying the keys whose values are
            modified.
    """

    def __init__(self, params, keys):
        self.keys = keys
        self.params = params

    def _apply_gaussian_noise(self, imgs):
        """Add Gaussian noise whose sigma random-walks across frames."""
        # sigma is configured on the 0-255 scale and rescaled here;
        # assumes float images in [0, 1] -- TODO confirm with callers
        sigma_range = self.params['gaussian_sigma']
        sigma = np.random.uniform(sigma_range[0], sigma_range[1]) / 255.

        sigma_step = self.params.get('gaussian_sigma_step', 0)

        gray_noise_prob = self.params['gaussian_gray_noise_prob']
        is_gray_noise = np.random.uniform() < gray_noise_prob

        outputs = []
        for img in imgs:
            noise = np.float32(np.random.randn(*(img.shape))) * sigma
            if is_gray_noise:
                # keep one channel; it broadcasts over all channels on add,
                # so every channel receives identical noise
                noise = noise[:, :, :1]
            outputs.append(img + noise)

            # update noise level (random walk clipped to the config range)
            sigma += np.random.uniform(-sigma_step, sigma_step) / 255.
            sigma = np.clip(sigma, sigma_range[0] / 255.,
                            sigma_range[1] / 255.)

        return outputs

    def _apply_poisson_noise(self, imgs):
        """Add signal-dependent Poisson noise, scaled per frame."""
        scale_range = self.params['poisson_scale']
        scale = np.random.uniform(scale_range[0], scale_range[1])
        scale_step = self.params.get('poisson_scale_step', 0)

        gray_noise_prob = self.params['poisson_gray_noise_prob']
        is_gray_noise = np.random.uniform() < gray_noise_prob

        outputs = []
        for img in imgs:
            noise = img.copy()
            if is_gray_noise:
                # channel flip before BGR2GRAY implies RGB inputs --
                # TODO confirm channel order against the loader
                noise = cv2.cvtColor(noise[..., [2, 1, 0]], cv2.COLOR_BGR2GRAY)
                noise = noise[..., np.newaxis]
            # quantize to 255 levels so unique-value counting is meaningful
            noise = np.clip((noise * 255.0).round(), 0, 255) / 255.
            # number of distinct levels, rounded up to a power of two;
            # scales intensities into pseudo photon counts for sampling
            unique_val = 2**np.ceil(np.log2(len(np.unique(noise))))
            noise = np.random.poisson(noise * unique_val) / unique_val - noise
            outputs.append(img + noise * scale)

            # update noise level (random walk clipped to the config range)
            scale += np.random.uniform(-scale_step, scale_step)
            scale = np.clip(scale, scale_range[0], scale_range[1])

        return outputs

    def _apply_random_noise(self, imgs):
        """Dispatch to one randomly chosen noise model."""
        noise_type = np.random.choice(
            self.params['noise_type'], p=self.params['noise_prob'])

        is_single_image = False
        if isinstance(imgs, np.ndarray):
            is_single_image = True
            imgs = [imgs]

        if noise_type.lower() == 'gaussian':
            imgs = self._apply_gaussian_noise(imgs)
        elif noise_type.lower() == 'poisson':
            imgs = self._apply_poisson_noise(imgs)
        else:
            raise NotImplementedError(f'"noise_type" [{noise_type}] is '
                                      'not implemented.')

        if is_single_image:
            imgs = imgs[0]

        return imgs

    def __call__(self, results):
        # skip the whole transform with probability 1 - params['prob']
        if np.random.uniform() > self.params.get('prob', 1):
            return results
        for key in self.keys:
            results[key] = self._apply_random_noise(results[key])
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(params={self.params}, keys={self.keys})')
        return repr_str
@PIPELINES.register_module()
class RandomJPEGCompression:
    """Degrade the input with JPEG compression at a random quality.

    Modified keys are the attributes specified in "keys".

    Args:
        params (dict): A dictionary specifying the degradation settings.
        keys (list[str]): A list specifying the keys whose values are
            modified.
    """

    def __init__(self, params, keys):
        self.keys = keys
        self.params = params

    def _apply_random_compression(self, imgs):
        """JPEG-compress image(s); quality random-walks across frames."""
        single_input = isinstance(imgs, np.ndarray)
        if single_input:
            imgs = [imgs]

        # draw the starting JPEG quality and the per-frame step size
        quality_range = self.params['quality']
        step = self.params.get('quality_step', 0)
        quality = round(
            np.random.uniform(quality_range[0], quality_range[1]))

        outputs = []
        for img in imgs:
            # round-trip through an in-memory JPEG at the current quality
            flags = [int(cv2.IMWRITE_JPEG_QUALITY), quality]
            _, encoded = cv2.imencode('.jpg', img * 255., flags)
            decoded = np.float32(cv2.imdecode(encoded, 1)) / 255.
            outputs.append(decoded)

            # random-walk the quality for the next frame, clipped to range
            quality += np.random.uniform(-step, step)
            quality = round(np.clip(quality, quality_range[0],
                                    quality_range[1]))

        return outputs[0] if single_input else outputs

    def __call__(self, results):
        """Compress ``results[key]`` for each key, skipped with
        probability ``1 - params.get('prob', 1)``."""
        if np.random.uniform() > self.params.get('prob', 1):
            return results
        for key in self.keys:
            results[key] = self._apply_random_compression(results[key])
        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(params={self.params}, keys={self.keys})')
@PIPELINES.register_module()
class RandomVideoCompression:
    """Apply random video compression to the input.

    The frame sequence is encoded to an in-memory MP4 with a randomly
    chosen codec and bitrate, then decoded back, so the output carries
    realistic compression artifacts.

    Modified keys are the attributes specified in "keys".

    Args:
        params (dict): A dictionary specifying the degradation settings.
            Required entries: 'codec', 'codec_prob', 'bitrate'.
        keys (list[str]): A list specifying the keys whose values are
            modified.
    """

    def __init__(self, params, keys):
        assert has_av, 'Please install av to use video compression.'

        self.keys = keys
        self.params = params
        # 50 == logging.CRITICAL: silence libav's warning spam
        logging.getLogger('libav').setLevel(50)

    def _apply_random_compression(self, imgs):
        """Encode the frame list to MP4 in memory and decode it back.

        Assumes float frames in [0, 1] with RGB channel order -- TODO
        confirm against the loader.
        """
        # sample codec (weighted) and an integer bitrate from the range
        codec = random.choices(self.params['codec'],
                               self.params['codec_prob'])[0]
        bitrate = self.params['bitrate']
        bitrate = np.random.randint(bitrate[0], bitrate[1] + 1)

        buf = io.BytesIO()
        with av.open(buf, 'w', 'mp4') as container:
            stream = container.add_stream(codec, rate=1)
            stream.height = imgs[0].shape[0]
            stream.width = imgs[0].shape[1]
            stream.pix_fmt = 'yuv420p'
            stream.bit_rate = bitrate

            for img in imgs:
                img = (255 * img).astype(np.uint8)
                frame = av.VideoFrame.from_ndarray(img, format='rgb24')
                # 'NONE' lets the encoder choose the frame/picture type
                frame.pict_type = 'NONE'
                for packet in stream.encode(frame):
                    container.mux(packet)

            # Flush stream
            for packet in stream.encode():
                container.mux(packet)

        outputs = []
        with av.open(buf, 'r', 'mp4') as container:
            if container.streams.video:
                # decode stream 0 back to float32 RGB frames in [0, 1]
                for frame in container.decode(**{'video': 0}):
                    outputs.append(
                        frame.to_rgb().to_ndarray().astype(np.float32) / 255.)

        return outputs

    def __call__(self, results):
        # skip the whole transform with probability 1 - params['prob']
        if np.random.uniform() > self.params.get('prob', 1):
            return results
        for key in self.keys:
            results[key] = self._apply_random_compression(results[key])
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (f'(params={self.params}, keys={self.keys})')
        return repr_str
# Lookup table mapping the config 'type' string of each supported
# degradation to its implementing class. DegradationsWithShuffle builds
# its members from this table (not from the PIPELINES registry).
allowed_degradations = {
    'RandomBlur': RandomBlur,
    'RandomResize': RandomResize,
    'RandomNoise': RandomNoise,
    'RandomJPEGCompression': RandomJPEGCompression,
    'RandomVideoCompression': RandomVideoCompression,
}
@PIPELINES.register_module()
class DegradationsWithShuffle:
    """Apply random degradations to the input in shuffled order.

    Degradation groups (nested lists) are supported: the internal order of
    a group is preserved while the positions selected by ``shuffle_idx``
    are permuted. For example, with ``degradations = [a, b, [c, d]]`` and
    ``shuffle_idx = None``, the possible orders are
    ::

        [a, b, [c, d]]
        [a, [c, d], b]
        [b, a, [c, d]]
        [b, [c, d], a]
        [[c, d], a, b]
        [[c, d], b, a]

    Modified keys are the attributes specified in "keys".

    Args:
        degradations (list[dict]): The list of degradations.
        keys (list[str]): A list specifying the keys whose values are
            modified.
        shuffle_idx (list | None, optional): The degradations corresponding to
            these indices are shuffled. If None, all degradations are shuffled.
    """

    def __init__(self, degradations, keys, shuffle_idx=None):
        self.keys = keys
        self.degradations = self._build_degradations(degradations)
        # default: every position participates in the shuffle
        self.shuffle_idx = (list(range(0, len(degradations)))
                            if shuffle_idx is None else shuffle_idx)

    def _build_degradations(self, degradations):
        """Instantiate degradation configs in place (recursing into groups)."""
        for pos, cfg in enumerate(degradations):
            if isinstance(cfg, (list, tuple)):
                # a group: build its members, keeping their relative order
                degradations[pos] = self._build_degradations(cfg)
            else:
                degradation_cls = allowed_degradations[cfg['type']]
                degradations[pos] = degradation_cls(cfg['params'], self.keys)
        return degradations

    def __call__(self, results):
        # permute the degradations selected by shuffle_idx (in place)
        if len(self.shuffle_idx) > 0:
            selected = [self.degradations[i] for i in self.shuffle_idx]
            np.random.shuffle(selected)
            for offset, idx in enumerate(self.shuffle_idx):
                self.degradations[idx] = selected[offset]

        # run every degradation on the results, flattening groups
        for degradation in self.degradations:
            if isinstance(degradation, (tuple, list)):
                for member in degradation:
                    results = member(results)
            else:
                results = degradation(results)

        return results

    def __repr__(self):
        return (f'{self.__class__.__name__}'
                f'(degradations={self.degradations}, '
                f'keys={self.keys}, '
                f'shuffle_idx={self.shuffle_idx})')
| 18,941 | 33.007181 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/utils.py | import logging
import numpy as np
import torch
from mmcv.utils import print_log
# Integer dtypes whose (min, max) ranges are derived below via np.iinfo.
_integer_types = (
    np.byte,
    np.ubyte,  # 8 bits
    np.short,
    np.ushort,  # 16 bits
    np.intc,
    np.uintc,  # 16 or 32 or 64 bits
    np.int_,
    np.uint,  # 32 or 64 bits
    np.longlong,
    np.ulonglong)  # 64 bits
_integer_ranges = {
    t: (np.iinfo(t).min, np.iinfo(t).max)
    for t in _integer_types
}
# Map a numpy scalar type to its nominal intensity range, mirroring
# skimage.util.dtype.dtype_range. Floats are assumed normalized to [-1, 1].
# NOTE: the former `np.bool8` entry was dropped: it was merely an alias of
# `np.bool_` (the same dict key written twice), and the alias was removed
# in NumPy 2.0, which made importing this module crash on recent NumPy.
dtype_range = {
    np.bool_: (False, True),
    np.float16: (-1, 1),
    np.float32: (-1, 1),
    np.float64: (-1, 1)
}
dtype_range.update(_integer_ranges)
def dtype_limits(image, clip_negative=False):
    """Return the (min, max) intensity limits of ``image``'s dtype.

    This function is adopted from skimage:
    https://github.com/scikit-image/scikit-image/blob/
    7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/util/dtype.py#L35

    Args:
        image (ndarray): Input image.
        clip_negative (bool, optional): If True, report 0 as the minimum
            intensity even if the image dtype allows negative values.

    Returns:
        tuple: Lower and upper intensity limits.
    """
    lower, upper = dtype_range[image.dtype.type]
    return (0 if clip_negative else lower), upper
def adjust_gamma(image, gamma=1, gain=1):
    """Perform gamma correction (power-law transform) on the input image.

    This function is adopted from skimage:
    https://github.com/scikit-image/scikit-image/blob/
    7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/exposure/
    exposure.py#L439-L494

    Each pixel is scaled to [0, 1], raised to ``gamma``, rescaled and
    multiplied by ``gain``, i.e. ``O = gain * scale * (I / scale)**gamma``.

    Args:
        image (ndarray): Input image with non-negative values.
        gamma (float, optional): Non negative real number. Defaults to 1.
        gain (float, optional): The constant multiplier. Defaults to 1.

    Returns:
        ndarray: Gamma corrected image, cast back to the input dtype.

    Raises:
        ValueError: If ``image`` contains negative values or ``gamma`` is
            negative.
    """
    if np.any(image < 0):
        raise ValueError('Image Correction methods work correctly only on '
                         'images with non-negative values. Use '
                         'skimage.exposure.rescale_intensity.')

    dtype = image.dtype.type

    if gamma < 0:
        raise ValueError('Gamma should be a non-negative real number.')

    # full positive intensity span of the input dtype
    low, high = dtype_limits(image, True)
    scale = float(high - low)

    out = ((image / scale)**gamma) * scale * gain
    return out.astype(dtype)
def random_choose_unknown(unknown, crop_size):
    """Randomly pick a top-left crop corner that covers unknown pixels.

    Args:
        unknown (np.ndarray): The binary unknown mask.
        crop_size (tuple[int]): The given crop size.

    Returns:
        tuple[int]: The top-left point of the chosen bbox.
    """
    height, width = unknown.shape
    crop_h, crop_w = crop_size
    delta_h = center_h = crop_h // 2
    delta_w = center_w = crop_w // 2

    # centers inside this region yield crops fully contained in the image
    valid = np.zeros_like(unknown)
    valid[delta_h:height - delta_h, delta_w:width - delta_w] = 1

    # prefer unknown pixels whose crop stays in bounds, then any unknown
    # pixel, and finally fall back to the fixed default center
    if np.any(unknown & valid):
        ys, xs = np.where(unknown & valid)
    elif np.any(unknown):
        ys, xs = np.where(unknown)
    else:
        print_log('No unknown pixels found!', level=logging.WARNING)
        ys = [center_h]
        xs = [center_w]

    pick = np.random.randint(len(ys))
    center_h = ys[pick]
    center_w = xs[pick]

    # clamp so the crop never leaves the image
    top = np.clip(center_h - delta_h, 0, height - crop_h)
    left = np.clip(center_w - delta_w, 0, width - crop_w)

    return top, left
def make_coord(shape, ranges=None, flatten=True):
    """Make coordinates at grid centers.

    Args:
        shape (tuple): shape of image.
        ranges (tuple): range of coordinate value. Default: None.
        flatten (bool): flatten to (n, 2) or Not. Default: True.

    Returns:
        Tensor: coordinates at the center of every grid cell.
    """
    axes = []
    for dim, size in enumerate(shape):
        # default coordinate range is the normalized interval [-1, 1]
        low, high = (-1, 1) if ranges is None else ranges[dim]
        half_cell = (high - low) / (2 * size)
        # cell centers: low + half_cell, spaced one full cell apart
        axes.append(low + half_cell +
                    (2 * half_cell) * torch.arange(size).float())
    coord = torch.stack(torch.meshgrid(*axes), dim=-1)
    if flatten:
        coord = coord.view(-1, coord.shape[-1])
    return coord
| 4,623 | 28.832258 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/random_down_sampling.py | import math
import numpy as np
import torch
from mmcv import imresize
from ..registry import PIPELINES
@PIPELINES.register_module()
class RandomDownSampling:
"""Generate LQ image from GT (and crop), which will randomly pick a scale.
Args:
scale_min (float): The minimum of upsampling scale, inclusive.
Default: 1.0.
scale_max (float): The maximum of upsampling scale, exclusive.
Default: 4.0.
patch_size (int): The cropped lr patch size.
Default: None, means no crop.
interpolation (str): Interpolation method, accepted values are
"nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
backend, "nearest", "bilinear", "bicubic", "box", "lanczos",
"hamming" for 'pillow' backend.
Default: "bicubic".
backend (str | None): The image resize backend type. Options are `cv2`,
`pillow`, `None`. If backend is None, the global imread_backend
specified by ``mmcv.use_backend()`` will be used.
Default: "pillow".
Scale will be picked in the range of [scale_min, scale_max).
"""
def __init__(self,
scale_min=1.0,
scale_max=4.0,
patch_size=None,
interpolation='bicubic',
backend='pillow'):
assert scale_max >= scale_min
self.scale_min = scale_min
self.scale_max = scale_max
self.patch_size = patch_size
self.interpolation = interpolation
self.backend = backend
def __call__(self, results):
"""Call function.
Args:
results (dict): A dict containing the necessary information and
data for augmentation. 'gt' is required.
Returns:
dict: A dict containing the processed data and information.
modified 'gt', supplement 'lq' and 'scale' to keys.
"""
img = results['gt']
scale = np.random.uniform(self.scale_min, self.scale_max)
if self.patch_size is None:
h_lr = math.floor(img.shape[-3] / scale + 1e-9)
w_lr = math.floor(img.shape[-2] / scale + 1e-9)
img = img[:round(h_lr * scale), :round(w_lr * scale), :]
img_down = resize_fn(img, (w_lr, h_lr), self.interpolation,
self.backend)
crop_lr, crop_hr = img_down, img
else:
w_lr = self.patch_size
w_hr = round(w_lr * scale)
x0 = np.random.randint(0, img.shape[-3] - w_hr)
y0 = np.random.randint(0, img.shape[-2] - w_hr)
crop_hr = img[x0:x0 + w_hr, y0:y0 + w_hr, :]
crop_lr = resize_fn(crop_hr, w_lr, self.interpolation,
self.backend)
results['gt'] = crop_hr
results['lq'] = crop_lr
results['scale'] = scale
return results
def __repr__(self):
repr_str = self.__class__.__name__
repr_str += (f' scale_min={self.scale_min}, '
f'scale_max={self.scale_max}, '
f'patch_size={self.patch_size}, '
f'interpolation={self.interpolation}, '
f'backend={self.backend}')
return repr_str
def resize_fn(img, size, interpolation='bicubic', backend='pillow'):
    """Resize the given image to a given size via mmcv's ``imresize``.

    Args:
        img (ndarray | torch.Tensor): The input image.
        size (int | tuple[int]): Target size w or (w, h).
        interpolation (str): Interpolation method, accepted values are
            "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2'
            backend, "nearest", "bilinear", "bicubic", "box", "lanczos",
            "hamming" for 'pillow' backend.
            Default: "bicubic".
        backend (str | None): The image resize backend type. Options are
            `cv2`, `pillow`, `None`. If backend is None, the global
            imread_backend specified by ``mmcv.use_backend()`` will be used.
            Default: "pillow".

    Returns:
        ndarray | torch.Tensor: `resized_img`, whose type is same as `img`.

    Raises:
        TypeError: If ``img`` is neither ndarray nor torch.Tensor.
    """
    # an int means a square target
    target = (size, size) if isinstance(size, int) else size
    if isinstance(img, np.ndarray):
        return imresize(
            img, target, interpolation=interpolation, backend=backend)
    if isinstance(img, torch.Tensor):
        # resize as numpy, then convert back so the output type matches
        resized = imresize(
            img.numpy(), target, interpolation=interpolation, backend=backend)
        return torch.from_numpy(resized)
    raise TypeError('img should got np.ndarray or torch.Tensor,'
                    f'but got {type(img)}')
| 4,735 | 36.587302 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/formating.py | from collections.abc import Sequence
import mmcv
import numpy as np
import torch
from mmcv.parallel import DataContainer as DC
from torch.nn import functional as F
from ..registry import PIPELINES
def to_tensor(data):
    """Convert objects of various python types to :obj:`torch.Tensor`.

    Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
    :class:`Sequence`, :class:`int` and :class:`float`.
    """
    # (predicate, converter) pairs, checked in order; first match wins.
    # Tensors pass through unchanged, ndarrays share memory via from_numpy,
    # non-string sequences become tensors, ints/floats become 1-element
    # Long/Float tensors.
    dispatch = (
        (lambda d: isinstance(d, torch.Tensor), lambda d: d),
        (lambda d: isinstance(d, np.ndarray), torch.from_numpy),
        (lambda d: isinstance(d, Sequence) and not mmcv.is_str(d),
         torch.tensor),
        (lambda d: isinstance(d, int), lambda d: torch.LongTensor([d])),
        (lambda d: isinstance(d, float), lambda d: torch.FloatTensor([d])),
    )
    for matches, convert in dispatch:
        if matches(data):
            return convert(data)
    raise TypeError(f'type {type(data)} cannot be converted to tensor.')
@PIPELINES.register_module()
class ToTensor:
    """Convert the values of selected keys to ``torch.Tensor`` type
    in the data loader pipeline.

    Args:
        keys (Sequence[str]): Required keys to be converted.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Convert each ``results[key]`` via :func:`to_tensor`.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            results[key] = to_tensor(results[key])
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(keys={self.keys})'
@PIPELINES.register_module()
class ImageToTensor:
    """Convert image type to `torch.Tensor` type.

    Args:
        keys (Sequence[str]): Required keys to be converted.
        to_float32 (bool): Whether convert numpy image array to np.float32
            before converted to tensor. Default: True.
    """

    def __init__(self, keys, to_float32=True):
        self.keys = keys
        self.to_float32 = to_float32

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            # deal with gray scale img: expand a color channel
            if len(results[key].shape) == 2:
                results[key] = results[key][..., None]
            # NOTE: the former check `isinstance(results[key], np.float32)`
            # was always False (an ndarray is never an instance of a scalar
            # type), so every image was copied via astype even when already
            # float32; comparing the dtype gives the intended behavior.
            if self.to_float32 and results[key].dtype != np.float32:
                results[key] = results[key].astype(np.float32)
            # HWC -> CHW before conversion to tensor
            results[key] = to_tensor(results[key].transpose(2, 0, 1))
        return results

    def __repr__(self):
        return self.__class__.__name__ + (
            f'(keys={self.keys}, to_float32={self.to_float32})')
@PIPELINES.register_module()
class FramesToTensor(ImageToTensor):
    """Convert frames type to `torch.Tensor` type.

    It accepts a list of frames, converts each to `torch.Tensor` type and
    then concatenates in a new dimension (dim=0).

    Args:
        keys (Sequence[str]): Required keys to be converted.
        to_float32 (bool): Whether convert numpy image array to np.float32
            before converted to tensor. Default: True.
    """

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            if not isinstance(results[key], list):
                raise TypeError(f'results["{key}"] should be a list, '
                                f'but got {type(results[key])}')
            for idx, v in enumerate(results[key]):
                # deal with gray scale img: expand a color channel
                if len(v.shape) == 2:
                    v = v[..., None]
                # NOTE: the former `isinstance(v, np.float32)` check was
                # always False for ndarrays, forcing a redundant astype copy
                # even for float32 frames; compare the dtype instead.
                if self.to_float32 and v.dtype != np.float32:
                    v = v.astype(np.float32)
                # HWC -> CHW before conversion to tensor
                results[key][idx] = to_tensor(v.transpose(2, 0, 1))
            results[key] = torch.stack(results[key], dim=0)
            if results[key].size(0) == 1:
                # NOTE(review): squeeze_() removes *all* size-1 dims, not
                # just the frame dim; for a single gray-scale frame this
                # also drops the channel dim -- presumably only dim 0 was
                # intended. Kept as-is for backward compatibility.
                results[key].squeeze_()
        return results
@PIPELINES.register_module()
class GetMaskedImage:
    """Compose the masked image from a clean image and a hole mask.

    Args:
        img_name (str): Key for clean image.
        mask_name (str): Key for mask image. The mask shape should be
            (h, w, 1) while '1' indicate holes and '0' indicate valid
            regions.
    """

    def __init__(self, img_name='gt_img', mask_name='mask'):
        self.img_name = img_name
        self.mask_name = mask_name

    def __call__(self, results):
        """Store ``results['masked_img']``: the clean image with hole
        pixels zeroed out.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        # keep valid pixels (mask == 0) and zero out holes (mask == 1)
        results['masked_img'] = (
            results[self.img_name] * (1. - results[self.mask_name]))
        return results

    def __repr__(self):
        return self.__class__.__name__ + (
            f"(img_name='{self.img_name}', mask_name='{self.mask_name}')")
@PIPELINES.register_module()
class FormatTrimap:
    """Convert trimap (tensor) to one-hot representation.

    It transforms the trimap label from (0, 128, 255) to (0, 1, 2). If
    ``to_onehot`` is set to True, the trimap will convert to one-hot tensor
    of shape (3, H, W). Required key is "trimap", added or modified key are
    "trimap" and "to_onehot".

    Args:
        to_onehot (bool): whether convert trimap to one-hot tensor. Default:
            ``False``.
    """

    def __init__(self, to_onehot=False):
        self.to_onehot = to_onehot

    def __call__(self, results):
        """Relabel (and optionally one-hot encode) ``results['trimap']``.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        trimap = results['trimap'].squeeze()
        # map the raw gray levels to class indices (in place)
        trimap[trimap == 128] = 1
        trimap[trimap == 255] = 2
        if self.to_onehot:
            # (H, W) -> (H, W, 3) -> (3, H, W)
            one_hot = F.one_hot(trimap.to(torch.long), num_classes=3)
            trimap = one_hot.permute(2, 0, 1)
        else:
            # prepend a channel dimension: (H, W) -> (1, H, W)
            trimap = trimap[None, ...]
        results['trimap'] = trimap.float()
        results['meta'].data['to_onehot'] = self.to_onehot
        return results

    def __repr__(self):
        return f'{self.__class__.__name__}(to_onehot={self.to_onehot})'
@PIPELINES.register_module()
class Collect:
    """Collect data from the loader relevant to the specific task.

    This is usually the last stage of the data loader pipeline. Typically
    keys is set to some subset of "img", "gt_labels".

    The "meta" item is always populated. Its contents depend on
    ``meta_keys``.

    Args:
        keys (Sequence[str]): Required keys to be collected.
        meta_keys (Sequence[str]): Required keys to be collected to "meta".
            Default: None (no meta information is collected).
    """

    def __init__(self, keys, meta_keys=None):
        self.keys = keys
        # The former code stored None directly, so the documented default
        # crashed in __call__ with "TypeError: 'NoneType' object is not
        # iterable"; normalize to an empty tuple instead.
        self.meta_keys = meta_keys if meta_keys is not None else ()

    def __call__(self, results):
        """Collect ``self.keys`` and wrap ``self.meta_keys`` under "meta".

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        data = {}
        img_meta = {}
        for key in self.meta_keys:
            img_meta[key] = results[key]
        # cpu_only: meta data is kept on CPU and not collated into tensors
        data['meta'] = DC(img_meta, cpu_only=True)
        for key in self.keys:
            data[key] = results[key]
        return data

    def __repr__(self):
        return self.__class__.__name__ + (
            f'(keys={self.keys}, meta_keys={self.meta_keys})')
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/matlab_like_resize.py | # This code is referenced from matlab_imresize with modifications
import numpy as np
from ..registry import PIPELINES
def get_size_from_scale(input_size, scale_factor):
    """Get the output size given input size and scale factor.

    Args:
        input_size (tuple): The size of the input image.
        scale_factor (float): The resize factor.

    Returns:
        list[int]: The size of the output image (each dimension is the
            scaled input dimension, rounded up).
    """
    output_shape = []
    for scale, length in zip(scale_factor, input_size):
        output_shape.append(int(np.ceil(scale * length)))
    return output_shape
def get_scale_from_size(input_size, output_size):
    """Get the scale factor given input size and output size.

    Args:
        input_size (tuple(int)): The size of the input image.
        output_size (tuple(int)): The size of the output image.

    Returns:
        list[float]: The scale factor of each dimension
            (output / input, as floats).
    """
    scale = []
    for in_len, out_len in zip(input_size, output_size):
        scale.append(1.0 * out_len / in_len)
    return scale
def _cubic(x):
""" Cubic function.
Args:
x (ndarray): The distance from the center position.
Returns:
ndarray: The weight corresponding to a particular distance.
"""
x = np.array(x, dtype=np.float32)
x_abs = np.abs(x)
x_abs_sq = x_abs**2
x_abs_cu = x_abs_sq * x_abs
# if |x| <= 1: y = 1.5|x|^3 - 2.5|x|^2 + 1
# if 1 < |x| <= 2: -0.5|x|^3 + 2.5|x|^2 - 4|x| + 2
f = (1.5 * x_abs_cu - 2.5 * x_abs_sq + 1) * (x_abs <= 1) + (
-0.5 * x_abs_cu + 2.5 * x_abs_sq - 4 * x_abs + 2) * ((1 < x_abs) &
(x_abs <= 2))
return f
def get_weights_indices(input_length, output_length, scale, kernel,
                        kernel_width):
    """Get weights and indices for interpolation.

    Args:
        input_length (int): Length of the input sequence.
        output_length (int): Length of the output sequence.
        scale (float): Scale factor.
        kernel (func): The kernel used for resizing.
        kernel_width (int): The width of the kernel.

    Returns:
        list[ndarray]: The weights and the indices for interpolation.
    """
    if scale < 1:  # modified kernel for antialiasing

        def h(x):
            # stretch the kernel by 1/scale when downsampling (antialias)
            return scale * kernel(scale * x)

        kernel_width = 1.0 * kernel_width / scale
    else:
        h = kernel
        kernel_width = kernel_width  # no-op; mirrors the antialias branch

    # coordinates of output (1-based, following MATLAB convention)
    x = np.arange(1, output_length + 1).astype(np.float32)

    # coordinates of input: map each output position back into input space
    u = x / scale + 0.5 * (1 - 1 / scale)
    left = np.floor(u - kernel_width / 2)  # leftmost pixel
    p = int(np.ceil(kernel_width)) + 2  # maximum number of pixels

    # indices of input pixels contributing to each output position
    ind = left[:, np.newaxis, ...] + np.arange(p)
    indices = ind.astype(np.int32)

    # weights of input pixels, normalized so each row sums to 1
    weights = h(u[:, np.newaxis, ...] - indices - 1)
    weights = weights / np.sum(weights, axis=1)[:, np.newaxis, ...]

    # remove all-zero columns; `aux` implements mirror boundary handling by
    # reflecting out-of-range indices back into [0, input_length)
    aux = np.concatenate(
        (np.arange(input_length), np.arange(input_length - 1, -1,
                                            step=-1))).astype(np.int32)
    indices = aux[np.mod(indices, aux.size)]
    ind2store = np.nonzero(np.any(weights, axis=0))
    # NOTE: indexing with the tuple from np.nonzero leaves a singleton axis
    # in both arrays; resize_along_dim squeezes it back out.
    weights = weights[:, ind2store]
    indices = indices[:, ind2store]

    return weights, indices
def resize_along_dim(img_in, weights, indices, dim):
    """Resize along a specific dimension.

    Args:
        img_in (ndarray): The input image.
        weights (ndarray): The weights used for interpolation, computed from
            [get_weights_indices].
        indices (ndarray): The indices used for interpolation, computed from
            [get_weights_indices].
        dim (int): Which dimension to undergo interpolation.

    Returns:
        ndarray: Interpolated (along one dimension) image. uint8 inputs are
            clipped, rounded and returned as uint8; other inputs come back
            as float.
    """
    # Remember the input dtype *before* the float32 conversion below. The
    # original code compared `img_in.dtype == np.uint8` after the astype,
    # which was always False, so the uint8 round/clip branch was dead code
    # and uint8 inputs silently came back as float.
    in_dtype = img_in.dtype
    img_in = img_in.astype(np.float32)
    w_shape = weights.shape
    output_shape = list(img_in.shape)
    output_shape[dim] = w_shape[0]
    img_out = np.zeros(output_shape)

    if dim == 0:
        for i in range(w_shape[0]):
            # weighted sum over the contributing input rows
            w = weights[i, :][np.newaxis, ...]
            ind = indices[i, :]
            img_slice = img_in[ind, :]
            img_out[i] = np.sum(np.squeeze(img_slice, axis=0) * w.T, axis=0)
    elif dim == 1:
        for i in range(w_shape[0]):
            # weighted sum over the contributing input columns
            w = weights[i, :][:, :, np.newaxis]
            ind = indices[i, :]
            img_slice = img_in[:, ind]
            img_out[:, i] = np.sum(np.squeeze(img_slice, axis=1) * w.T, axis=1)

    if in_dtype == np.uint8:
        img_out = np.clip(img_out, 0, 255)
        return np.around(img_out).astype(np.uint8)
    else:
        return img_out
@PIPELINES.register_module()
class MATLABLikeResize:
    """Resize the input image using MATLAB-like downsampling.

    Currently support bicubic interpolation only. Note that the output of
    this function is slightly different from the official MATLAB function.

    Required keys are the keys in attribute "keys". Added or modified keys
    are "scale" and "output_shape", and the keys in attribute "keys".

    Args:
        keys (list[str]): A list of keys whose values are modified.
        scale (float | None, optional): The scale factor of the resize
            operation. If None, it will be determined by output_shape.
            Default: None.
        output_shape (tuple(int) | None, optional): The size of the output
            image. If None, it will be determined by scale. Note that if
            scale is provided, output_shape will not be used.
            Default: None.
        kernel (str, optional): The kernel for the resize operation.
            Currently support 'bicubic' only. Default: 'bicubic'.
        kernel_width (float): The kernel width. Currently support 4.0 only.
            Default: 4.0.
    """

    def __init__(self,
                 keys,
                 scale=None,
                 output_shape=None,
                 kernel='bicubic',
                 kernel_width=4.0):
        if kernel.lower() != 'bicubic':
            raise ValueError('Currently support bicubic kernel only.')

        if float(kernel_width) != 4.0:
            raise ValueError('Current support only width=4 only.')

        if scale is None and output_shape is None:
            raise ValueError('"scale" and "output_shape" cannot be both None')

        self.kernel_func = _cubic
        self.keys = keys
        self.scale = scale
        self.output_shape = output_shape
        self.kernel = kernel
        self.kernel_width = kernel_width

    def _resize(self, img):
        """Resize one image with separable cubic interpolation."""
        weights = {}
        indices = {}

        # compute scale and output_size; an explicit scale takes precedence
        # over output_shape
        if self.scale is not None:
            scale = float(self.scale)
            scale = [scale, scale]
            output_size = get_size_from_scale(img.shape, scale)
        else:
            scale = get_scale_from_size(img.shape, self.output_shape)
            output_size = list(self.output_shape)

        # apply cubic interpolation along two dimensions; precompute and
        # cache the weights/indices of each dimension under a unique key
        order = np.argsort(np.array(scale))
        for k in range(2):
            key = (img.shape[k], output_size[k], scale[k], self.kernel_func,
                   self.kernel_width)
            weight, index = get_weights_indices(img.shape[k], output_size[k],
                                                scale[k], self.kernel_func,
                                                self.kernel_width)
            weights[key] = weight
            indices[key] = index

        output = np.copy(img)
        if output.ndim == 2:  # grayscale image
            output = output[:, :, np.newaxis]

        # process the dimension with the smaller scale factor first
        # (order comes from argsort above), as MATLAB's imresize does
        for k in range(2):
            dim = order[k]
            key = (img.shape[dim], output_size[dim], scale[dim],
                   self.kernel_func, self.kernel_width)
            output = resize_along_dim(output, weights[key], indices[key], dim)

        return output

    def __call__(self, results):
        for key in self.keys:
            is_single_image = False
            if isinstance(results[key], np.ndarray):
                is_single_image = True
                results[key] = [results[key]]

            results[key] = [self._resize(img) for img in results[key]]

            if is_single_image:
                results[key] = results[key][0]

        results['scale'] = self.scale
        results['output_shape'] = self.output_shape
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        repr_str += (
            f'(keys={self.keys}, scale={self.scale}, '
            f'output_shape={self.output_shape}, '
            f'kernel={self.kernel}, kernel_width={self.kernel_width})')
        return repr_str
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/generate_assistant.py | import numpy as np
import torch
from ..registry import PIPELINES
from .utils import make_coord
@PIPELINES.register_module()
class GenerateHeatmap:
    """Create one Gaussian heatmap per keypoint.

    Args:
        keypoint (str): Key of keypoint in dict.
        ori_size (int | Tuple[int]): Original image size of keypoint.
        target_size (int | Tuple[int]): Target size of heatmap.
        sigma (float): Sigma parameter of heatmap. Default: 1.0
    """

    def __init__(self, keypoint, ori_size, target_size, sigma=1.0):
        # An int is treated as a square size; tuples are truncated to 2-D.
        ori_size = (ori_size, ori_size) if isinstance(ori_size, int) \
            else ori_size[:2]
        target_size = (target_size, target_size) \
            if isinstance(target_size, int) else target_size[:2]
        self.size_ratio = (target_size[0] / ori_size[0],
                           target_size[1] / ori_size[1])
        self.keypoint = keypoint
        self.sigma = sigma
        self.target_size = target_size
        self.ori_size = ori_size

    def __call__(self, results):
        """Rescale every keypoint to the target size and stack heatmaps.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation. Requires the keypoint key.

        Returns:
            dict: The same dict with a stacked 'heatmap' array added
                (one channel per keypoint, along axis 2).
        """
        heatmaps = []
        for kp in results[self.keypoint]:
            scaled = (kp[0] * self.size_ratio[0], kp[1] * self.size_ratio[1])
            heatmaps.append(self._generate_one_heatmap(scaled))
        results['heatmap'] = np.stack(heatmaps, axis=2)
        return results

    def _generate_one_heatmap(self, keypoint):
        """Evaluate a 2-D Gaussian centred on ``keypoint``.

        Args:
            keypoint (Tuple[float]): (x, y) location in target coordinates.

        Returns:
            np.ndarray: Heatmap with value 1.0 at the keypoint location.
        """
        width, height = self.target_size
        grid_x, grid_y = np.meshgrid(
            np.arange(start=0, stop=width, dtype=int),
            np.arange(start=0, stop=height, dtype=int))
        squared_dist = (grid_x - keypoint[0])**2 + (grid_y - keypoint[1])**2
        return np.exp(-(squared_dist / 2.0 / self.sigma / self.sigma))

    def __repr__(self):
        fields = (f'keypoint={self.keypoint}',
                  f'ori_size={self.ori_size}',
                  f'target_size={self.target_size}',
                  f'sigma={self.sigma}')
        return f'{self.__class__.__name__}, ' + ', '.join(fields)
@PIPELINES.register_module()
class GenerateCoordinateAndCell:
    """Generate the coordinate grid and per-pixel cell sizes.

    The coordinate grid is built from the desired size of the SR image.

    Train or val:
        1. Generate coordinates from GT.
        2. Reshape the GT image to (HgWg, 3) and transpose to (3, HgWg),
           where `Hg` and `Wg` are the GT height and width.
    Test:
        Generate coordinates from LQ and scale or target_size, then build
        the cell array from the coordinates.

    Args:
        sample_quantity (int): Number of coordinate samples to draw, so the
            GT tensors in a batch share the same dimensions. Default: None.
        scale (float): Upsampling scale. Default: None.
        target_size (tuple[int]): Size of the target image. Default: None.

    The size of the target image is taken, in order of priority, from:
        1. results['gt'].shape[-2:]
        2. results['lq'].shape[-2:] * scale
        3. target_size
    """

    def __init__(self, sample_quantity=None, scale=None, target_size=None):
        self.sample_quantity = sample_quantity
        self.scale = scale
        self.target_size = target_size

    def __call__(self, results):
        """Build 'coord' and 'cell'; reshape 'gt' when present.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation. Requires one of:
                1. 'lq' (tensor), shaped like (3, H, W);
                2. 'gt' (tensor), shaped like (3, H, W);
                3. neither, provided self.target_size has length >= 2.

        Returns:
            dict: The processed dict; 'gt' (if present) is reshaped to
                (-1, 3), and 'coord'/'cell' are added.
        """
        if 'gt' in results:
            # Remember the GT shape and flatten it to (Hg*Wg, 3).
            gt_tensor = results['gt']
            self.target_size = gt_tensor.shape
            results['gt'] = gt_tensor.contiguous().view(3, -1).permute(1, 0)
        elif self.scale is not None and 'lq' in results:
            _, lq_height, lq_width = results['lq'].shape
            self.target_size = (round(lq_height * self.scale),
                                round(lq_width * self.scale))
        else:
            assert self.target_size is not None
            assert len(self.target_size) >= 2

        coordinates = make_coord(self.target_size[-2:])

        if self.sample_quantity is not None and 'gt' in results:
            chosen = np.random.choice(
                len(coordinates), self.sample_quantity, replace=False)
            coordinates = coordinates[chosen]
            results['gt'] = results['gt'][chosen]

        # Cell decoding: each cell holds the pixel extent in [-1, 1] space.
        cell = torch.ones_like(coordinates)
        cell[:, 0] *= 2 / self.target_size[-2]
        cell[:, 1] *= 2 / self.target_size[-1]

        results['coord'] = coordinates
        results['cell'] = cell
        return results

    def __repr__(self):
        return (self.__class__.__name__
                + f'sample_quantity={self.sample_quantity}, '
                + f'scale={self.scale}, target_size={self.target_size}')
| 6,056 | 34.629412 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/pipelines/normalization.py | import mmcv
import numpy as np
from ..registry import PIPELINES
@PIPELINES.register_module()
class Normalize:
    """Normalize images with the given mean and std value.

    Required keys are the keys in attribute "keys"; modified keys are the
    keys in attribute "keys", and the key 'img_norm_cfg' is added. It also
    supports normalizing a list of images.

    Args:
        keys (Sequence[str]): The images to be normalized.
        mean (np.ndarray): Mean values of different channels.
        std (np.ndarray): Std values of different channels.
        to_rgb (bool): Whether to convert channels from BGR to RGB.
        save_original (bool): Whether to keep a copy of the unnormalized
            image(s) under the key '<key>_unnormalised'. Default: False.
    """

    def __init__(self, keys, mean, std, to_rgb=False, save_original=False):
        self.keys = keys
        self.mean = np.array(mean, dtype=np.float32)
        self.std = np.array(std, dtype=np.float32)
        self.to_rgb = to_rgb
        self.save_original = save_original

    def __call__(self, results):
        """Call function.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            if isinstance(results[key], list):
                if self.save_original:
                    results[key + '_unnormalised'] = [
                        v.copy() for v in results[key]
                    ]
                results[key] = [
                    mmcv.imnormalize(v, self.mean, self.std, self.to_rgb)
                    for v in results[key]
                ]
            else:
                if self.save_original:
                    results[key + '_unnormalised'] = results[key].copy()
                results[key] = mmcv.imnormalize(results[key], self.mean,
                                                self.std, self.to_rgb)

        results['img_norm_cfg'] = dict(
            mean=self.mean, std=self.std, to_rgb=self.to_rgb)
        return results

    def __repr__(self):
        repr_str = self.__class__.__name__
        # Fix: also report 'save_original' so the repr reflects the full
        # configuration of the transform.
        repr_str += (f'(keys={self.keys}, mean={self.mean}, std={self.std}, '
                     f'to_rgb={self.to_rgb}, '
                     f'save_original={self.save_original})')
        return repr_str
@PIPELINES.register_module()
class RescaleToZeroOne:
    """Map image values from [0, 255] into the [0, 1] range.

    Required keys are the keys in attribute "keys", added or modified keys
    are the keys in attribute "keys". Lists of images are handled
    element-wise.

    Args:
        keys (Sequence[str]): The images to be transformed.
    """

    def __init__(self, keys):
        self.keys = keys

    def __call__(self, results):
        """Rescale every requested entry of ``results``.

        Args:
            results (dict): A dict containing the necessary information and
                data for augmentation.

        Returns:
            dict: The same dict with float32 images divided by 255.
        """
        def _rescale(image):
            # Cast before dividing so integer inputs keep full precision.
            return image.astype(np.float32) / 255.

        for key in self.keys:
            entry = results[key]
            if isinstance(entry, list):
                results[key] = [_rescale(image) for image in entry]
            else:
                results[key] = _rescale(entry)
        return results

    def __repr__(self):
        return self.__class__.__name__ + f'(keys={self.keys})'
| 3,396 | 31.663462 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/logger.py | import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
    """Return the package-level root logger, initializing it on first use.

    A StreamHandler is always attached; when ``log_file`` is given, a
    FileHandler is attached as well. Only the rank-0 process logs at
    ``log_level``; the other processes are set to "Error" by ``get_logger``
    and stay silent most of the time.

    Args:
        log_file (str | None): Optional path of a log file.
        log_level (int): Logging level for the rank-0 process.

    Returns:
        logging.Logger: The root logger.
    """
    # The logger is named after the top-level package, e.g. "mmedit".
    package_name = __name__.split('.')[0]
    return get_logger(package_name, log_file, log_level)
| 968 | 33.607143 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/utils/cli.py | import re
import sys
import warnings
def modify_args():
    """Rewrite deprecated ``--snake_case`` CLI flags to ``--dash-case``.

    Scans ``sys.argv`` in place: for every option token matching
    ``--<name>`` with an underscore in the name, the underscores of the
    option *name* are replaced by dashes and a DeprecationWarning is
    emitted. Any ``=value`` part of the token is left untouched (the
    previous implementation also rewrote underscores inside the value,
    corrupting e.g. ``--out_file=a_b.txt`` into ``--out-file=a-b.txt``).
    """
    for i, arg in enumerate(sys.argv):
        if i == 0:
            # The first token is expected to be the script path.
            assert arg.endswith('.py')
        elif re.match(r'--\w+_.*', arg):
            # Only convert the option name; keep an eventual '=value' intact.
            name, sep, value = arg.partition('=')
            new_arg = name.replace('_', '-') + sep + value
            warnings.warn(
                f'command line argument {arg} is deprecated, '
                f'please use {new_arg} instead.',
                category=DeprecationWarning,
            )
            sys.argv[i] = new_arg
SLOPpy | SLOPpy-main/SLOPpy_Run.py | import SLOPpy
import argparse
import os
import sys
import collections
# Entry point: when executed as a script, hand control to SLOPpy's driver.
if __name__ == '__main__':
    SLOPpy.sloppy_run()
| 123 | 11.4 | 26 | py |
SLOPpy | SLOPpy-main/setup.py | from setuptools import setup
# Inspired from here:
# https://hynek.me/articles/sharing-your-labor-of-love-pypi-quick-and-dirty/
# read the contents of your README file
from pathlib import Path
this_directory = Path(__file__).parent
# The repository README becomes the long description rendered on PyPI.
long_description = (this_directory / "README.md").read_text()
setup(
    name="SLOPpy-package",
    version='1.2',
    author="Daniela Sicilia, Luca Malavolta, et al.",
    author_email = 'daniela.sicilia@inaf.it, luca.malavolta@unipd.it',
    url = 'https://github.com/LucaMalavolta/SLOPpy',
    packages =['SLOPpy', 'SLOPpy.subroutines', 'SLOPpy.instruments'],
    license = 'MIT License',
    description ='SLOPpy: Spectral Lines Of Planets with python',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3'
    ],
    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'SLOPpy_run=SLOPpy.sloppy_run:sloppy_run',
        ]
    },
    zip_safe=False,
    install_requires=[
        'numpy>=1.22',
        'numba>=0.55.2',
        'scipy>=1.8.1',
        'matplotlib>=3.5.2',
        'astropy>=5.1',
        'astroquery>=0.4',
        'pyerfa>=2.0',
        'argparse>=1.4',
        'oyaml>=1.0',
        'emcee>=3.1.2',
        'pyyaml',
        'h5py>=3.7.0',
        'tqdm>=4.60',
        'pygtc>=0.4.1',
        'tinygp>=0.2.2',
        'PyAstronomy>=0.18',
        'sphinx-book-theme',
        'myst-parser',
        'myst-nb',
    ],
    setup_requires=['setuptools']
)
| 1,994 | 31.177419 | 90 | py |
SLOPpy | SLOPpy-main/scripts/planetary_velocity_plot.py | """from classes.kepler_exo import *
# Mass of the star HD189733 (in Solar masses)
#Ms = 0.823
Ms = 1.148
# Mass of the planet (in Solar masses)
#Mp = 1.138 / 1.047348644e3
Mp = 0.69 / 1.047348644e3
K1 = kepler_K1(Mp,Ms,3.52474854657,86.59,0.0082)
print K1
## update
"""
import matplotlib.pyplot as plt
import numpy as np
from SLOPpy.subroutines.constants import *
import argparse
from scipy.optimize import fsolve
from SLOPpy.subroutines.kepler_exo import *
def get_mass(M_star2, M_star1, Period, K1, e0):
    """Zero function for solving the companion mass from an RV semi-amplitude.

    Returns ``K1`` minus the semi-amplitude produced by a companion of mass
    ``M_star2`` orbiting a star of mass ``M_star1`` (both in solar masses)
    with period ``Period`` (days, converted to seconds inside the formula)
    and eccentricity ``e0``, assuming a 90-degree inclination. The
    gravitational constant is in m^3 kg^-1 s^-2, so the output is in m/s;
    it vanishes when ``M_star2`` matches the observed ``K1``.
    """
    # Expression kept term-by-term identical to the analytic K formula so
    # that root-finders (e.g. scipy.optimize.fsolve) recover the mass.
    return K1 - (2. * np.pi * G_grav * M_sun / 86400.0) ** (1.0 / 3.0) \
        * (1.000 / np.sqrt(1.0 - e0 ** 2.0)) * (Period) ** (-1.0 / 3.0) \
        * (M_star2 * (M_star1 + M_star2) ** (-2.0 / 3.0))
# Example: RV semi-amplitude induced by a planet as a function of its mass,
# for a 0.5 M_sun star on a circular (e=0), edge-on (i=90 deg), 5-day orbit.
star_mass = 0.500
P = 5.
i = 90.
e = 0.00
# Planet-mass grid in Jupiter masses (Mjups presumably converts Jupiter to
# solar masses, the unit expected by kepler_K1 -- TODO confirm in constants)
planet_mass = np.arange(0,20, 0.1)
planet_K = planet_mass*0.
for i_val, m_val in enumerate(planet_mass):
    # kepler_K1 presumably returns m/s; divided by 1000 to plot km/s
    planet_K[i_val] = kepler_K1(m_val * Mjups , star_mass, P, i, e)/ 1000.
plt.plot(planet_mass, planet_K)
plt.show()
#sampler = args.sample[0]
#file_conf = args.config_file[0]
| 1,531 | 26.357143 | 131 | py |
SLOPpy | SLOPpy-main/scripts/absorption_depths.py | import numpy as np
import matplotlib.pyplot as plt
import pickle  # Python 3: cPickle was merged into the stdlib pickle module

# Measure the Na I D absorption depth of HD189733: compare the weighted mean
# flux inside central passbands of decreasing width with the mean flux of a
# blue (B) and a red (R) reference band, then plot spectrum and passbands.
#
# Other inputs used during the analysis (the Wyttenbach reference spectra,
# the other two HARPS nights, the weighted-average spectrum) can be loaded
# the same way; here the 2007-08-28 night is analysed.
with open('/home/sicilia/Astro/HD189733/list/'
          'HD1897333_HARPS_2007-08-28_transmission_planetRF_second_correction.p',
          'rb') as f_in:
    data_night3 = pickle.load(f_in)

w = data_night3['wave']
t = (data_night3['average'] - 1.) * 100  # transmission in per cent
t[t > 1.1] = 0.  # suppress strong positive spikes
sigma = data_night3['average_err']

# Na I D2/D1 line centres and doublet centre [Angstrom]
w1 = 5889.906
w2 = 5895.879
wc = 5892.89

# NOTE(review): instead of shifting the passbands, a wavelength shift can be
# applied to the spectrum itself (e.g. w = w + 0.16) before this point.

# Reference bands on both sides of the doublet...
B = (w >= 5874.89) & (w <= 5886.89)
R = (w >= 5898.89) & (w <= 5910.89)
# ...and nested central passbands (total widths 12, 6, 3, 1.5, 0.75, 0.375 A)
C12 = (w >= 5886.8925) & (w <= 5898.8925)
C6_D2 = (w >= 5888.426) & (w <= 5891.386)
C6_D1 = (w >= 5894.399) & (w <= 5897.359)
C3_D2 = (w >= 5889.156) & (w <= 5890.656)
C3_D1 = (w >= 5895.129) & (w <= 5896.629)
C1_5_D2 = (w >= 5889.531) & (w <= 5890.281)
C1_5_D1 = (w >= 5895.504) & (w <= 5896.254)
C0_75_D2 = (w >= 5889.718) & (w <= 5890.094)
C0_75_D1 = (w >= 5895.691) & (w <= 5896.067)
C0_375_D2 = (w >= 5889.812) & (w <= 5890.000)
C0_375_D1 = (w >= 5895.785) & (w <= 5895.973)


def band_average(mask):
    """Inverse-variance weighted mean flux in a passband plus its weight sum."""
    return np.average(t[mask], axis=0, weights=1 / sigma[mask]**2,
                      returned=True)


flux_C12, sum_weights_C12 = band_average(C12)
flux_C6_D2, sum_weights_C6_D2 = band_average(C6_D2)
flux_C6_D1, sum_weights_C6_D1 = band_average(C6_D1)
flux_C3_D2, sum_weights_C3_D2 = band_average(C3_D2)
flux_C3_D1, sum_weights_C3_D1 = band_average(C3_D1)
flux_C1_5_D2, sum_weights_C1_5_D2 = band_average(C1_5_D2)
flux_C1_5_D1, sum_weights_C1_5_D1 = band_average(C1_5_D1)
flux_C0_75_D2, sum_weights_C0_75_D2 = band_average(C0_75_D2)
flux_C0_75_D1, sum_weights_C0_75_D1 = band_average(C0_75_D1)
flux_C0_375_D2, sum_weights_C0_375_D2 = band_average(C0_375_D2)
flux_C0_375_D1, sum_weights_C0_375_D1 = band_average(C0_375_D1)
flux_B, sum_weights_B = band_average(B)
flux_R, sum_weights_R = band_average(R)

# Absorption depth: central flux minus the mean of the two reference bands.
reference_flux = (flux_B + flux_R) / 2
deltaC12 = flux_C12 - reference_flux
deltaC6_D2 = flux_C6_D2 - reference_flux
deltaC6_D1 = flux_C6_D1 - reference_flux
deltaC3_D2 = flux_C3_D2 - reference_flux
deltaC3_D1 = flux_C3_D1 - reference_flux
deltaC1_5_D2 = flux_C1_5_D2 - reference_flux
deltaC1_5_D1 = flux_C1_5_D1 - reference_flux
deltaC0_75_D2 = flux_C0_75_D2 - reference_flux
deltaC0_75_D1 = flux_C0_75_D1 - reference_flux
deltaC0_375_D2 = flux_C0_375_D2 - reference_flux
deltaC0_375_D1 = flux_C0_375_D1 - reference_flux

# Average the D2 and D1 depths for each passband width.
delta_medio_6 = (deltaC6_D2 + deltaC6_D1) / 2
delta_medio_3 = (deltaC3_D2 + deltaC3_D1) / 2
delta_medio_1_5 = (deltaC1_5_D2 + deltaC1_5_D1) / 2
delta_medio_0_75 = (deltaC0_75_D2 + deltaC0_75_D1) / 2
delta_medio_0_375 = (deltaC0_375_D2 + deltaC0_375_D1) / 2

# Propagated uncertainties: the weight sums are inverse variances; the
# factor 100 converts to the same per-cent scale as the fluxes.
sigma_deltaC12 = np.sqrt(1 / sum_weights_C12 + 1 / (2 * sum_weights_B)
                         + 1 / (2 * sum_weights_R)) * 100
sigma_deltaC6 = np.sqrt(1 / sum_weights_C6_D2 + 1 / sum_weights_C6_D1
                        + 1 / sum_weights_B + 1 / sum_weights_R) / 2 * 100
sigma_deltaC3 = np.sqrt(1 / sum_weights_C3_D2 + 1 / sum_weights_C3_D1
                        + 1 / sum_weights_B + 1 / sum_weights_R) / 2 * 100
sigma_deltaC1_5 = np.sqrt(1 / sum_weights_C1_5_D2 + 1 / sum_weights_C1_5_D1
                          + 1 / sum_weights_B + 1 / sum_weights_R) / 2 * 100
sigma_deltaC0_75 = np.sqrt(1 / sum_weights_C0_75_D2 + 1 / sum_weights_C0_75_D1
                           + 1 / sum_weights_B + 1 / sum_weights_R) / 2 * 100
sigma_deltaC0_375 = np.sqrt(1 / sum_weights_C0_375_D2 + 1 / sum_weights_C0_375_D1
                            + 1 / sum_weights_B + 1 / sum_weights_R) / 2 * 100

# Python 3: the original Python 2 print statements, converted to print()
# calls with identical arguments (so the output is unchanged).
print('delta(12) =', deltaC12, ' +- ', sigma_deltaC12)
print('delta(6) = ', delta_medio_6, ' +- ', sigma_deltaC6)
print('delta(3) = ', delta_medio_3, ' +- ', sigma_deltaC3)
print('delta(1.5) = ', delta_medio_1_5, ' +- ', sigma_deltaC1_5)
print('delta(0.75) =', delta_medio_0_75, ' +- ', sigma_deltaC0_75)
print('delta(0.375) =', delta_medio_0_375, ' +- ', sigma_deltaC0_375)

# Plot the spectrum with the reference bands and the nested passbands
# (darker green = narrower passband).
fig = plt.figure(figsize=(12, 6))
plt.plot(w, t)
plt.axvline(5892.89, c='k')
plt.axvspan(5886.89, 5898.89, facecolor='g', alpha=0.3)
plt.axvspan(5874.89, 5886.89, facecolor='b', alpha=0.3)
plt.axvspan(5898.89, 5910.89, facecolor='r', alpha=0.3)
plt.axvspan(5888.426, 5891.386, facecolor='g', alpha=0.4)
plt.axvspan(5894.399, 5897.359, facecolor='g', alpha=0.4)
plt.axvspan(5889.156, 5890.656, facecolor='g', alpha=0.5)
plt.axvspan(5895.129, 5896.629, facecolor='g', alpha=0.5)
plt.axvspan(5889.531, 5890.281, facecolor='g', alpha=0.6)
plt.axvspan(5895.504, 5896.254, facecolor='g', alpha=0.6)
plt.axvspan(5889.718, 5890.094, facecolor='g', alpha=0.7)
plt.axvspan(5895.691, 5896.067, facecolor='g', alpha=0.7)
plt.axvspan(5889.812, 5890.000, facecolor='g', alpha=0.8)
plt.axvspan(5895.785, 5895.973, facecolor='g', alpha=0.8)
plt.xlabel(r'$\lambda$ [$\AA$]')  # raw string avoids invalid-escape warnings
plt.ylabel('R-1')
plt.show()
| 8,507 | 45.747253 | 155 | py |
SLOPpy | SLOPpy-main/scripts/planetary_velocity.py | """from classes.kepler_exo import *
# Mass of the star HD189733 (in Solar masses)
#Ms = 0.823
Ms = 1.148
# Mass of the planet (in Solar masses)
#Mp = 1.138 / 1.047348644e3
Mp = 0.69 / 1.047348644e3
K1 = kepler_K1(Mp,Ms,3.52474854657,86.59,0.0082)
print K1
## update
"""
import matplotlib.pyplot as plt
import numpy as np
from SLOPpy.subroutines.constants import *
import argparse
from scipy.optimize import fsolve
from SLOPpy.subroutines.kepler_exo import *
def get_mass(M_star2, M_star1, Period, K1, e0):
    """Zero function for solving the companion mass from an RV semi-amplitude.

    Returns ``K1`` minus the semi-amplitude produced by a companion of mass
    ``M_star2`` orbiting a star of mass ``M_star1`` (both in solar masses)
    with period ``Period`` (days, converted to seconds inside the formula)
    and eccentricity ``e0``, assuming a 90-degree inclination. The
    gravitational constant is in m^3 kg^-1 s^-2, so the output is in m/s;
    it vanishes when ``M_star2`` matches the observed ``K1``.
    """
    # Expression kept term-by-term identical to the analytic K formula so
    # that root-finders (fsolve below) recover the mass.
    return K1 - (2. * np.pi * G_grav * M_sun / 86400.0) ** (1.0 / 3.0) \
        * (1.000 / np.sqrt(1.0 - e0 ** 2.0)) * (Period) ** (-1.0 / 3.0) \
        * (M_star2 * (M_star1 + M_star2) ** (-2.0 / 3.0))
# Command-line interface: all orbital quantities are positional arguments.
# NOTE(review): the 'period' help string is missing the closing ']'.
parser = argparse.ArgumentParser(prog='planetary_velocity.py', description='Compute the expected semi-amplitude of the planet')
parser.add_argument('star_mass', type=float, nargs=1, help='Stellar mass [solar]')
parser.add_argument('plan_mass', type=float, nargs=1, help='Planet mass [Jupiter/Earth/K units, default Jupiter]')
parser.add_argument('period', type=float, nargs=1, help='Planetary period [days')
parser.add_argument('inclination', type=float, nargs=1, help='Planetary inclination [degrees]')
parser.add_argument('eccentricity', type=float, nargs=1, help='Planetary eccentricity [pure]')
parser.add_argument('-e', type=float, nargs='?', default=False, const=True, help='Planetary mass in Earth units')
parser.add_argument('-k', type=float, nargs='?', default=False, const=True, help='Planetary mass in m/s')
args = parser.parse_args()
star_mass = args.star_mass[0]
P = args.period[0]
i = args.inclination[0]
e = args.eccentricity[0]
# '-e' and '-k' are mutually exclusive interpretations of plan_mass.
if args.e and args.k:
    print('Either -k or -e, not both!')
    quit()
planet_mass = args.plan_mass[0]
if args.e:
    # plan_mass in Earth masses (Mears presumably converts Earth to solar
    # masses -- TODO confirm in the constants module)
    planet_mass *= Mears
elif args.k:
    # plan_mass given as an observed RV semi-amplitude: invert get_mass with
    # fsolve, starting from one Jupiter mass.
    x0 = Mjups
    K_input = planet_mass
    planet_mass = fsolve(get_mass, x0, args=(star_mass, P, K_input, e))
else:
    # Default: plan_mass in Jupiter masses, converted to solar masses.
    planet_mass *= Mjups
print(kepler_K1(planet_mass, star_mass, P, i, e))
#sampler = args.sample[0]
#file_conf = args.config_file[0]
| 2,556 | 35.014085 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum_shortcuts.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.transmission_spectrum import *
from SLOPpy.transmission_spectrum_average import *
#__all__ = ['compute_transmission_spectrum_planetRF_iterative',
# 'plot_transmission_spectrum_planetRF_iterative',
# 'compute_transmission_spectrum_stellarRF_iterative',
# 'plot_transmission_spectrum_stellarRF_iterative',
# 'compute_transmission_spectrum_observerRF_iterative',
# 'plot_transmission_spectrum_observerRF_iterative',
# 'compute_transmission_spectrum_iterative',
# 'plot_transmission_spectrum_iterative']
def compute_transmission_spectrum_planetRF(config_in, lines_label):
    """Compute the transmission spectrum in the planet rest frame."""
    compute_transmission_spectrum(config_in, lines_label, reference='planetRF')
def plot_transmission_spectrum_planetRF(config_in, lines_label, night_input, results_input=''):
    """Plot the transmission spectrum in the planet rest frame."""
    plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='planetRF')
def compute_transmission_spectrum_stellarRF(config_in, lines_label):
    """Compute the transmission spectrum in the stellar rest frame."""
    compute_transmission_spectrum(config_in, lines_label, reference='stellarRF')
def plot_transmission_spectrum_stellarRF(config_in, lines_label, night_input, results_input=''):
    """Plot the transmission spectrum in the stellar rest frame."""
    plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='stellarRF')
def compute_transmission_spectrum_observerRF(config_in, lines_label):
    """Compute the transmission spectrum in the observer rest frame."""
    compute_transmission_spectrum(config_in, lines_label, reference='observerRF')
def plot_transmission_spectrum_observerRF(config_in, lines_label, night_input, results_input=''):
    """Plot the transmission spectrum in the observer rest frame."""
    plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='observerRF')
def compute_transmission_spectrum_planetRF_iterative(config_in, lines_label):
    """Planet-rest-frame transmission spectrum, once per PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum(config_in, lines_label, reference='planetRF', pca_iteration=it)
def compute_transmission_spectrum_stellarRF_iterative(config_in, lines_label):
    """Stellar-rest-frame transmission spectrum, once per PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum(config_in, lines_label, reference='stellarRF', pca_iteration=it)
def compute_transmission_spectrum_observerRF_iterative(config_in, lines_label):
    """Observer-rest-frame transmission spectrum, once per PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum(config_in, lines_label, reference='observerRF', pca_iteration=it)
def compute_transmission_spectrum_iterative(config_in, lines_label):
    """PCA-iterative transmission spectrum; defaults to the planet rest frame."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum(config_in, lines_label, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_planetRF_iterative(config_in, lines_label, night_input, results_input=''):
    """Plot the planet-rest-frame transmission spectrum for each PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_stellarRF_iterative(config_in, lines_label, night_input, results_input=''):
    """Plot the stellar-rest-frame transmission spectrum for each PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='stellarRF', pca_iteration=it)
def plot_transmission_spectrum_observerRF_iterative(config_in, lines_label, night_input, results_input=''):
    """Plot the observer-rest-frame transmission spectrum for each PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='observerRF', pca_iteration=it)
def plot_transmission_spectrum_iterative(config_in, lines_label, night_input, results_input=''):
    """Plot the PCA-iterative transmission spectrum; defaults to the planet rest frame."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        plot_transmission_spectrum(config_in, lines_label, night_input, results_input, reference='planetRF', pca_iteration=it)
def compute_transmission_spectrum_average_planetRF(config_in, lines_label):
    """Compute the average transmission spectrum in the planet rest frame."""
    compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF')
def compute_transmission_spectrum_average_observerRF(config_in, lines_label):
    """Compute the average transmission spectrum in the observer rest frame."""
    compute_transmission_spectrum_average(config_in, lines_label, reference='observerRF')
def compute_transmission_spectrum_average_stellarRF(config_in, lines_label):
    """Compute the average transmission spectrum in the stellar rest frame."""
    compute_transmission_spectrum_average(config_in, lines_label, reference='stellarRF')
def plot_transmission_spectrum_average_planetRF(config_in, lines_label, night_input='', results_input=''):
    """Plot the average transmission spectrum in the planet rest frame."""
    plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='planetRF')
def plot_transmission_spectrum_average_observerRF(config_in, lines_label, night_input='', results_input=''):
    """Plot the average transmission spectrum in the observer rest frame."""
    plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='observerRF')
def plot_transmission_spectrum_average_stellarRF(config_in, lines_label, night_input='', results_input=''):
    """Plot the average transmission spectrum in the stellar rest frame."""
    plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='stellarRF')
def compute_transmission_spectrum_average_planetRF_iterative(config_in, lines_label):
    """Average planet-rest-frame transmission spectrum, once per PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF', pca_iteration=it)
def compute_transmission_spectrum_average_observerRF_iterative(config_in, lines_label):
    """Average observer-rest-frame transmission spectrum, once per PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum_average(config_in, lines_label, reference='observerRF', pca_iteration=it)
def compute_transmission_spectrum_average_stellarRF_iterative(config_in, lines_label):
    """Average stellar-rest-frame transmission spectrum, once per PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum_average(config_in, lines_label, reference='stellarRF', pca_iteration=it)
def compute_transmission_spectrum_average_iterative(config_in, lines_label):
    """PCA-iterative average transmission spectrum; defaults to the planet rest frame."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_average_planetRF_iterative(config_in, lines_label, night_input='', results_input=''):
    """Plot the average planet-rest-frame spectrum for each PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='planetRF', pca_iteration=it)
def plot_transmission_spectrum_average_observerRF_iterative(config_in, lines_label, night_input='', results_input=''):
    """Plot the average observer-rest-frame spectrum for each PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='observerRF', pca_iteration=it)
def plot_transmission_spectrum_average_stellarRF_iterative(config_in, lines_label, night_input='', results_input=''):
    """Plot the average stellar-rest-frame spectrum for each PCA iteration."""
    pca_parameters = from_config_get_pca_parameters(config_in)
    for it in range(0, pca_parameters.get('iterations',5)):
        plot_transmission_spectrum_average(config_in, lines_label, night_input, results_input, reference='stellarRF', pca_iteration=it)
def plot_transmission_spectrum_average_iterative(config_in, lines_label, night_input='', results_input=''):
    """Plot the average transmission spectrum (default: planet rest frame), once per PCA iteration."""
    total_iterations = from_config_get_pca_parameters(config_in).get('iterations', 5)
    for iteration_index in range(total_iterations):
        plot_transmission_spectrum_average(config_in,
                                           lines_label,
                                           night_input,
                                           results_input,
                                           reference='planetRF',
                                           pca_iteration=iteration_index)
| 8,420 | 45.783333 | 136 | py |
SLOPpy | SLOPpy-main/SLOPpy/pca_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
__all__ = ["compute_pca_preparation"]
def compute_pca_preparation(config_in, append_name=None):
    """Stack, normalize, and flatten the e2ds spectra of each night as input for PCA.

    For every night, the 2D (order x pixel) extracted spectra are:
      1) corrected for the blaze function and the relative pixel size,
      2) normalized to the median flux of each order,
      3) flattened order-by-order with an iteratively sigma-clipped
         Chebyshev polynomial fit in wavelength.

    Results are saved to a cPickle file per night; nights with existing
    results are skipped.

    Args:
        config_in: parsed SLOPpy configuration dictionary.
        append_name: optional suffix appended to the subroutine/output names.

    BUG FIX: the output dictionary previously stored the flux cube under the
    keys 'stack_bjd' and 'stack_airmass', silently discarding the BJD and
    airmass vectors; the correct arrays are now saved.
    """

    if append_name:
        subroutine_name = filename = 'pca_preparation_' + append_name
    else:
        subroutine_name = filename = 'pca_preparation'

    night_dict = from_config_get_nights(config_in)

    # Parameters of the order-by-order polynomial flattening
    preparation_dict = {
        'fit_iters': 5,   # fit + sigma-clipping iterations
        'fit_order': 3,   # degree of the Chebyshev polynomial
        'fit_sigma': 3    # lower-side clipping threshold, in units of the residual std
    }

    for night in night_dict:

        try:
            load_from_cpickle(filename, config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except (FileNotFoundError, IOError):
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving input and calibration data """
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'],
                                           use_refraction=True, use_telluric=False,
                                           use_interstellar=False, use_telluric_spline=False)

        obs_ref = lists['observations'][0]

        n_obs = len(lists['observations'])
        n_orders = input_data[obs_ref]['n_orders']
        n_pixels = input_data[obs_ref]['n_pixels']

        stack_wave = np.zeros([n_obs, n_orders, n_pixels], dtype=np.double)
        stack_e2ds = np.zeros([n_obs, n_orders, n_pixels], dtype=np.double)
        stack_e2ds_err = np.zeros([n_obs, n_orders, n_pixels], dtype=np.double)
        stack_bjd = np.zeros(n_obs, dtype=np.double)
        stack_airmass = np.zeros(n_obs, dtype=np.double)

        for i_obs, obs in enumerate(lists['observations']):

            # Remove the blaze function and rescale by the relative pixel size
            blaze_wave_refactoring = 1. / calib_data['blaze'] \
                / (input_data[obs]['step'] / np.median(input_data[obs]['step']))

            stack_wave[i_obs, :, :] = input_data[obs]['wave']
            stack_e2ds[i_obs, :, :] = input_data[obs]['e2ds'] * blaze_wave_refactoring
            stack_e2ds_err[i_obs, :, :] = input_data[obs]['e2ds_err'] * blaze_wave_refactoring
            stack_bjd[i_obs] = input_data[obs]['BJD']
            stack_airmass[i_obs] = input_data[obs]['AIRMASS']

            # Normalize each order to its median flux
            median = np.nanmedian(stack_e2ds[i_obs, :, :], axis=1)
            for i_order in range(n_orders):
                stack_e2ds[i_obs, i_order, :] /= median[i_order]
                stack_e2ds_err[i_obs, i_order, :] /= median[i_order]

        # Only pixels with (normalized) positive flux enter the polynomial fit
        poly_flag = (stack_e2ds > 0.001)

        stack_polyfit = np.zeros_like(stack_e2ds)

        for i_order in range(n_orders):
            order_wave = stack_wave[:, i_order, :]
            order_e2ds = stack_e2ds[:, i_order, :]
            order_flag = poly_flag[:, i_order, :]

            for n_iter in range(preparation_dict['fit_iters']):
                coeff_order = np.polynomial.chebyshev.chebfit(
                    order_wave[order_flag],
                    order_e2ds[order_flag],
                    preparation_dict['fit_order'])
                fit_order = np.polynomial.chebyshev.chebval(order_wave, coeff_order)
                fit_shaped = np.reshape(fit_order, np.shape(order_wave))
                residuals = order_e2ds - fit_shaped

                # Only lower outliers are clipped, so that absorption features
                # do not bias the continuum fit
                if n_iter < preparation_dict['fit_iters'] - 1:
                    std = np.std(residuals[order_flag])
                    order_flag = (order_flag) & (residuals > -preparation_dict['fit_sigma'] * std)

            stack_e2ds[:, i_order, :] /= fit_shaped
            stack_e2ds_err[:, i_order, :] /= fit_shaped
            stack_polyfit[:, i_order, :] = fit_shaped

        preparation = {
            'stack_wave': stack_wave,
            'stack_e2ds': stack_e2ds,
            'stack_e2ds_err': stack_e2ds_err,
            'stack_bjd': stack_bjd,           # fixed: was stack_e2ds
            'stack_airmass': stack_airmass,   # fixed: was stack_e2ds
            'stack_polyfit': stack_polyfit,
            'frame': {
                'n_obs': n_obs,
                'n_orders': n_orders,
                'n_pixels': n_pixels,
            },
            'fit_pams': preparation_dict
        }

        save_to_cpickle(filename, preparation, config_in['output'], night)
| 5,114 | 37.458647 | 124 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.transmission_spectrum_preparation import compute_transmission_spectrum_preparation
from scipy.signal import savgol_filter
__all__ = ['compute_transmission_spectrum',
'plot_transmission_spectrum']
subroutine_name = 'transmission_spectrum'
sampler_name = 'emcee'
def compute_transmission_spectrum(config_in, lines_label, reference='planetRF', night_input='', preparation_only=False, pca_iteration=-1):
results_list_default = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
# compute_transmission_spectrum_preparation(config_in)
night_dict = from_config_get_nights(config_in)
pca_parameters = from_config_get_pca_parameters(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
norm_dict = lines_dict.get('normalization', {})
norm_pams = {}
norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')
""" Normalization parameters for polynomial model"""
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
""" Normalization parameters using Savitzky-Golay filter"""
norm_pams['window_length'] = norm_dict.get('window_length', 101)
norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
norm_pams['mode'] = norm_dict.get('mode', 'nearest')
norm_pams['cval'] = norm_dict.get('cval', 1.0)
shared_data = load_from_cpickle('shared', config_in['output'])
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
""" Using the line-specific range to define the transmission spectrum region """
shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
& (shared_data['coadd']['wave'] < lines_dict['range'][1])
binned_selection = (shared_data['binned']['wave'] >= lines_dict['range'][0]) \
& (shared_data['binned']['wave'] < lines_dict['range'][1])
transmission_template = {
'subroutine': subroutine_name,
'range': lines_dict['range'],
'wave': shared_data['coadd']['wave'][shared_selection],
'step': shared_data['coadd']['step'][shared_selection],
'size': np.int(np.sum(shared_selection)),
'binned_wave': shared_data['binned']['wave'][binned_selection],
'binned_step': shared_data['binned']['step'][binned_selection],
'binned_size': np.int(np.sum(binned_selection))
}
for night in night_dict:
print()
print("Running {0:45s} for {1:20s} Night:{2:15s} ".format(subroutine_name, lines_label, night))
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(pca_parameters.get('ref_iteration', 3)).zfill(2)
preparation = preparation_input[it_string]
else:
preparation = preparation_input
it_string = ''
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
results_list = results_list_default.copy()
binned_mcmc_night = check_existence_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string)
binned_mcmc_global = check_existence_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
mcmc_night = check_existence_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string=it_string)
mcmc_global = check_existence_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
if mcmc_night and mcmc_global:
mcmc_results_night = load_from_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string=it_string)
mcmc_results_global = load_from_cpickle(
'transmission_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
print(' Observational parameters from MCMC fit of unbinned data and configuration file')
elif binned_mcmc_night and binned_mcmc_global:
mcmc_results_night = load_from_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string=it_string)
mcmc_results_global = load_from_cpickle(
'transmission_binned_mcmc_'+sampler_name+'_results', config_in['output'], lines=lines_label, it_string=it_string)
print(' Observational parameters from MCMC fit of binned data and configuration file')
else:
print(' Observational parameters from configuration file')
results_list = ['user']
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
for results_selection in results_list:
try:
transmission = load_from_cpickle(subroutine_name+'_'+reference + '_' +
results_selection, config_in['output'], night, lines_label, it_string=it_string)
print("{0:45s} Night:{1:15s} {2:s} {3:s} {4:s}".format(
subroutine_name, night, lines_label, results_selection, 'Retrieved'))
continue
except (FileNotFoundError, IOError):
print("{0:45s} Night:{1:15s} {2:s} {3:s} {4:s}".format(
subroutine_name, night, lines_label, results_selection, 'Computing'))
transmission = transmission_template.copy()
if len(it_string) > 0:
transmission['pca_output'] = True
else:
transmission['pca_output'] = False
print_warning = True
for obs in lists['observations']:
""" we start from the e2ds file, after correction for blaze and
division by the master-out
Observation data:
wave: input_data[obs]['wave']
step: input_data[obs]['step']
flux: preparation[obs]['deblazed']
ferr: preparation[obs]['deblazed_err']
"""
transmission[obs] = {}
transmission[obs] = {
'BJD': input_data[obs]['BJD'],
'AIRMASS': input_data[obs]['AIRMASS']
}
""" Shift into planetary reference system is the default
choice"""
if results_selection == 'user':
planet_R_factor = observational_pams.get('Rp_factor', 1.00000)
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = observational_pams[obs]['rv_shift_ORF2PRF']
rv_shift_clv = observational_pams[obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_night_MED':
planet_R_factor = mcmc_results_night['results']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_night['results']['observational_pams'][obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_night_MAP':
planet_R_factor = mcmc_results_night['results_MAP']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_night['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_global_MED':
planet_R_factor = mcmc_results_global['results']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_global['results']['observational_pams'][obs]['rv_shift_SRF2PRF']
elif results_selection == 'mcmc_global_MAP':
planet_R_factor = mcmc_results_global['results_MAP']['planet_R']
if reference in ['observer', 'observerRF', 'ORF']:
rv_shift = 0.000
#rv_shift_clv = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = -observational_pams[obs]['rv_shift_ORF2SRF']
elif reference in ['stellar', 'stellarRF', 'SRF']:
#rv_shift = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_ORF2SRF']
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
rv_shift_clv = 0.0000
else:
rv_shift = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF']
rv_shift_clv = mcmc_results_global['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF']
""" Step 2): rebin the 2D ratio spectra to 1D """
if transmission['pca_output']:
transmission[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
np.ones_like(calib_data['blaze']),
transmission['wave'],
transmission['step'],
preserve_flux=False,
rv_shift=rv_shift)
transmission[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
np.ones_like(calib_data['blaze']),
transmission['wave'],
transmission['step'],
rv_shift=rv_shift,
preserve_flux=False,
is_error=True)
else:
preserve_flux = input_data[obs].get('absolute_flux', True)
transmission[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
calib_data['blaze'],
transmission['wave'],
transmission['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift)
transmission[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
calib_data['blaze'],
transmission['wave'],
transmission['step'],
preserve_flux=preserve_flux,
rv_shift=rv_shift,
is_error=True)
if transmission[obs]['rebinned_err'][0] ==0:
transmission[obs]['rebinned'][0] = transmission[obs]['rebinned'][1]
transmission[obs]['rebinned_err'][0] = transmission[obs]['rebinned_err'][1]
if transmission[obs]['rebinned_err'][-1] ==0:
transmission[obs]['rebinned'][-1] = transmission[obs]['rebinned'][-2]
transmission[obs]['rebinned_err'][-1] = transmission[obs]['rebinned_err'][-2]
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
#plt.plot(input_data[obs]['wave'][0,:], preparation[obs]['ratio_err'][0,:])
#plt.scatter(transmission['wave'], transmission[obs]['rebinned_err'], c='b')
#plt.axhline(0.0000, c='C2')
#plt.show()
#quit()
#import matplotlib.pyplot as plt
#plt.scatter(input_data[obs]['wave'], preparation[obs]['ratio'], s=2)
#plt.xlim(lines_dict['range'][0], lines_dict['range'][1])
# plt.show()
if clv_rm_correction:
"""" CLV + RM computation in the planetary reference frame """
transmission[obs]['clv_model_stellarRF'] = interpolate1d_grid_nocheck(planet_R_factor,
clv_rm_models['common']['radius_grid'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'])
transmission[obs]['clv_model_rebinned'] = \
rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
transmission[obs]['clv_model_stellarRF'],
transmission['wave'],
transmission['step'],
preserve_flux=False,
rv_shift=rv_shift_clv)
#import matplotlib.pyplot as plt
#print(obs, planet_R_factor)
#plt.plot(clv_rm_models['common']['wave'], transmission[obs]['clv_model_stellarRF'], zorder=100, c='C2')
#plt.scatter(transmission['wave'], transmission[obs]['clv_model_rebinned'], s=2)
# plt.show()
transmission[obs]['corrected'] = transmission[obs]['rebinned'] / \
transmission[obs]['clv_model_rebinned']
transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['clv_model_rebinned']
else:
transmission[obs]['clv_model_rebinned'] = np.ones(transmission['size'])
transmission[obs]['corrected'] = transmission[obs]['rebinned']
transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err']
if print_warning:
print(' *** No CLV correction')
if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
""" Continuum normalization preparatory steps:
1) exclusion of regions with lines of interes
2) exclusion of regions with stellar lines
3) Polynomial fit of selected regions
Boolean array initialized to all True values
"""
transmission[obs]['line_exclusion'] = (transmission['wave'] > 0.)
""" Continuum normalization:
1) exclusion of regions with transmission lines under study, now
in the RF of the lines
"""
for line_key, line_val in lines_dict['lines'].items():
transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] & (
np.abs(transmission['wave']-line_val) > 3.)
""" Continuum normalization:
2) exclusion of regions with planetary lines, taking into account the planetary RV semi-amplitude
"""
if clv_rm_correction:
stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models['common']['norm_convolved'],
transmission['wave'],
transmission['step'],
rv_shift=rv_shift_clv,
preserve_flux=False)
stellar_spectrum_derivative = first_derivative(transmission['wave'], stellar_spectrum_rebinned)
missing_model = (np.abs(stellar_spectrum_rebinned) < 0.0001)
cont_10perc = np.percentile(np.abs(stellar_spectrum_derivative), norm_pams['percentile_selection'])
#transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] \
# & (np.abs(stellar_spectrum_derivative) < cont_10perc) \
# & (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
line_exclusion = transmission[obs]['line_exclusion'] \
& (np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
if np.sum(line_exclusion) < len(line_exclusion)/200:
transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] \
& ( missing_model | ((np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])))
else:
transmission[obs]['line_exclusion'] = line_exclusion
elif print_warning:
print(" No stellar synthetic spectrum from CLV models")
print(" some stellar lines may be included in transmission normalization ")
print_warning = False
""" Continuum normalization:
3) Polynomial fit, everything is hard coded now but personalized
options can be implemented easily in the yaml file
"""
selection = transmission[obs]['line_exclusion'] & (
transmission[obs]['corrected'] > np.std(transmission[obs]['corrected']))
transmission[obs]['continuum_coeff'] = \
np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
transmission[obs]['corrected'][selection],
norm_pams['spectra_poly_degree'])
transmission[obs]['continuum'] = np.polynomial.chebyshev.chebval(
transmission['wave'], transmission[obs]['continuum_coeff'])
transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
transmission[obs]['continuum']
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
#plt.scatter(transmission['wave'], transmission[obs]['corrected_err']+0.05, c='b')
#plt.scatter(transmission['wave'], transmission[obs]['normalized_err'], c='r')
#plt.show()
#quit()
transmission[obs]['continuum_uncorrected_coeff'] = \
np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
transmission[obs]['rebinned'][selection],
norm_pams['spectra_poly_degree'])
transmission[obs]['continuum_uncorrected'] = np.polynomial.chebyshev.chebval(
transmission['wave'], transmission[obs]['continuum_uncorrected_coeff'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / \
transmission[obs]['continuum_uncorrected']
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['continuum_uncorrected']
elif norm_pams['normalize_transmission'] and (
norm_pams['normalization_model'] == 'savgol'
or norm_pams['normalization_model'] == 'savitzky-golay'):
print(' ', obs, ' normalization using Savitzky-Golay filter')
transmission[obs]['continuum_coeff'] = None
transmission[obs]['continuum_uncorrected_coeff'] = None
transmission[obs]['continuum'] = savgol_filter(transmission[obs]['corrected'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
transmission[obs]['continuum']
transmission[obs]['continuum_uncorrected'] = savgol_filter(transmission[obs]['rebinned'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / transmission[obs]['continuum_uncorrected']
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
transmission[obs]['continuum_uncorrected']
else:
transmission[obs]['continuum_coeff'] = None
transmission[obs]['continuum'] = np.ones_like(transmission['wave'])
transmission[obs]['normalized'] = transmission[obs]['corrected'].copy()
transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'].copy()
#import matplotlib.pyplot as plt
#plt.scatter(transmission['wave'], transmission[obs]['corrected'])
#plt.plot(transmission['wave'], transmission[obs]['continuum'])
#plt.scatter(transmission['wave'][selection], transmission[obs]['corrected'][selection], c='r')
# plt.show()
transmission[obs]['continuum_uncorrected_coeff'] = None
transmission[obs]['continuum_uncorrected'] = np.ones_like(transmission['wave'])
transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'].copy()
transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'].copy()
print_warning = False
transm_average = np.zeros([len(lists['transit_full']), transmission['size']])
weights_average = np.zeros([len(lists['transit_full']), transmission['size']])
clvrm_average = np.zeros([len(lists['transit_full']), transmission['size']])
uncorr_average = np.zeros([len(lists['transit_full']), transmission['size']])
for i, obs in enumerate(lists['transit_full']):
transm_average[i, :] = transmission[obs]['normalized'][:]
weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)
clvrm_average[i, :] = transmission[obs]['clv_model_rebinned'][:]
uncorr_average[i, :] = transmission[obs]['normalized_uncorrected'][:]
transmission['average'], transmission['sum_weights'] = np.average(
transm_average, axis=0, weights=weights_average, returned=True)
transmission['average_err'] = 1. / np.sqrt(transmission['sum_weights'])
transmission['average_clv_model'], _ = np.average(
clvrm_average, axis=0, weights=weights_average, returned=True)
transmission['average_uncorrected'], _ = np.average(
uncorr_average, axis=0, weights=weights_average, returned=True)
transmission['binned'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_err'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_err'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False,
is_error=True)
transmission['binned_clv_model'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_clv_model'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_uncorrected'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_uncorrected'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transm_average = np.zeros([len(lists['transit_out']), transmission['size']])
weights_average = np.zeros([len(lists['transit_out']), transmission['size']])
for i, obs in enumerate(lists['transit_out']):
transm_average[i, :] = transmission[obs]['normalized'][:]
weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)
transmission['average_out'], transmission['sum_weights_out'] = np.average(
transm_average, axis=0, weights=weights_average, returned=True)
transmission['average_out_err'] = 1./np.sqrt(transmission['sum_weights_out'])
transmission['binned_out'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_out'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False)
transmission['binned_out_err'] = \
rebin_1d_to_1d(transmission['wave'],
transmission['step'],
transmission['average_out_err'],
transmission['binned_wave'],
transmission['binned_step'],
preserve_flux=False,
is_error=True)
#save_to_cpickle('transmission_'+reference+'_processed', processed, config_in['output'], night)
save_to_cpickle(subroutine_name + '_' + reference + '_' + results_selection,
transmission, config_in['output'], night, lines_label, it_string)
# Forcing memory deallocation
transmission = None
# Forcing memory deallocation
clv_rm_models = None
def plot_transmission_spectrum(config_in, lines_label, night_input='', results_input='', reference='planetRF', pca_iteration=-1):
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
night_dict = from_config_get_nights(config_in)
if night_input == '':
night_list = night_dict
else:
night_list = np.atleast_1d(night_input)
if results_input == '':
results_list = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
else:
results_list = np.atleast_1d(results_input)
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
os.system('mkdir -p plots')
interactive_plots = from_config_get_interactive_plots(config_in)
for night in night_list:
# Workaround to check if the transmission spectrum has been obtained through PCA iterations
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
else:
it_string = ''
preparation_input = None
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
for results_selection in results_list:
filename_rad = subroutine_name + '_'+reference+'_'+results_selection
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the analysis"""
try:
#processed = load_from_cpickle('transmission_'+reference+'_processed', config_in['output'], night)
transmission = load_from_cpickle(filename_rad, config_in['output'], night, lines_label, it_string)
except (FileNotFoundError, IOError):
print()
print("No transmission spectrum in {0:s}, no plots".format(reference))
continue
""" Creation of the color array, based on the BJD of the observations
"""
bjd = []
am = []
for obs in lists['observations']:
bjd.append(transmission[obs]['BJD'] - 2450000.0)
am.append(transmission[obs]['AIRMASS'])
color_cmap = plt.cm.viridis
color_norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])
colors = color_cmap(color_norm(np.asarray(bjd)))
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
# commented out because the plot was too cumbersome
for obs in lists['transit_full']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax1.scatter(transmission['wave'],
transmission[obs]['normalized'],
c=color, s=1, zorder=3, alpha=0.25)
for obs in lists['transit_out']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax2.scatter(transmission['wave'],
transmission[obs]['normalized'],
c=color, s=1, zorder=3, alpha=0.25)
ax1.set_ylim(0.925, 1.075)
ax2.set_xlabel('$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Lines: {0:s} Night: {1:s} \n In-transit transmission spectrum in {2:s} \n Solution {3:s}'.format(
lines_label, night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
try:
ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
except:
ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
output_file = get_filename(filename_rad + '_observations',
config_in['output'], night, lines_label, it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
try:
master_out = load_from_cpickle('master_out', config_in['output'], night)
ax2.plot(master_out['wave'],
master_out['rescaled']-0.06,
color='k', zorder=10, label='master-out')
except (FileNotFoundError, IOError):
pass
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
ax2.plot(telluric['template']['input']['wave'],
telluric['template']['input']['flux'] - 0.06,
color='C1', zorder=10, label='telluric')
ax2.plot(telluric['template']['input']['wave'],
(telluric['template']['input']['flux']-1.)*10. + 1. - 0.06,
color='C2', alpha=0.5, zorder=9, label='telluric (x10)')
except (FileNotFoundError, IOError, KeyError):
pass
#master_out = load_from_cpickle('master_out', config_in['output'], night)
# ax1.errorbar(master_out['wave'],
# master_out['rescaled'],
# yerr=master_out['rescaled_err'],
# fmt='.', c='C0', label='master-out ' + night)
ax1.errorbar(transmission['wave'],
transmission['average'],
yerr=transmission['average_err'],
fmt='ko', ms=1, zorder=5, alpha=0.25)
ax1.errorbar(transmission['binned_wave'],
transmission['binned'],
yerr=transmission['binned_err'],
fmt='ro', ms=4, lw=2, zorder=10)
ax2.errorbar(transmission['wave'],
transmission['average_out'],
yerr=transmission['average_out_err'],
fmt='ko', ms=1, zorder=5, alpha=0.25, label='average')
ax2.errorbar(transmission['binned_wave'],
transmission['binned_out'],
yerr=transmission['binned_out_err'],
fmt='ro', ms=4, lw=2, zorder=10, label='binned average')
ax1.set_ylim(0.99, 1.01)
ax2.set_xlabel('$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Lines: {0:s} Night: {1:s} \n In-transit transmission spectrum in {2:s} \n Solution {3:s}'.format(
lines_label, night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
try:
ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
except:
ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
#ax1.set_xlim(config_in['master-out']['wavelength_range'][0], config_in['master-out']['wavelength_range'][1])
output_file = get_filename(filename_rad + '_binned',
config_in['output'], night, lines_label, it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
if not clv_rm_correction:
continue
fig = plt.figure(figsize=(12, 6))
gs = GridSpec(2, 2, width_ratios=[50, 1])
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
cbax1 = plt.subplot(gs[:, 1])
# commented out because the plot was too cumbersome
for obs in lists['transit_full']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax1.plot(clv_rm_models['common']['wave'],
transmission[obs]['clv_model_stellarRF'],
zorder=3, alpha=0.25)
ax1.scatter(transmission['wave'],
transmission[obs]['clv_model_rebinned'],
c=color, s=1, zorder=10, alpha=0.5)
for obs in lists['transit_out']:
color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
ax2.plot(clv_rm_models['common']['wave'],
transmission[obs]['clv_model_stellarRF'],
zorder=3, alpha=0.25)
ax2.scatter(transmission['wave'],
transmission[obs]['clv_model_rebinned'],
c=color, s=1, zorder=10, alpha=0.5)
ax2.set_xlabel('$\lambda$ [$\AA$]')
ax2.legend(loc=3)
ax1.set_title('Lines: {0:s} Night: {1:s} \n CLV-RM correction in {2:s} \n Solution {3:s}'.format(
lines_label, night, reference, results_selection))
ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))
try:
ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
except:
ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
sm.set_array([]) # You have to set a dummy-array for this to work...
cbar = plt.colorbar(sm, cax=cbax1)
cbar.set_label('BJD - 2450000.0')
fig.subplots_adjust(wspace=0.05, hspace=0.4)
output_file = get_filename(filename_rad + '_clv_rm_models',
config_in['output'], night, lines_label, it_string, extension='.pdf')
plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
if interactive_plots:
plt.show()
plt.close()
| 44,684 | 51.447183 | 146 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_binned_mcmc.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.bayesian_emcee import *
# from SLOPpy.subroutines.rebin_subroutines import *
from scipy.signal import savgol_filter
__all__ = ['compute_transmission_binned_mcmc','compute_transmission_binned_mcmc_iterative',
'plot_transmission_binned_mcmc','plot_transmission_binned_mcmc_iterative']
subroutine_name = 'transmission_binned_mcmc'
def compute_transmission_binned_mcmc_iterative(config_in, lines_label, reference='planetRF'):
    """Run the binned-MCMC transmission analysis once per PCA iteration.

    Thin driver around :func:`compute_transmission_binned_mcmc`: it reads the
    PCA section of the configuration and repeats the analysis for each PCA
    iteration index, so that every intermediate PCA product gets its own MCMC
    run.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
        lines_label (str): label of the spectral-line set to analyze.
        reference (str): rest frame of the analysis, forwarded to the
            single-iteration routine (default ``'planetRF'``; added for
            consistency with ``plot_transmission_binned_mcmc_iterative``).
            NOTE(review): the callee currently overrides this internally —
            kept for forward compatibility.
    """
    pca_parameters = from_config_get_pca_parameters(config_in)

    # Default of 5 iterations mirrors the other *_iterative drivers in SLOPpy.
    n_iterations = pca_parameters.get('iterations', 5)
    for it in range(n_iterations):
        compute_transmission_binned_mcmc(config_in, lines_label,
                                         reference=reference,
                                         pca_iteration=it)
def plot_transmission_binned_mcmc_iterative(config_in, lines_label, night_input='', reference='planetRF'):
    """Produce the binned-MCMC transmission plots for every PCA iteration.

    Reads the number of PCA iterations from the configuration (defaulting
    to 5 when not specified) and calls the single-iteration plotting
    routine once per iteration index.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
        lines_label (str): label of the spectral-line set to plot.
        night_input (str): restrict plots to a single night; empty string
            means all nights.
        reference (str): rest frame used for the plots (default 'planetRF').
    """
    pca_parameters = from_config_get_pca_parameters(config_in)

    total_iterations = pca_parameters.get('iterations', 5)
    iteration = 0
    while iteration < total_iterations:
        plot_transmission_binned_mcmc(config_in, lines_label,
                                      night_input=night_input,
                                      reference=reference,
                                      pca_iteration=iteration)
        iteration += 1
def compute_transmission_binned_mcmc(config_in, lines_label, reference='planetRF', pca_iteration=-1):
night_dict = from_config_get_nights(config_in)
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
clv_rm_dict = from_config_get_clv_rm(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
""" selection of those parameters that are specific of the spectral line(s)
under analysis
"""
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
sampler_pams = lines_dict['sampler_parameters']
sampler_name = sampler_pams.get('sampler_name', 'emcee')
# TODO reference as input parameter
reference = 'planetRF'
"""
- case 0: only one spectral line, default line parameters are contrast, FWHM, rv_shift
- case 1: only one spectral line, no winds
- case 2: only one spectral line, no planetary radius dependance
- case 3: only one spectral line, no winds and no planetary radius dependance
- case 10: more than one spectral lines, all line parameters are free and independent
- case 11: more than one spectral lines, all lines are affected by the same wind
- case 12: more than one spectral lines, all lines have same FWHM
- case 13: more than one spectral lines, all lines are affected by the same wind and have same FWHM
- case 14: more than one spectral lines, no winds
- case 15: more than one spectral lines, no winds, all lines have same FWHM
- case 20: more than one spectral lines, no Rp dependance, all line parameters are free and independent
- case 21: more than one spectral lines, no Rp dependance, all lines are affected by the same wind
- case 22: more than one spectral lines, no Rp dependance, all lines have same FWHM
- case 23: more than one spectral lines, no Rp dependance, all lines are affected by the same wind and have same FWHM
- case 24: more than one spectral lines, no Rp dependance, no winds
- case 25: more than one spectral lines, no Rp dependance, no winds, all lines have same FWHM
free_Rp free_winds shared_winds shared_FWHM
- case 0: True True False False DEFAULT for single line
- case 1: True False False False
- case 2: False True False False
- case 3: False False False False
- case 10: True True False False DEFAULT for multiple lines
- case 11: True True True False
- case 12: True True False True
- case 13: True True True True
- case 14: True False False False
- case 15: True False False True
- case 20: False True False False
- case 21: False True True False
- case 22: False True False True
- case 23: False True True True
- case 24: False False False False
- case 25: False False False True
"""
model_case = 10
norm_dict = lines_dict.get('normalization', clv_rm_dict.get('normalization', {}))
norm_pams={}
norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')
""" Normalization parameters for polynomial model"""
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
""" Normalization parameters using Savitzky-Golay filter"""
norm_pams['window_length'] = norm_dict.get('window_length', 101)
norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
norm_pams['mode'] = norm_dict.get('mode', 'nearest')
norm_pams['cval'] = norm_dict.get('cval', 1.0)
norm_pams['normalize_rebinned'] = norm_dict.get('normalize_rebinned', False)
# Added back-compatibility to old or "wrong" keys
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
fit_pams = lines_dict['fit_parameters']
free_Rp = fit_pams.get('free_Rp', True) \
and fit_pams.get('free_planet_radius', True) \
and clv_rm_correction
free_winds = fit_pams.get('free_winds', True) \
and fit_pams.get('free_offset', True)
shared_winds = fit_pams.get('shared_winds', False) \
or fit_pams.get('shared_offset', False)
shared_FWHM = fit_pams.get('shared_FWHM', False) \
or fit_pams.get('shared_fwhm', False)
prior_dict = fit_pams.get('priors', {}) \
or fit_pams.get('priors', {})
allow_emission = fit_pams.get('allow_emission', False)
if len(lines_dict['lines']) < 2:
if free_Rp is True and free_winds is True:
model_case = 0
if free_Rp is True and free_winds is False:
model_case = 1
if free_Rp is False and free_winds is True:
model_case = 2
if free_Rp is False and free_winds is False:
model_case = 3
else:
if free_Rp is True:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 10
if shared_winds is True and shared_FWHM is False:
model_case = 11
if shared_winds is False and shared_FWHM is True:
model_case = 12
if shared_winds is True and shared_FWHM is True:
model_case = 13
else:
if shared_winds is False and shared_FWHM is False:
model_case = 14
if shared_winds is False and shared_FWHM is True:
model_case = 15
else:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 20
if shared_winds is True and shared_FWHM is False:
model_case = 21
if shared_winds is False and shared_FWHM is True:
model_case = 22
if shared_winds is True and shared_FWHM is True:
model_case = 23
else:
if shared_winds is False and shared_FWHM is False:
model_case = 24
if shared_winds is False and shared_FWHM is True:
model_case = 25
jitter_flag = fit_pams.get('jitter', True)
pyde_flag = fit_pams.get('pyde', True)
print()
print(' free_Rp: (default: True) ', free_Rp)
print(' free_winds: (default: True) ', free_winds)
print(' shared_winds: (default: False) ', shared_winds)
print(' shared_FWHM: (default: False) ', shared_FWHM)
print(' jitter: (default: True) ', jitter_flag)
print(' # lines: ', len(lines_dict['lines']))
print(' model_case: ', model_case)
""" parameters list:
to be updated
pams_dict = {} # dictionary containing the index of a given parameter
pams_list = [] # list with the parameter names ordered according to their index
boundaries = np.empty([0, 2]) # boundaries for MCMC / nested sampling
theta_start = np.empty(0) # starting point for MCMC
lines_center = np.empty(0) # laboratory wavelength of spectral lines
pam_index = 0 # keep track of the number of variables
for line_key, line_val in lines_dict['lines'].items():
pam_name = line_key + '_contrast'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 1.00]], axis=0)
theta_start = np.append(theta_start, 0.010)
pam_index += 1
lines_center = np.append(lines_center, line_val)
# skip the inclusion of FWHM as a free parameter for each line
if the shared FWHM is selected
#
if model_case in [0, 1, 2, 3, 10, 11, 14, 20, 21, 24]:
# if not lines_dict['fit_parameters']['shared_fwhm']:
pam_name = line_key + '_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.0)
pam_index += 1
# if lines_dict['fit_parameters']['fixed_separation']: continue
# if not lines_dict['fit_parameters']['lines_shift']: continue
if model_case in [0, 2, 10, 12, 20, 22]:
pam_name = line_key + '_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.00, 5.00]], axis=0)
theta_start = np.append(theta_start, 0.00)
pam_index += 1
if model_case in [12, 13, 15, 22, 23, 25]:
# if lines_dict['fit_parameters']['shared_fwhm']:
pam_name = 'shared_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.000, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.000)
pam_index += 1
if model_case in [11, 13, 21, 23]:
# if lines_dict['fit_parameters']['fixed_separation'] and lines_dict['fit_parameters']['lines_shift']:
pam_name = 'shared_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.0, 5.0]], axis=0)
theta_start = np.append(theta_start, 0.000)
pam_index += 1
if model_case in [0, 1, 10, 11, 12, 13, 14, 15]:
pams_dict['rp_factor'] = pam_index
pams_list.append('rp_factor')
boundaries = np.append(boundaries, [[0.5, 2.0]], axis=0)
theta_start = np.append(theta_start, 1.0)
pam_index += 1
pams_dict['K_planet'] = pam_index
pams_list.append('K_planet')
boundaries = np.append(boundaries,
[[-300., planet_dict['RV_semiamplitude']
[0]+ 300.]],
axis=0)
theta_start = np.append(
theta_start, planet_dict['RV_semiamplitude'][0])
pam_index += 1
pam_name = 'jitter'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[10**(-12), 0.01]], axis=0)
theta_start = np.append(theta_start, 10**(-11))
pam_index += 1
for ii in range(0, pam_index):
print(pams_list[ii], ' ', boundaries[ii, :],
' ', theta_start[ii])
ndim = pam_index
"""
for night in night_dict:
print()
print("transmission_mcmc Night: {0:s}".format(night))
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
preparation = preparation_input[it_string]
else:
preparation = preparation_input
it_string = ''
""" This need to be checked only once, so it's ok to take the output of the last night
and propagate it to the rest of subroutine
"""
if len(it_string) > 0:
pca_output = True
else:
pca_output = False
try:
mcmc_data = load_from_cpickle(subroutine_name + '_data', config_in['output'], night, lines_label, it_string)
clv_rm_radius = mcmc_data['clv_rm_radius']
clv_rm_grid = mcmc_data['clv_rm_grid']
transmission_spec = mcmc_data['transmission_spec']
transmission_spec_err = mcmc_data['transmission_spec_err']
wave_meshgrid = mcmc_data['wave_meshgrid']
time_meshgrid = mcmc_data['time_meshgrid']
planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
jitter_index = mcmc_data['jitter_index']
n_jitter = mcmc_data['n_jitter']
print(" Loading MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
except FileNotFoundError:
print(" Computing MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
calib_data = load_from_cpickle(
'calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(
config_in['output'], night, lists['observations'])
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
else:
# workaround if CLV correction is not available
clv_rm_models = {'common': {}}
clv_rm_models['common']['n_radius_grid'] = 3
clv_rm_models['common']['radius_grid'] = np.asarray(
[0.5, 1.0, 1.5])
processed = {
'subroutine': subroutine_name,
}
processed['common'] = {
'range': lines_dict['fit_parameters']['range']
}
processed['common']['wave'] = np.arange(processed['common']['range'][0],
processed['common']['range'][1],
lines_dict['fit_parameters']['bin_step'],
dtype=np.double)
processed['common']['size'] = len(processed['common']['wave'])
processed['common']['step'] = np.ones(
processed['common']['size'], dtype=np.double) * lines_dict['fit_parameters']['bin_step']
processed['common_extended'] = {
'range': lines_dict['range']
}
processed['common_extended']['wave'] = np.arange(processed['common_extended']['range'][0],
processed['common_extended']['range'][1],
lines_dict['fit_parameters']['bin_step'],
dtype=np.double)
processed['common_extended']['size'] = len(
processed['common_extended']['wave'])
processed['common_extended']['step'] = np.ones(
processed['common_extended']['size'], dtype=np.double) * lines_dict['fit_parameters']['bin_step']
for obs in lists['observations']:
""" we start from the e2ds file, after correction for blaze and
division by the master-out
Observation data:
wave: input_data[obs]['wave']
step: input_data[obs]['step']
flux: preparation[obs]['ratio']
ferr: preparation[obs]['ratio']
"""
""" First step: we rebin the spectra in the Stellar Reference Frame,
with the step size decided by the user specifically for the fit
"""
if pca_output:
preserve_flux = False
blaze = np.ones_like(calib_data['blaze'])
else:
preserve_flux = input_data[obs].get('absolute_flux', True)
blaze = calib_data['blaze']
processed[obs] = {}
processed[obs]['rebinned'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
blaze,
processed['common']['wave'],
processed['common']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux)
processed[obs]['rebinned_err'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio_err'],
blaze,
processed['common']['wave'],
processed['common']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux,
is_error=True)
processed[obs]['rebinned_extended'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
preparation[obs]['ratio'],
blaze,
processed['common_extended']['wave'],
processed['common_extended']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux)
if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
""" Continuum normalization preparatory steps:
1) exclusion of regions with planetary lines
2) exclusion of regions with stellar lines
3) Polynomial fit of selected regions
Boolean array initialized to all True values, fit is
performed on the extended region and then applied to the fit subset
"""
processed['common_extended']['line_exclusion'] = (
processed['common_extended']['wave'] > 0.)
""" Continuum normalization:
1) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
for line_key, line_val in lines_dict['lines'].items():
line_extension = 1.2 * \
planet_dict['RV_semiamplitude'][0] * \
line_val / speed_of_light_km
processed['common_extended']['line_exclusion'] = processed['common_extended']['line_exclusion'] & (
np.abs(processed['common_extended']['wave']-line_val) > line_extension)
""" Continuum normalization:
2) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
try:
stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models['common']['norm_convolved'],
processed['common_extended']['wave'],
processed['common_extended']['step'],
preserve_flux=False)
stellar_spectrum_derivative = first_derivative(
processed['common_extended']['wave'], stellar_spectrum_rebinned)
cont_10perc = np.percentile(np.abs(stellar_spectrum_derivative), norm_pams['percentile_selection'])
processed['common_extended']['line_exclusion'] = processed['common_extended']['line_exclusion'] \
& (np.abs(stellar_spectrum_derivative) < cont_10perc) \
& (stellar_spectrum_rebinned > norm_pams['lower_threshold'])
except KeyError:
print(
"No stellar synthetic spectrum from CLV models, some stellar lines may be included transmission normalization ")
for obs in lists['observations']:
selection = processed['common_extended']['line_exclusion'] & (
processed[obs]['rebinned_extended'] > np.std(processed[obs]['rebinned_extended']))
processed[obs]['norm_coeff'] = \
np.polynomial.chebyshev.chebfit(processed['common_extended']['wave'][selection],
processed[obs]['rebinned_extended'][selection],
norm_pams['spectra_poly_degree'])
processed[obs]['continuum'] = np.polynomial.chebyshev.chebval(
processed['common']['wave'], processed[obs]['norm_coeff'])
processed[obs]['normalized'] = processed[obs]['rebinned'] / \
processed[obs]['continuum']
processed[obs]['normalized_err'] = processed[obs]['rebinned_err'] / \
processed[obs]['continuum']
elif norm_pams['normalize_transmission'] and (
norm_pams['normalization_model'] == 'savgol'
or norm_pams['normalization_model'] == 'savitzky-golay'):
print(' Normalization using Savitzky-Golay filter')
for obs in lists['observations']:
if norm_pams['normalize_rebinned']:
processed[obs]['continuum'] = savgol_filter(preparation[obs]['rebinned'],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
else:
normalization_model = preparation[obs]['ratio'] * 0.00
for order in range(0, observational_pams['n_orders']):
normalization_model[order,:] = savgol_filter(preparation[obs]['ratio'][order,:],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
processed[obs]['continuum'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
normalization_model,
blaze,
processed['common']['wave'],
processed['common']['step'],
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'],
preserve_flux=preserve_flux)
processed[obs]['continuum_coeff'] = None
processed[obs]['normalized'] = processed[obs]['rebinned'] / \
processed[obs]['continuum']
processed[obs]['normalized_err'] = processed[obs]['rebinned_err'] / \
processed[obs]['continuum']
else:
for obs in lists['observations']:
processed[obs]['continuum_coeff'] = None
processed[obs]['continuum'] = np.ones_like(processed['common']['wave'])
processed[obs]['normalized'] = processed[obs]['rebinned'].copy()
processed[obs]['normalized_err'] = processed[obs]['rebinned_err'].copy()
processed['common']['n_obs'] = len(lists['transit_full'])
processed['common']['n_radius_grid'] = clv_rm_models['common']['n_radius_grid']
processed['common']['radius_grid'] = clv_rm_models['common']['radius_grid']
clv_rm_radius = clv_rm_models['common']['radius_grid']
""" We are moving the values of interest from dictionaries to arrays
in order to speed up the MCMC
1) clv_rm_grid: array with all the CLV models, as a function of the
radius of the planet
2) time_from_transit: BJD_TDB - T0
3) planet_RVsinusoid: Fractional RV of the planet (K=1) - from a meshgrid
"""
clv_rm_grid = np.ones([processed['common']['n_radius_grid'],
processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
time_from_transit = np.empty(
processed['common']['n_obs'], dtype=np.double)
transmission_spec = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
transmission_spec_err = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
for i_obs, obs in enumerate(lists['transit_full']):
time_from_transit[i_obs] = observational_pams[obs]['BJD'] - \
observational_pams['time_of_transit']
# planet_RVsinusoid[i_obs] = np.sin(2*np.pi / planet_dict['period'][0] * time_from_transit[i_obs])
transmission_spec[i_obs, :] = processed[obs]['normalized']
transmission_spec_err[i_obs,
:] = processed[obs]['normalized_err']
if clv_rm_correction is False:
continue
for i_r in range(0, processed['common']['n_radius_grid']):
""" CLV Synthetic models are in the Stellar Reference system,
so no shift is required """
clv_rm_grid[i_r, i_obs, :] = \
rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][i_r, :],
processed['common']['wave'],
processed['common']['step'],
preserve_flux=False)
# preserve_flux should be True or False?
# False if the spectra are already normalized
#colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(
# lists, observational_pams)
#fig = plt.figure(figsize=(12, 6))
#gs = GridSpec(2, 2, width_ratios=[50, 1])
#ax1 = plt.subplot(gs[0, 0])
#ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
#cbax1 = plt.subplot(gs[:, 1])
#i_r = 0
#for i_obs, obs in enumerate(lists['transit_full']):
# ax1.plot(processed['common']['wave'],
# clv_rm_grid[i_r, i_obs, :],
# color=colors_plot['mBJD'][obs], alpha=0.2)
#i_r = processed['common']['n_radius_grid']-1
#for i_obs, obs in enumerate(lists['transit_full']):
# ax2.plot(processed['common']['wave'],
# clv_rm_grid[i_r, i_obs, :],
# color=colors_plot['mBJD'][obs], alpha=0.2)
#ax1.set_title(
# 'Night: {0:s} \n CLV+RM correction, convolved and normalized '.format(night))
#ax2.set_title('Out of transit')
#ax2.set_xlabel('$\lambda$ [$\AA$]')
#sm = plt.cm.ScalarMappable(
# cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
#sm.set_array([]) # You have to set a dummy-array for this to work...
#cbar = plt.colorbar(sm, cax=cbax1)
#cbar.set_label('BJD - 2450000.0')
#fig.subplots_adjust(wspace=0.05, hspace=0.4)
#plt.show()
#quit()
remove_outliers = (np.abs(transmission_spec - 1.) > 0.5)
transmission_spec[remove_outliers] = 1.0
transmission_spec_err[remove_outliers] = 1.0
wave_meshgrid, time_meshgrid = np.meshgrid(
processed['common']['wave'], time_from_transit)
planet_RVsinusoid = np.sin(
2*np.pi / planet_dict['period'][0] * time_meshgrid)
if jitter_flag:
jitter_index = []
n_jitter = 1
else:
jitter_index = None
n_jitter = 0
mcmc_data = {
'observations': lists['transit_full'],
'common_wave': processed['common']['wave'],
'common_step': processed['common']['step'],
'clv_rm_grid': clv_rm_grid,
'transmission_spec': transmission_spec,
'transmission_spec_err': transmission_spec_err,
'wave_meshgrid': wave_meshgrid,
'time_meshgrid': time_meshgrid,
'planet_RVsinusoid': planet_RVsinusoid,
'clv_rm_radius': clv_rm_models['common']['radius_grid'],
'n_obs': len(lists['transit_full']),
'n_radius_grid': clv_rm_models['common']['n_radius_grid'],
'jitter_index': jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name + '_data', mcmc_data,
config_in['output'], night, lines_label, it_string)
# Forcing memory deallocation
clv_rm_models = None
mcmc_data = None
print()
print("transmission_binned_mcmc ")
try:
results_dict = load_from_cpickle(subroutine_name+'_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string)
print(" Transmission MCMC analysis for lines {0:s}, night: {1:s} already performed".format(
lines_label, night))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
start_average = np.average(results_dict['point_start'], axis=0)
ndim = results_dict['ndim']
med_lines_model = results_dict['results']['lines_model']
if 'derived' in results_dict:
recompute_derived = False
else:
recompute_derived = True
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if recompute_derived and key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
# R(h) = np.sqrt(1+h/delta)
# print(key[-8:], key[:3])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
continue
except FileNotFoundError:
print()
# getting fit parameters
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter, allow_emission=allow_emission)
ndim = len(theta_start)
if pyde_flag:
ngen = sampler_pams.get('n_gen', 64000)
else:
ngen = 0
nwalkers_mult = sampler_pams.get('n_walkers_mult', 2)
nwalkers = sampler_pams.get('n_walkers', nwalkers_mult * ndim)
nthin = sampler_pams.get('n_thin', 50)
nsteps = sampler_pams.get('n_steps', 20000)
nburnin = sampler_pams.get('n_burnin', 10000)
ndata = np.size(wave_meshgrid)
if pams_dict.get('rp_factor', False):
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
if pyde_flag:
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
else:
print(' no PyDE optimization, MCMC will start from default values')
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
' n_burnin: (default: 10000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
wave_meshgrid,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
wave_meshgrid,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
'theta_start': theta_start,
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
- linear approximation or the orbit near the transit event
Computation is performed by moving to the Solar Barycenter, than to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of star relatively to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
start_average = np.average(results_dict['point_start'], axis=0)
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
save_to_cpickle(subroutine_name+'_'+sampler_name+'_results',
results_dict, config_in['output'], night, lines_label, it_string)
# print(' *** physical output')
#
# results_dict['results'] = {
# 'lines_model': med_lines_model,
# 'clv_model': med_clv_model,
# 'lines_array': med_lines_array,
# 'planet_K': med_planet_K,
# 'planet_R': med_planet_R,
# 'jitter': med_jitter
# }
""" Analysis of the entire dataset """
print()
try:
all_mcmc_data = load_from_cpickle(subroutine_name+'_data', config_in['output'], night='', lines=lines_label, it_string=it_string)
all_clv_rm_radius = all_mcmc_data['clv_rm_radius']
all_clv_rm_grid = all_mcmc_data['clv_rm_grid']
all_transmission_spec = all_mcmc_data['transmission_spec']
all_transmission_spec_err = all_mcmc_data['transmission_spec_err']
all_wave_meshgrid = all_mcmc_data['wave_meshgrid']
all_time_meshgrid = all_mcmc_data['time_meshgrid']
all_planet_RVsinusoid = all_mcmc_data['planet_RVsinusoid']
all_observations = all_mcmc_data['observations']
all_n_obs = all_mcmc_data['n_obs']
all_n_radius_grid = all_mcmc_data['n_radius_grid']
all_jitter_index = all_mcmc_data['jitter_index']
n_jitter = all_mcmc_data['n_jitter']
except:
n_jitter = 0
for night in night_dict:
mcmc_data = load_from_cpickle(subroutine_name+'_data', config_in['output'], night, lines_label, it_string=it_string)
try:
# Building the arrays for the full analysis
all_clv_rm_grid = np.concatenate(
(all_clv_rm_grid, mcmc_data['clv_rm_grid']), axis=1)
all_transmission_spec = np.concatenate(
(all_transmission_spec, mcmc_data['transmission_spec']))
all_transmission_spec_err = np.concatenate(
(all_transmission_spec_err, mcmc_data['transmission_spec_err']))
all_wave_meshgrid = np.concatenate(
(all_wave_meshgrid, mcmc_data['wave_meshgrid']))
all_time_meshgrid = np.concatenate(
(all_time_meshgrid, mcmc_data['time_meshgrid']))
all_planet_RVsinusoid = np.concatenate(
(all_planet_RVsinusoid, mcmc_data['planet_RVsinusoid']))
all_observations = np.concatenate(
(all_observations, mcmc_data['observations']))
all_n_obs += mcmc_data['n_obs']
if jitter_flag:
all_jitter_index = np.concatenate(
(all_jitter_index, n_jitter*np.ones(np.shape(mcmc_data['wave_meshgrid']), dtype=np.int16)))
n_jitter += 1
except NameError:
""" This error is expected when retrieving the data of the first night"""
all_clv_rm_radius = mcmc_data['clv_rm_radius']
all_clv_rm_grid = mcmc_data['clv_rm_grid']
all_transmission_spec = mcmc_data['transmission_spec']
all_transmission_spec_err = mcmc_data['transmission_spec_err']
all_wave_meshgrid = mcmc_data['wave_meshgrid']
all_time_meshgrid = mcmc_data['time_meshgrid']
all_planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
all_observations = mcmc_data['observations']
all_n_obs = mcmc_data['n_obs']
all_n_radius_grid = mcmc_data['n_radius_grid']
if jitter_flag:
all_jitter_index = n_jitter * \
np.ones(
np.shape(mcmc_data['wave_meshgrid']), dtype=np.int16)
n_jitter += 1
else:
all_jitter_index = None
all_mcmc_data = {
'observations': all_observations,
'clv_rm_grid': all_clv_rm_grid,
'transmission_spec': all_transmission_spec,
'transmission_spec_err': all_transmission_spec_err,
'wave_meshgrid': all_wave_meshgrid,
'time_meshgrid': all_time_meshgrid,
'planet_RVsinusoid': all_planet_RVsinusoid,
'clv_rm_radius': all_clv_rm_radius,
'n_obs': all_n_obs,
'n_radius_grid': all_n_radius_grid,
'jitter_index': all_jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name+'_data', all_mcmc_data,
config_in['output'], night='', lines=lines_label, it_string=it_string)
try:
results_dict = load_from_cpickle(subroutine_name+ '_'+ sampler_name+'_results',
config_in['output'], night='', lines=lines_label, it_string=it_string)
print(" Transmission MCMC analysis for lines {0:s} already performed ".format(
lines_label))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
ndim = results_dict['ndim']
start_average = np.average(results_dict['point_start'], axis=0)
if 'derived' in results_dict:
recompute_derived = False
else:
recompute_derived = True
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if recompute_derived and key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
# R(h) = np.sqrt(1+h/delta)
# print(key[-8:], key[:3])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
except FileNotFoundError:
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter, allow_emission=allow_emission)
ndim = len(theta_start)
ngen = sampler_pams.get('n_gen', 64000)
nwalkers_mult = sampler_pams.get('n_walkers_mult', 2)
nwalkers = sampler_pams.get('n_walkers', nwalkers_mult * ndim)
nthin = sampler_pams.get('n_thin', 50)
nsteps = sampler_pams.get('n_steps', 20000)
nburnin = sampler_pams.get('n_burnin', 10000)
ndata = np.size(all_wave_meshgrid)
if pams_dict.get('rp_factor', False):
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
' n_burnin: (default: 10000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
all_wave_meshgrid,
all_transmission_spec,
all_transmission_spec_err,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
all_wave_meshgrid,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
all_wave_meshgrid,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
'theta_start': theta_start
#'BIC': BIC,
#'BIC_map': BIC_map
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for night in night_dict:
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
""" No differentiation by night """
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
- linear approximation or the orbit near the transit event
Computation is performed by moving to the Solar Barycenter, than to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of star relatively to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
start_average = np.average(results_dict['point_start'], axis=0)
results_dict['derived'] = {}
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val,0],
chain_med[val,2],
chain_med[val,1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
if key[-8:]=='contrast':
key_name = key[:-8] + 'Rh'
sample_size = len(results_dict['flat_chain'][:,val])
print(sample_size)
planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
results_dict['derived'][key_name] = {}
results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])
print(' *** derived output ')
for key, val in results_dict['derived'].items():
chain_med = results_dict['derived'][key]['chain_med']
print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
chain_med[0],
chain_med[2],
chain_med[1]))
save_to_cpickle(subroutine_name +'_'+sampler_name+'_results',
results_dict, config_in['output'], night='', lines=lines_label, it_string=it_string)
print('MCMC completed')
# Update planet parameters
# deprecated
# try:
# _ = load_from_cpickle(
# 'observational', config_in['output'], night, lines_label)
# print(" Transmission MCMC results for lines {0:s} already store in observational array".format(
# lines_label))
# except FileNotFoundError:
#
# results_full = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night='', lines=lines_label)
#
# for night in night_dict:
#
# results_night = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night=night, lines=lines_label)
# lists = load_from_cpickle('lists', config_in['output'], night)
# observational_pams = load_from_cpickle(
# 'observational_pams', config_in['output'], night)
# for obs in lists['observations']:
#
# """ RV shift from the observer RF to the planet RF
# STRONG ASSUMPTIONS:
# - there is only the transiting planet in the system
# - the planet has null eccentricity
# - linear approximation or the orbit near the transit event
#
# Computation is performed by moving to the Solar Barycenter, than to the Stellar System Barycenter
# and finally onto the planet
# """
# observational_pams[obs]['rv_shift_ORF2PRF'] = \
# observational_pams[obs]['BERV'] \
# - observational_pams['RV_star']['RV_systemic'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# """ RV shift from Stellar Rest Frame to Planetary Rest Frame
# We have to take into account the RV of star relatively to the Barycenter
# """
# observational_pams[obs]['rv_shift_SRF2PRF'] = \
# + observational_pams[obs]['RV_bjdshift'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# observational_pams['Rp_factor'] = results_full['results']['planet_R']
# observational_pams['lines_array'] = results_full['results']['lines_array']
# observational_pams['jitter'] = results_full['results']['jitter']
# save_to_cpickle('observational', observational_pams,
# config_in['output'], night, lines_label)
def plot_transmission_binned_mcmc(config_in, lines_label, night_input='', reference='planetRF', pca_iteration=-1):
    """Plot the stored MCMC results of the binned transmission analysis.

    For each requested night (or for the combined all-nights analysis when
    ``night_input`` is empty) the routine reloads the pickled results written
    by the compute step, saves one chain-trace PNG per free parameter,
    recomputes the derived ``Rh`` chains when they are missing from older
    result files, and finally draws a pyGTC corner plot of the flattened
    posterior.

    Args:
        config_in: parsed configuration dictionary.
        lines_label: label of the spectral-line set to plot.
        night_input: night identifier; '' selects the full (all-nights) analysis.
        reference: rest-frame label, used only to compose the output file name.
        pca_iteration: PCA iteration to plot; a negative value falls back to
            the reference iteration stored in the preparation file.
    """
    night_dict = from_config_get_nights(config_in)
    planet_dict = from_config_get_planet(config_in)
    star_dict = from_config_get_star(config_in)
    clv_rm_dict = from_config_get_clv_rm(config_in)
    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]
    sampler_pams = lines_dict['sampler_parameters']
    sampler_name = sampler_pams.get('sampler_name', 'emcee')

    # '' means "plot the analysis of the full dataset"; otherwise accept a
    # single night name or a list of nights.
    if night_input == '':
        night_list = ['']
    else:
        night_list = np.atleast_1d(night_input)

    os.system('mkdir -p plots')

    # Workaround to check if the transmission spectrum has been obtained through PCA iterations
    # Only the first night is inspected (note the break): all nights are
    # assumed to share the same preparation mode.
    for night in night_dict:
        preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
        if preparation_input.get('pca_output', False):
            if pca_iteration >= 0:
                it_string = str(pca_iteration).zfill(2)
            else:
                it_string = str(preparation_input.get('ref_iteration')).zfill(2)
        else:
            it_string = ''
        preparation_input = None
        break

    for night in night_list:
        results_dict = load_from_cpickle(subroutine_name+'_'+sampler_name+'_results', config_in['output'], night, lines_label, it_string)
        print(" Transmission MCMC analysis for lines {0:s}, night: {1:s} already performed".format(
            lines_label, night))

        # One directory of chain-trace plots per analysis (full or per-night).
        if night == '':
            chains_dir = 'plots/mcmc_binned_chains_full/'
        else:
            chains_dir = 'plots/mcmc_binned_chains_' + night + '/'
        os.system('mkdir -p ' + chains_dir)

        # Unpack the sampler products stored by the compute step.
        pams_dict = results_dict['pams_dict']
        chain_med = results_dict['chain_med']
        lnprob_med = results_dict['lnprob_med']
        boundaries = results_dict['boundaries']
        flat_chain = results_dict['flat_chain']
        flat_lnprob = results_dict['flat_lnprob']
        nthin = results_dict['nthin']
        nsteps = results_dict['nsteps']
        nburnin = results_dict['nburnin']
        sampler_chain = results_dict['sampler_chain']
        start_average = np.average(results_dict['point_start'], axis=0)
        ndim = results_dict['ndim']
        med_lines_model = results_dict['results']['lines_model']

        # Older result files may lack the derived parameters: recompute them
        # on the fly in that case.
        if 'derived' in results_dict:
            recompute_derived = False
        else:
            recompute_derived = True
            results_dict['derived'] = {}

        # TODO improve output
        print(' *** sampler output (plotting the chains)')
        sample_size = np.size(flat_chain, axis=0)
        dimen_size = np.size(flat_chain, axis=1)

        # One extra column holds the ln-probability of each sample.
        corner_plot = {
            'samples': np.zeros([sample_size, dimen_size + 1]),
            'labels': [],
            'truths': [],
            'start': [],
        }
        i_corner = 0

        for key, val in pams_dict.items():
            # Median, upper/lower uncertainties, prior boundaries and the
            # average PyDE starting point for each free parameter.
            print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
                  chain_med[val,0],
                  chain_med[val,2],
                  chain_med[val,1],
                  boundaries[val, 0],
                  boundaries[val, 1],
                  start_average[val])
                  )
            # Derived radius in scale-height units, R(h) = sqrt(1 + contrast/delta),
            # propagated through the chain with a Gaussian sample of the
            # planet-to-star radius ratio.
            if recompute_derived and key[-8:]=='contrast':
                key_name = key[:-8] + 'Rh'
                planet_ratio_sample = np.random.normal(planet_dict['radius_ratio'][0],planet_dict['radius_ratio'][1],size=sample_size)
                results_dict['derived'][key_name] = {}
                results_dict['derived'][key_name]['flat_chain'] = np.sqrt(results_dict['flat_chain'][:,val]/planet_ratio_sample**2 + 1.)
                results_dict['derived'][key_name]['chain_med'] = compute_value_sigma(results_dict['derived'][key_name]['flat_chain'])

            corner_plot['samples'][:, i_corner] = flat_chain[:, val]
            corner_plot['labels'].append(re.sub('_', ' ', key))
            corner_plot['truths'].append(chain_med[val, 0])
            corner_plot['start'].append(start_average[val])
            i_corner += 1

            # Trace plot of every walker for this parameter; the red vertical
            # line marks the end of the burn-in (in thinned steps).
            file_name = chains_dir + repr(val) + '.png'
            fig = plt.figure(figsize=(12, 12))
            plt.title(key)
            plt.plot(sampler_chain[:, :, val].T, '-', alpha=0.5)
            plt.axvline(nburnin / nthin, c='r')
            plt.savefig(file_name, bbox_inches='tight', dpi=300)
            plt.close(fig)

        # Last corner-plot column: the ln-probability of each sample.
        corner_plot['samples'][:, -1] = flat_lnprob[:]
        corner_plot['labels'].append('ln-prob')
        corner_plot['truths'].append(lnprob_med[0])
        corner_plot['start'].append(None)
        # R(h) = np.sqrt(1+h/delta)
        # print(key[-8:], key[:3])

        print(' *** derived output ')
        for key, val in results_dict['derived'].items():
            chain_med = results_dict['derived'][key]['chain_med']
            print('{0:24s} {1:12f} {2:12f} {3:12f} (15-84 p)'.format(key,
                  chain_med[0],
                  chain_med[2],
                  chain_med[1]))

        print(' *** corner plot using pyGTC output ')
        filename_rad = subroutine_name + '_' + reference + '_cornerplot'
        output_file = get_filename(filename_rad, config_in['output'], night=night, lines=lines_label, it_string=it_string, extension='.pdf')
        print(' *** filename: ', output_file)
        # First attempt with three confidence levels; on failure fall back to
        # two levels.
        # NOTE(review): the bare except also hides unrelated pyGTC/matplotlib
        # errors — consider narrowing it after confirming the failure mode.
        try:
            GTC = pygtc.plotGTC(chains=corner_plot['samples'],
                                paramNames=corner_plot['labels'],
                                truths=[corner_plot['truths'],corner_plot['start']],
                                GaussianConfLevels=True,
                                nConfidenceLevels=3,
                                figureSize=12,
                                labelRotation= (True,True),
                                plotName='plots/'+output_file)
        except:
            GTC = pygtc.plotGTC(chains=corner_plot['samples'],
                                paramNames=corner_plot['labels'],
                                truths=[corner_plot['truths'],corner_plot['start']],
                                GaussianConfLevels=True,
                                nConfidenceLevels=2,
                                figureSize=12,
                                labelRotation= (True,True),
                                plotName='plots/'+output_file)
        # Release the figure handle; pyGTC has already written the PDF.
        GTC = None
        continue
def plot_transmission_binned_mcmc_deprecated(config_in, lines_label, night_input=''):
    """Deprecated plotting routine, kept for reference only.

    For each night it draws wavelength-vs-time maps (both ``contourf`` and
    ``pcolormesh``) of the CLV+RM model evaluated at R = 1.0 and of the
    observed transmission spectrum. Superseded by
    ``plot_transmission_binned_mcmc``.

    Args:
        config_in: parsed configuration dictionary.
        lines_label: label of the spectral-line set to plot.
        night_input: night identifier; '' iterates over every night.
    """
    night_dict = from_config_get_nights(config_in)
    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            clv_rm_corrected = load_from_cpickle(subroutine_name, config_in['output'], night)
            mcmc_data = load_from_cpickle(subroutine_name + '_data', config_in['output'], night, lines_label)
            clv_rm_radius = mcmc_data['clv_rm_radius']
            clv_rm_grid = mcmc_data['clv_rm_grid']
            transmission_spec = mcmc_data['transmission_spec']
            transmission_spec_err = mcmc_data['transmission_spec_err']
            wave_meshgrid = mcmc_data['wave_meshgrid']
            time_meshgrid = mcmc_data['time_meshgrid']
            planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
        except:
            # Missing pickles mean the MCMC step was never run for this night.
            print("No transmission spectrum results, no plots")
            print()
            continue

        """ Creation of the color array, based on the BJD of the observations
        """
        # NOTE(review): bjd and am are collected but never used below.
        bjd = []
        am = []
        for obs in lists['observations']:
            bjd.append(clv_rm_corrected[obs]['BJD'] - 2450000.0)
            am.append(clv_rm_corrected[obs]['AIRMASS'])

        from matplotlib.colors import BoundaryNorm
        from matplotlib.ticker import MaxNLocator
        cmap = plt.get_cmap('coolwarm')

        plot_data = transmission_spec.copy()
        from SLOPpy.subroutines.math_functions import interpolate2d_grid_nocheck
        # NOTE(review): plot_data is immediately overwritten, so this first
        # pair of maps actually shows the CLV+RM model at R=1.0, not the data.
        plot_data = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
        #clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
        vmin = plot_data.min()
        vmax = plot_data.max()
        levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
        norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
        plt.figure(figsize=(15, 10))
        PCF = plt.contourf(wave_meshgrid, time_meshgrid, plot_data, levels=levels, cmap=cmap)
        cbar = plt.colorbar(PCF)
        cbar.ax.set_ylabel('Intensity')
        plt.show()

        # Same model rendered with pcolormesh instead of filled contours.
        levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
        norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
        plt.figure(figsize=(15, 10))
        PCM = plt.pcolormesh(wave_meshgrid, time_meshgrid, plot_data,
                             vmin=vmin, vmax=vmax, cmap=cmap)
        cbar = plt.colorbar(PCM)
        cbar.ax.set_ylabel('Intensity')
        plt.show()

        # Second pair of maps: the observed transmission spectrum.
        plot_data = transmission_spec.copy()
        plot_data = transmission_spec.copy()
        #clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
        vmin = plot_data.min()
        vmax = plot_data.max()
        levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
        norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
        plt.figure(figsize=(15, 10))
        PCF = plt.contourf(wave_meshgrid, time_meshgrid, plot_data, levels=levels, cmap=cmap)
        cbar = plt.colorbar(PCF)
        cbar.ax.set_ylabel('Intensity')
        plt.show()

        levels = MaxNLocator(nbins=15).tick_values(vmin, vmax)
        norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
        plt.figure(figsize=(15, 10))
        PCM = plt.pcolormesh(wave_meshgrid, time_meshgrid, plot_data,
                             vmin=vmin, vmax=vmax, cmap=cmap)
        cbar = plt.colorbar(PCM)
        cbar.ax.set_ylabel('Intensity')
        plt.show()
| 77,242 | 45.475933 | 141 | py |
SLOPpy | SLOPpy-main/SLOPpy/interstellar_lines.bkp.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
# Public API of this module.
__all__ = ["compute_interstellar_lines", "plot_interstellar_lines"]

# Tag stored in the output pickles to identify the producing subroutine.
subroutine_name = 'interstellar_lines'

#def plot_identify_stellar_lines(config_in)
def compute_interstellar_lines(config_in):
    """Build and pickle an interstellar-line correction for each night.

    The fluxes of the telluric-standard observations are coadded, rescaled,
    and — for each configured interstellar line — the local continuum is
    fitted with a quadratic polynomial; a spline of the normalized profile
    then provides the correction factor inside the line core. Results are
    saved with ``save_to_cpickle`` and the night is skipped if a correction
    file already exists.

    Each line in the configuration is a sequence where, as used below,
    line[0] is the line center, line[1] the half-width of the line core and
    line[2] the half-width of the continuum region (same wavelength units as
    the spectra).

    Args:
        config_in: parsed configuration dictionary.
    """
    night_dict = from_config_get_nights(config_in)
    interstellar_lines = from_config_get_interstellar_lines(config_in)

    # Nothing to do when no interstellar lines are configured.
    if not interstellar_lines:
        return

    for night in night_dict:
        print()
        print("compute_interstellar_lines Night: ", night)

        # Skip the night if the correction has already been computed.
        try:
            interstellar = load_from_cpickle('interstellar_lines', config_in['output'], night)
            continue
        except:
            print()
            print(" No interstellar correction file found, computing now ")

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': subroutine_name
        }

        interstellar = {
            'subroutine': subroutine_name,
        }

        # NOTE(review): pyplot is imported here but never used in this
        # function — probably a leftover from debugging.
        import matplotlib.pyplot as plt

        for obs in lists['observations']:

            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }
            interstellar[obs] = {}

            """ for plotting purpose only"""
            # Blaze-corrected flux per wavelength step, with Poisson errors.
            processed[obs]['wave'] = input_data[obs]['wave']
            processed[obs]['flux'] = input_data[obs]['e2ds']/calib_data['blaze']/input_data[obs]['step']
            processed[obs]['flux_err'] = np.sqrt(input_data[obs]['e2ds'])/calib_data['blaze']/input_data[obs]['step']

            # Coadd only the telluric-standard observations; errors are
            # accumulated in quadrature (square root taken after the loop).
            if obs in lists['telluric']:
                try:
                    interstellar['flux_total'] += processed[obs]['flux']
                    interstellar['flux_total_err'] += processed[obs]['flux_err']**2
                except:
                    # First telluric observation: the keys do not exist yet,
                    # so the += above raises (KeyError) and we initialize here.
                    # The bare except is load-bearing — do not narrow it to
                    # NameError without checking.
                    interstellar['wave'] = input_data[obs]['wave']
                    interstellar['flux_total'] = processed[obs]['flux'][:,:]
                    interstellar['flux_total_err'] = processed[obs]['flux_err']**2

        # Convert the accumulated variances into 1-sigma errors.
        interstellar['flux_total_err'] = np.sqrt(interstellar['flux_total_err'])

        """ Zero or negative values are identified, flagged and substituted with another value """
        interstellar['flux_total'], interstellar['flux_total_err'], interstellar['null'] = \
            replace_values_errors(interstellar['flux_total'], interstellar['flux_total_err'], 0.0001)

        """rescaling"""
        interstellar['flux_rescaling'], interstellar['flux_rescaled'],interstellar['flux_rescaled_err'] = \
            perform_rescaling(interstellar['wave'],
                              interstellar['flux_total'],
                              interstellar['flux_total_err'],
                              observational_pams['wavelength_rescaling'])

        # Correction defaults to 1 (no effect) outside the line cores.
        interstellar['correction'] = np.ones(np.shape(interstellar['wave']))

        for line_name, line in interstellar_lines.items():

            interstellar[line_name] = {}

            # sel1: line core; sel2: surrounding continuum; sel3: both.
            sel1 = (np.abs(interstellar['wave']-line[0])<line[1])
            sel2 = (~sel1) & (np.abs(interstellar['wave']-line[0])<line[2])
            sel3 = (sel1 | sel2)

            # Quadratic continuum fit on the wings, then normalize the full
            # region by the fitted continuum.
            poly_coeff = np.polyfit(interstellar['wave'][sel2], interstellar['flux_rescaled'][sel2], 2)
            normalized = interstellar['flux_rescaled'][sel3]/np.polyval(poly_coeff, interstellar['wave'][sel3])

            interstellar[line_name]['spline_eval'], \
                interstellar[line_name]['spline_coeff'], \
                interstellar[line_name]['spline_knots'] = \
                compute_spline(interstellar['wave'][sel3], normalized, 0.04)

            # Evaluate the spline only inside the line core.
            interstellar['correction'][sel1] = sci_int.splev(interstellar['wave'][sel1], interstellar[line_name]['spline_coeff'])

            interstellar[line_name]['sel1'] = sel1
            interstellar[line_name]['sel2'] = sel2
            interstellar[line_name]['sel3'] = sel3

        save_to_cpickle('interstellar_lines_processed', processed, config_in['output'], night)
        save_to_cpickle('interstellar_lines', interstellar, config_in['output'], night)
def plot_interstellar_lines(config_in, night_input=''):
    """Plot the interstellar-line correction for one or more nights.

    The upper panel shows each rescaled observation with the core/continuum
    boundaries of every configured line; the lower panel shows the same
    spectra divided by the stored correction. Observations are colored by
    BJD through ``make_color_array``.

    Args:
        config_in: parsed configuration dictionary.
        night_input: night identifier; '' iterates over every night.
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)
    interstellar_lines = from_config_get_interstellar_lines(config_in)

    # Nothing to plot when no interstellar lines are configured.
    if not interstellar_lines:
        return

    if night_input=='':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_interstellar_lines Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('interstellar_lines_processed', config_in['output'], night)
            interstellar = load_from_cpickle('interstellar_lines', config_in['output'], night)
        except:
            print()
            print('No interstellar correction, no plots')
            continue

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        # Two stacked panels sharing the wavelength axis, plus a colorbar
        # column on the right.
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            """rescaling"""
            processed[obs]['flux_rescaling'], processed[obs]['flux_rescaled'], processed[obs]['flux_rescaled_err'] = \
                perform_rescaling(interstellar['wave'],
                                  processed[obs]['flux'],
                                  processed[obs]['flux_err'],
                                  observational_pams['wavelength_rescaling'])

            # Top: rescaled spectrum; bottom: after dividing the correction.
            ax1.scatter(interstellar['wave'], processed[obs]['flux_rescaled'],
                        s=1, c=line_colors[i])
            #ax1.plot(interstellar['wave'], interstellar['correction'], c='black')
            ax2.scatter(interstellar['wave'], processed[obs]['flux_rescaled']/interstellar['correction'],
                        s=1, c=line_colors[i])

        for line_name, line in interstellar_lines.items():
            # Blue lines mark the line core (±line[1]), green lines the
            # continuum region (±line[2]); same markers on both panels.
            #ax1.axvline(line[0], c='k')
            ax1.axvline(line[0]-line[1], c='b')
            ax1.axvline(line[0]+line[1], c='b')
            ax1.axvline(line[0]-line[2], c='g')
            ax1.axvline(line[0]+line[2], c='g')

            #ax2.axvline(line[0], c='k')
            ax2.axvline(line[0]-line[1], c='b')
            ax2.axvline(line[0]+line[1], c='b')
            ax2.axvline(line[0]-line[2], c='g')
            ax2.axvline(line[0]+line[2], c='g')

            # Track the plotting range across the configured lines; the first
            # iteration raises (names undefined) and initializes the values.
            # NOTE(review): these names persist across nights, so later
            # nights reuse extrema from earlier ones — confirm if intended.
            try:
                wave_min = min(wave_min, line[0])
                wave_max = max(wave_max, line[0])
                range_max = max(range_max, line[2])
            except:
                wave_min = line[0]
                wave_max = line[0]
                range_max = line[2]

        #ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        ax1.set_xlim(wave_min-2*range_max, wave_max+2*range_max)
        ax2.set_xlabel('$\lambda$ [$\AA$]')
        ax1.set_xlabel('$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum_average.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
# Public API of this module
__all__ = [
    'compute_transmission_spectrum_average',
    'plot_transmission_spectrum_average'
]

# Tags used to build the names of the pickle files read/written by this module
subroutine_name = 'transmission_spectrum_average'
# Prefix of the per-night transmission spectrum files produced upstream
pick_files = 'transmission_spectrum'
# NOTE(review): `sampler` is not referenced anywhere in the visible code -
# presumably kept for consistency with the MCMC subroutines; confirm before removing
sampler = 'emcee'
def compute_transmission_spectrum_average(config_in, lines_label, reference='planetRF', pca_iteration=-1):
    """ Compute the weighted average of the transmission spectra of all the nights.

    For each result selection (user-provided parameters and the MCMC
    median/MAP solutions), the in-transit and out-of-transit transmission
    spectra of every night are averaged together using the inverse variance as
    weight; the averages (and the associated CLV+RM model and uncorrected
    spectra) are then rebinned onto the shared binned wavelength grid and
    saved to disk as a pickle file.

    :param config_in: parsed configuration dictionary
    :param lines_label: label of the spectral-line set to process
    :param reference: reference frame of the input spectra (default 'planetRF')
    :param pca_iteration: PCA iteration to use; a negative value picks the
        reference iteration stored during the preparation step
    """

    night_dict = from_config_get_nights(config_in)
    spectral_lines = from_config_get_spectral_lines(config_in)
    line_iter_dict = spectral_lines[lines_label]

    shared_data = load_from_cpickle('shared', config_in['output'])

    total_lists = {}

    """ Using the user defined range to define the transmission spectrum region
        This range can be larger than the one defined for the MCMC range, and it
        MUST include the continuum windows for the transmission lightcurve
    """
    shared_selection = (shared_data['coadd']['wave'] >= line_iter_dict['range'][0]) \
        & (shared_data['coadd']['wave'] < line_iter_dict['range'][1])
    binned_selection = (shared_data['binned']['wave'] >= line_iter_dict['range'][0]) \
        & (shared_data['binned']['wave'] < line_iter_dict['range'][1])

    transmission_average_template = {
        'subroutine': subroutine_name + '_' + reference,
        'range': line_iter_dict['range'],
        'wave': shared_data['coadd']['wave'][shared_selection],
        'step': shared_data['coadd']['step'][shared_selection],
        # bugfix: np.int was removed in NumPy 1.24, the builtin int is used instead
        'size': int(np.sum(shared_selection)),
        'binned_wave': shared_data['binned']['wave'][binned_selection],
        'binned_step': shared_data['binned']['step'][binned_selection],
        'binned_size': int(np.sum(binned_selection))
    }

    results_list = ['user',
                    'mcmc_night_MED',
                    'mcmc_night_MAP',
                    'mcmc_global_MED',
                    'mcmc_global_MAP']

    for results_selection in results_list:

        """ First check to see if we need to compute the average transmission
            iteratively when PCA has been employed """
        for night in night_dict:
            preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
            if preparation_input.get('pca_output', False):
                if pca_iteration >= 0:
                    it_string = str(pca_iteration).zfill(2)
                else:
                    # bugfix: the reference iteration is stored in the preparation
                    # file (`pca_parameters` was an undefined name)
                    it_string = str(preparation_input.get('ref_iteration')).zfill(2)
            else:
                it_string = ''
            preparation_input = None
            break

        transmission_average = transmission_average_template.copy()

        try:
            transmission_average = load_from_cpickle(subroutine_name + '_' + reference + '_' + results_selection, config_in['output'], lines=lines_label, it_string=it_string)
            print("{0:45s} {1:s} {2:s}".format(subroutine_name + '_' + reference, results_selection, 'Retrieved'))
            continue
        except (FileNotFoundError, IOError):
            skip_iteration = False

        # bugfix: the counters are reset for every result selection, otherwise
        # the work arrays allocated below would keep growing at each pass with
        # empty (zero-weight) rows
        total_n_transit_full = 0
        total_n_transit_out = 0

        for night in night_dict:

            """ Retrieving the list of observations"""
            total_lists[night] = load_from_cpickle('lists', config_in['output'], night)

            try:
                transmission_average[night] = load_from_cpickle(pick_files + '_' + reference + '_' + results_selection, config_in['output'], night, lines_label, it_string)
            except (FileNotFoundError, IOError):
                # the per-night transmission spectra for this selection are not
                # available (yet): the whole selection is skipped
                skip_iteration = True

            total_n_transit_full += len(total_lists[night]['transit_full'])
            total_n_transit_out += len(total_lists[night]['transit_out'])

        if skip_iteration: continue

        print("{0:45s} {1:s} {2:s}".format(subroutine_name + '_' + reference, results_selection, 'Computing'))

        # one row per in-transit (full) observation, across all nights
        array_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
        weights_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
        clvrm_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
        uncorr_average_in = np.zeros([total_n_transit_full, transmission_average['size']])
        i_total_in = 0

        # one row per out-of-transit observation, across all nights
        array_average_out = np.zeros([total_n_transit_out, transmission_average['size']])
        weights_average_out = np.zeros([total_n_transit_out, transmission_average['size']])
        i_total_out = 0

        for night in night_dict:

            for obs in total_lists[night]['transit_full']:
                array_average_in[i_total_in, :] = transmission_average[night][obs]['normalized'][:]
                weights_average_in[i_total_in, :] = 1./(transmission_average[night][obs]['normalized_err']**2.)
                clvrm_average_in[i_total_in, :] = transmission_average[night][obs]['clv_model_rebinned'][:]
                uncorr_average_in[i_total_in, :] = transmission_average[night][obs]['normalized_uncorrected'][:]
                i_total_in += 1

            for obs in total_lists[night]['transit_out']:
                array_average_out[i_total_out, :] = transmission_average[night][obs]['normalized'][:]
                weights_average_out[i_total_out, :] = 1. / (transmission_average[night][obs]['normalized_err'] ** 2.)
                i_total_out += 1

        # inverse-variance weighted averages; `returned=True` also gives the sum
        # of the weights, from which the error on the average is derived
        transmission_average['average'], transmission_average['sum_weights'] = np.average(
            array_average_in, axis=0, weights=weights_average_in, returned=True)
        transmission_average['average_err'] = 1./np.sqrt(transmission_average['sum_weights'])

        transmission_average['average_clv_model'], _ = np.average(
            clvrm_average_in, axis=0, weights=weights_average_in, returned=True)

        transmission_average['average_uncorrected'], _ = np.average(
            uncorr_average_in, axis=0, weights=weights_average_in, returned=True)

        transmission_average['binned'] = \
            rebin_1d_to_1d(transmission_average['wave'],
                           transmission_average['step'],
                           transmission_average['average'],
                           transmission_average['binned_wave'],
                           transmission_average['binned_step'],
                           preserve_flux=False)

        transmission_average['binned_err'] = \
            rebin_1d_to_1d(transmission_average['wave'],
                           transmission_average['step'],
                           transmission_average['average_err'],
                           transmission_average['binned_wave'],
                           transmission_average['binned_step'],
                           preserve_flux=False,
                           is_error=True)

        transmission_average['binned_clv_model'] = \
            rebin_1d_to_1d(transmission_average['wave'],
                           transmission_average['step'],
                           transmission_average['average_clv_model'],
                           transmission_average['binned_wave'],
                           transmission_average['binned_step'],
                           preserve_flux=False)

        transmission_average['binned_uncorrected'] = \
            rebin_1d_to_1d(transmission_average['wave'],
                           transmission_average['step'],
                           transmission_average['average_uncorrected'],
                           transmission_average['binned_wave'],
                           transmission_average['binned_step'],
                           preserve_flux=False)

        transmission_average['average_out'], transmission_average['sum_weights_out'] = np.average(
            array_average_out, axis=0, weights=weights_average_out, returned=True)
        transmission_average['average_out_err'] = 1./np.sqrt(transmission_average['sum_weights_out'])

        transmission_average['binned_out'] = \
            rebin_1d_to_1d(transmission_average['wave'],
                           transmission_average['step'],
                           transmission_average['average_out'],
                           transmission_average['binned_wave'],
                           transmission_average['binned_step'],
                           preserve_flux=False)

        transmission_average['binned_out_err'] = \
            rebin_1d_to_1d(transmission_average['wave'],
                           transmission_average['step'],
                           transmission_average['average_out_err'],
                           transmission_average['binned_wave'],
                           transmission_average['binned_step'],
                           preserve_flux=False,
                           is_error=True)

        save_to_cpickle(subroutine_name + '_' + reference + '_' + results_selection, transmission_average, config_in['output'], lines=lines_label, it_string=it_string)
def plot_transmission_spectrum_average(config_in, lines_label, night_input='', results_input='', reference='planetRF', pca_iteration=-1):
    """ Plot the night-averaged transmission spectra.

    For each result selection, the combined in-transit (top panel) and
    out-of-transit (bottom panel) average spectra are plotted together with
    the individual nightly averages, which are vertically offset for
    readability; each figure is saved as a PDF in the ./plots directory.

    :param config_in: parsed configuration dictionary
    :param lines_label: label of the spectral-line set to plot
    :param night_input: kept for API compatibility with the other plot
        routines (the figure always includes every night)
    :param results_input: single result selection to plot; if empty, all the
        standard selections are attempted
    :param reference: reference frame of the input spectra (default 'planetRF')
    :param pca_iteration: PCA iteration to use; a negative value picks the
        reference iteration stored during the preparation step
    """

    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]

    if results_input == '':
        results_list = ['user',
                        'mcmc_night_MED',
                        'mcmc_night_MAP',
                        'mcmc_global_MED',
                        'mcmc_global_MAP']
    else:
        results_list = np.atleast_1d(results_input)

    # os.makedirs is portable, unlike a shell call to `mkdir -p`
    os.makedirs('plots', exist_ok=True)

    interactive_plots = from_config_get_interactive_plots(config_in)

    # Workaround to check if the transmission spectrum has been obtained through PCA iterations
    night_dict = from_config_get_nights(config_in)
    for night in night_dict:
        preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
        if preparation_input.get('pca_output', False):
            if pca_iteration >= 0:
                it_string = str(pca_iteration).zfill(2)
            else:
                it_string = str(preparation_input.get('ref_iteration')).zfill(2)
        else:
            it_string = ''
        preparation_input = None
        break

    for results_selection in results_list:

        try:
            transmission_average = load_from_cpickle(subroutine_name + '_' + reference + '_' + results_selection, config_in['output'], lines=lines_label, it_string=it_string)
            print("{0:45s} {1:s} {2:s}".format(subroutine_name + '_' + reference, results_selection, 'Plotting'))
        except (FileNotFoundError, IOError):
            # NOTE(review): this stops at the first missing selection instead of
            # trying the remaining ones - confirm whether `continue` was intended
            print("{0:45s} {1:s}".format(subroutine_name + '_' + reference, 'Plot skipped'))
            return

        filename_rad = subroutine_name + '_' + reference + '_' + results_selection

        fig = plt.figure(figsize=(12, 9))
        gs = GridSpec(2, 1)
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)

        # vertical offset applied to each night so that the individual nightly
        # spectra do not overlap with the combined one
        spec_offset = 0.025

        ax1.errorbar(transmission_average['wave'],
                     transmission_average['average'],
                     yerr=transmission_average['average_err'],
                     fmt='ko', ms=1, zorder=10, alpha=0.10, label='average')

        ax1.errorbar(transmission_average['binned_wave'],
                     transmission_average['binned'],
                     yerr=transmission_average['binned_err'],
                     fmt='ko', ms=3, zorder=20, label='binned')

        ax2.errorbar(transmission_average['binned_wave'],
                     transmission_average['binned_out'],
                     yerr=transmission_average['binned_out_err'],
                     fmt='ko', ms=3, zorder=20, label='binned out')

        ax2.errorbar(transmission_average['wave'],
                     transmission_average['average_out'],
                     yerr=transmission_average['average_out_err'],
                     fmt='ko', ms=1, zorder=10, alpha=0.10, label='average out')

        for n_night, night in enumerate(night_dict):

            ax1.errorbar(transmission_average['wave'],
                         transmission_average[night]['average'] - spec_offset*(1. + n_night),
                         yerr=transmission_average[night]['average_err'],
                         color='C' + repr(n_night),
                         fmt='o', ms=1, zorder=1, alpha=0.25)
            ax1.scatter(transmission_average['wave'],
                        transmission_average[night]['average'] - spec_offset*(1. + n_night),
                        c='C' + repr(n_night),
                        s=2, zorder=2,
                        label=night,
                        )

            ax2.errorbar(transmission_average['wave'],
                         transmission_average[night]['average_out'] - spec_offset*(1. + n_night),
                         yerr=transmission_average[night]['average_out_err'],
                         color='C' + repr(n_night),
                         fmt='o', ms=1, zorder=1, alpha=0.25)
            ax2.scatter(transmission_average['wave'],
                        transmission_average[night]['average_out'] - spec_offset*(1. + n_night),
                        c='C' + repr(n_night),
                        s=2, zorder=2)

        try:
            ax1.set_xlim(lines_dict['plot_range'][0], lines_dict['plot_range'][1])
        except KeyError:
            # fall back on the full analysis range when no plot range is given
            ax1.set_xlim(lines_dict['range'][0], lines_dict['range'][1])
        ax1.set_ylim(0.985, 1.01)

        # raw string avoids the invalid \l and \A escape sequences
        # (SyntaxWarning on modern Python); the rendered label is unchanged
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
        ax1.legend(loc=3)
        ax1.set_title('Average in-transit transmission spectrum in {0:s}'.format(reference))
        ax2.set_title('Average out-transit transmission spectrum in {0:s}'.format(reference))

        output_file = get_filename(filename_rad + '_binned', config_in['output'], night='', lines=lines_label, extension='.pdf', it_string=it_string)
        plt.savefig('plots/' + output_file, bbox_inches='tight', dpi=300)
        if interactive_plots:
            plt.show()
        plt.close(fig)
| 15,311 | 44.981982 | 174 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
# Public API of this module
__all__ = ["compute_telluric_molecfit",
           "plot_telluric_molecfit"]
def compute_telluric_molecfit(config_in):
    """ Telluric correction through the ESO molecfit + calctrans tools.

    For every night, each observation is rescaled, rebinned onto the common
    coadd wavelength grid, written to an ASCII file together with a molecfit
    driver (.par) file and a small shell script, and then processed through
    molecfit + calctrans; the resulting telluric transmission spectra are
    rebinned back onto the e2ds scale and saved to disk as pickle files.

    WARNING: the procedure is untested - the function currently prints a
    warning and stops the program before doing any work.

    :param config_in: parsed configuration dictionary
    :return: None (results are saved as pickle files)
    """
    print('UNTESTED PROCEDURE - I QUIT')
    quit()

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)

    for night in night_dict:

        instrument_name = night_dict[night]['instrument']
        template_dict = instrument_dict[instrument_name]['telluric_template']

        print()
        print("compute_telluric_molecfit Night: ", night)
        print()

        try:
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            continue
        except (FileNotFoundError, IOError):
            print(" No telluric correction file found, computing now ")
            print()

        print(' instrument :', instrument_name)
        print(' template :', template_dict['file'])
        print(' fit_range :', template_dict['fit_range'])
        print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': 'telluric_molecfit',
            'n_orders': 0,
            'n_pixels': 0,
        }

        telluric = {
            'subroutine': 'telluric_molecfit',
            'reference_frame': 'observer'
        }

        processed['airmass_ref'] = 0.000
        processed['telluric'] = {}

        """
        Molecfit works on a pixel grid, so all the observations must be rebinned
        onto the same wavelength scale with the same step: the grid of the
        coadded spectrum is used.
        """
        # NOTE(review): the original code first built a fixed-step (0.01 A) grid
        # with np.arange and then immediately overwrote it with the coadd grid;
        # the dead computation has been removed.
        processed['rebin'] = {
            'wave': input_data['coadd']['wave'],
            'size': input_data['coadd']['size'],
            'step': input_data['coadd']['step'],
        }

        print(' Writing data and configuration files for molecfit+calctrans')
        print()

        """
        We store all the molecfit files in a subdirectory
        """
        os.system('mkdir -p molecfit_' + night)
        os.system('mkdir -p molecfit_' + night + '/output/')

        for n_obs, obs in enumerate(lists['observations']):

            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }

            """ e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
            processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
                perform_rescaling(input_data[obs]['wave'],
                                  input_data[obs]['e2ds'],
                                  input_data[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            preserve_flux = input_data[obs].get('absolute_flux', True)

            processed[obs]['rebin_ORF'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               processed[obs]['e2ds_rescaled'],
                               calib_data['blaze'],
                               processed['rebin']['wave'],
                               processed['rebin']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=0.00)

            """ Molecfit analysis is skipped if the telluric computation has been computed already"""
            if os.path.isfile('./molecfit_' + night + '/output/' + obs + '_ORF_s1d_TAC.dat'):
                print(' molecfit+calctrans results for ' + obs + ' already available')
                continue

            """ the spectrum is saved onto an ASCII file in a format suitable for molecfit """
            fileout = open('./molecfit_' + night + '/' + obs + '_ORF_s1d.dat', 'w')
            for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_ORF']):
                fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
            fileout.close()

            # TODO: input from configuration file for molecfit installation path
            bash_script = open('./molecfit_' + night + '/molecfit_exec_' + obs + '.source', 'w')
            bash_script.write('#!/bin/bash \n')
            bash_script.write('echo " " executing molecfit+calctrans on ' + obs + ' \n')
            bash_script.write('/usr/local/eso/bin/molecfit ' + obs + '.par > ' + obs + '_molecfit.log\n')
            bash_script.write('/usr/local/eso/bin/calctrans ' + obs + '.par > ' + obs + '_calctrans.log\n')
            bash_script.close()

            fileout = open('./molecfit_' + night + '/' + obs + '.par', 'w')

            fileout.write("### Driver for MOLECFIT\n")

            # user working directory only important for REFLEX workflow and GUI
            # not used by molecfit itself.
            fileout.write("user_workdir:./\n")

            ## INPUT DATA
            # Data file name (path relative to the current directory or absolute path)
            fileout.write("filename: " + obs + "_ORF_s1d.dat\n")

            # ASCII list of files to be corrected for telluric absorption using the
            # transmission curve derived from the input reference file (default: "none")
            fileout.write("listname: none\n")

            # Type of input spectrum -- 1 = transmission (default); 0 = emission
            fileout.write("trans: 1\n")

            # Names of the file columns (table) or extensions (image) containing:
            # Wavelength Flux Flux_Err Mask
            # - Flux_Err and/or Mask can be avoided by writing 'NULL'
            fileout.write("columns: Wavelength Flux NULL NULL\n")

            # Default error relative to mean for the case that the error column is missing
            fileout.write("default_error: 0.001\n")

            # Multiplicative factor to convert wavelength to micron
            # (e.g. nm -> wlgtomicron = 1e-3)
            fileout.write("wlgtomicron: 0.0001\n")

            # Wavelengths in vacuum (= vac) or air (= air)
            fileout.write("vac_air: air\n")

            # ASCII or FITS table for wavelength ranges in micron to be fitted
            fileout.write("wrange_include: include_" + night + ".dat\n")

            ## RESULTS
            # Directory for output files (path relative to the current directory or absolute path)
            fileout.write("output_dir:./\n")

            # Name for output files
            # (supplemented by "_fit" or "_tac" as well as ".asc", ".atm", ".fits",
            # ".par, ".ps", and ".res")
            fileout.write("output_name: " + obs + "\n")

            # Plot creation: gnuplot is used to create control plots
            fileout.write("plot_creation: none\n")

            # Create plots for individual fit ranges? -- 1 = yes; 0 = no
            fileout.write("plot_range: 0\n")

            ## FIT PRECISION
            # Relative chi2 convergence criterion
            fileout.write("ftol: " + input_data[obs]['molecfit']['ftol'] + "\n")

            # Relative parameter convergence criterion
            fileout.write("xtol: " + input_data[obs]['molecfit']['xtol'] + "\n")

            ## MOLECULAR COLUMNS
            # List of molecules to be included in the model
            # (default: 'H2O', N_val: nmolec)
            n_molec = len(input_data[obs]['molecfit']['molecules'])
            molecules_list = "list_molec:"
            for mol in input_data[obs]['molecfit']['molecules']:
                molecules_list += " " + mol
            fileout.write(molecules_list + "\n")

            # Fit flags for molecules -- 1 = yes; 0 = no (N_val: nmolec)
            # bugfix: the number of flags must match the number of molecules;
            # the original code assumed exactly two of them
            fileout.write("fit_molec:" + " 1" * n_molec + "\n")

            # Values of molecular columns, expressed relatively to the input ATM
            # profile columns (N_val: nmolec) [1 = 100%]
            fileout.write("relcol:" + " 1.0" * n_molec + "\n")

            ## BACKGROUND AND CONTINUUM
            # Conversion of fluxes from phot/(s*m2*mum*as2) (emission spectrum only) to
            # flux unit of observed spectrum (0 = no conversion)
            fileout.write("flux_unit: 0\n")

            # Fit of telescope background -- 1 = yes; 0 = no (emission spectrum only)
            fileout.write("fit_back: 0\n")

            # Initial value for telescope background fit (range: [0,1])
            fileout.write("telback: 0.1\n")

            # Polynomial fit of continuum --> degree: cont_n
            fileout.write("fit_cont: 1\n")

            # Degree of coefficients for continuum fit
            fileout.write("cont_n: {0:1.0f}".format(input_data[obs]['molecfit']['cont_n']) + "\n")

            # Initial constant term for continuum fit (valid for all fit ranges)
            # (emission spectrum: about 1 for correct flux_unit)
            fileout.write("cont_const: {0:1.0f}".format(input_data[obs]['molecfit']['cont_const']) + "\n")

            ## WAVELENGTH SOLUTION
            # Refinement of wavelength solution using a polynomial of degree wlc_n
            fileout.write("fit_wlc: 1\n")

            # Polynomial degree of the refined wavelength solution
            fileout.write("wlc_n: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_n']) + "\n")

            # Initial constant term for wavelength correction (shift relative to half
            # wavelength range)
            fileout.write("wlc_const: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_const']) + "\n")

            ## RESOLUTION
            # Fit resolution by boxcar -- 1 = yes; 0 = no
            fileout.write("fit_res_box: 0\n")

            # Initial value for FWHM of boxcar relative to slit width (>= 0. and <= 2.)
            fileout.write("relres_box: 0.0\n")

            # Voigt profile approximation instead of independent Gaussian and Lorentzian
            # kernels? -- 1 = yes; 0 = no
            fileout.write("kernmode: 0\n")

            # Fit resolution by Gaussian -- 1 = yes; 0 = no
            fileout.write("fit_res_gauss: 1\n")

            # Initial value for FWHM of Gaussian in pixels
            fileout.write("res_gauss: {0:3.1f}".format(input_data[obs]['molecfit']['res_gauss']) + "\n")

            # Fit resolution by Lorentzian -- 1 = yes; 0 = no
            fileout.write("fit_res_lorentz: 0\n")

            # Initial value for FWHM of Lorentzian in pixels
            fileout.write("res_lorentz: 0.0\n")

            # Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
            fileout.write("kernfac: {0:3.0f}".format(input_data[obs]['molecfit']['kernfac']) + "\n")

            # Variable kernel (linear increase with wavelength)? -- 1 = yes; 0 = no
            fileout.write("varkern: 0\n")

            # ASCII file for kernel elements (default: "none")
            fileout.write("kernel_file: none\n")

            ## AMBIENT PARAMETERS
            # The *_key entries are set to NONE so that the values written here are
            # used instead of the ones in the (absent) FITS header.

            # Observing date in years or MJD in days
            fileout.write("obsdate: {0:13.5f}".format(input_data[obs]['MJD']) + "\n")
            fileout.write("obsdate_key: NONE\n")

            # UTC in s
            fileout.write("utc: {0:8.1f}".format(input_data[obs]['UTC']) + "\n")
            fileout.write("utc_key: NONE\n")

            # Telescope altitude angle in deg
            fileout.write("telalt: {0:13.5f}".format(input_data[obs]['ELEVATION']) + "\n")
            fileout.write("telalt_key: NONE\n")

            # Humidity in %
            fileout.write("rhum: {0:13.5f}".format(input_data[obs]['HUMIDITY']) + "\n")
            fileout.write("rhum_key: NONE\n")

            # Pressure in hPa
            fileout.write("pres: {0:5.1f}".format(input_data[obs]['PRESSURE']) + "\n")
            fileout.write("pres_key: NONE\n")

            # Ambient temperature in deg C
            fileout.write("temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_EN']) + "\n")
            fileout.write("temp_key: NONE\n")

            # Mirror temperature in deg C
            fileout.write("m1temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_M1']) + "\n")
            fileout.write("m1temp_key: NONE\n")

            # Elevation above sea level in m (default is Paranal: 2635m)
            fileout.write("geoelev: {0:4.0f}".format(input_data[obs]['GEOELEV']) + "\n")
            fileout.write("geoelev_key: NONE\n")

            # Longitude (default is Paranal: -70.4051)
            fileout.write("longitude: {0:9.4f}".format(input_data[obs]['GEOLONG']) + "\n")
            fileout.write("longitude_key: NONE\n")

            # Latitude (default is Paranal: -24.6276)
            fileout.write("latitude: {0:9.4f}".format(input_data[obs]['GEOLAT']) + "\n")
            fileout.write("latitude_key: NONE\n")

            ## INSTRUMENTAL PARAMETERS
            # Slit width in arcsec (taken from FITS header if present)
            fileout.write("slitw: {0:3.1f}".format(input_data[obs]['molecfit']['slitwidth']) + "\n")
            fileout.write("slitw_key: NONE\n")

            # Pixel scale in arcsec (taken from this file only)
            fileout.write("pixsc: {0:4.2f}".format(input_data[obs]['molecfit']["pixelscale"]) + "\n")
            fileout.write("pixsc_key: NONE\n")

            ## ATMOSPHERIC PROFILES
            # Reference atmospheric profile
            fileout.write("ref_atm: equ.atm\n")

            # Specific GDAS-like input profile; "auto" performs an automatic retrieval.
            fileout.write("gdas_dir: data/profiles/grib\n")
            fileout.write("gdas_prof: auto\n")

            # Grid of layer heights for merging ref_atm and GDAS profile.
            # Fixed grid = 1 (default) and natural grid = 0.
            fileout.write("layers: 0\n")

            # Upper mixing height in km (default: 5) for considering data of a local
            # meteo station.
            fileout.write("emix: 5.0\n")

            # PWV value in mm for the input water vapour profile
            # (default: -1 -> no scaling).
            fileout.write("pwv: -1.\n")

            # internal GUI specific parameter
            fileout.write("clean_mflux: 1\n")

            fileout.write("end\n")
            fileout.close()

            os.system('cd molecfit_' + night + '/ && . ./molecfit_exec_' + obs + '.source')

        print()
        print(' molecfit+calctrans completed')

        for n_obs, obs in enumerate(lists['observations']):

            telluric[obs] = {}

            """ Loading the telluric spectrum from the output directory of molecfit """
            telluric_molecfit = np.genfromtxt('./molecfit_' + night + '/output/' + obs + '_ORF_s1d_TAC.dat', usecols=2)

            """ rebinning onto the e2ds wave scale"""
            telluric[obs]['spectrum'] = \
                rebin_1d_to_2d(processed['rebin']['wave'],
                               processed['rebin']['step'],
                               telluric_molecfit,
                               input_data[obs]['wave'],
                               input_data[obs]['step'],
                               preserve_flux=False)

            try:
                # bugfix: the input array was missing from the np.nan_to_num call,
                # which always raised a TypeError
                telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'],
                                                          nan=1.0, posinf=1.0, neginf=1.0)
            except TypeError:
                # older numpy (<1.17) does not accept the nan/posinf/neginf
                # keywords: replace the non-finite values by hand
                # (bugfix: the original selection was inverted and clobbered
                # the finite values instead)
                temp = ~np.isfinite(telluric[obs]['spectrum'])
                telluric[obs]['spectrum'][temp] = 1.0

            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']

            # kept for compatibility with some plots, even if it doesn't make
            # much sense for a molecfit-based correction
            telluric[obs]['airmass_ref'] = 0.000
            telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
                                                           telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])

            # anomalously low points (e.g. where the model is not computed) are set to one
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            # we just copy the spectrum, since it is a model itself
            telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()

            processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
            processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']

        save_to_cpickle('telluric', telluric, config_in['output'], night)
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)

        print()
        print("Night ", night, " completed")
def plot_telluric_molecfit(config_in, night_input=''):
    """ Plot the molecfit telluric correction, night by night.

    Top panel: rescaled uncorrected spectra (lines) and their
    telluric-corrected counterparts (dots), color-coded by BJD.
    Bottom panel: the telluric transmission spectrum itself, sharing the
    wavelength axis with the top panel. An optional external comparison
    spectrum is over-plotted when configured for the instrument.

    :param config_in: dictionary with the parsed configuration file
    :param night_input: restrict plotting to a single night; all nights if ''
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_telluric_template Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except Exception:
            # a missing pickle just means the correction step was never run for this night
            print()
            print("No telluric correction, no plots")
            continue

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            color_array = cmap(i / len(lists['observations']))

            _, e2ds_rescaled, _ = \
                perform_rescaling(processed[obs]['wave'],
                                  processed[obs]['e2ds'],
                                  processed[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']

            for order in range(0, processed[obs]['n_orders']):

                # attach the legend labels only once, on the very first order
                # of the very first observation, to avoid a bloated legend
                if order == 0 and i == 0:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array))

                ax2.plot(processed[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)

        # single unit-transmission reference line, drawn once instead of
        # once per order/observation as in the original loop
        ax2.axhline(1.00, c='k')

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        # raw string: '\l' and '\A' are invalid escape sequences otherwise
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        try:
            instrument = night_dict[night]['instrument']
            comparison_file = config_in['instruments'][instrument]['telluric_comparison']
            comparison_data = np.genfromtxt(comparison_file, skip_header=1)
            # heuristic: wavelengths below 1000 are assumed to be in nm
            if comparison_data[0, 0] < 1000.0:
                nm2Ang = 10.
            else:
                nm2Ang = 1.
            ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
            ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
        except Exception:
            # the comparison spectrum is optional: skip silently when absent
            pass

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
SLOPpy | SLOPpy-main/SLOPpy/quick_transmission.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from scipy.interpolate import UnivariateSpline
__all__ = ['compute_quick_transmission']
def compute_quick_transmission(config_in, lines_label):
    """ Quick-look transmission spectrum around a set of spectral lines.

    For every night, an out-of-transit master is built by coadding the raw
    e2ds spectra; each observation is then divided by this master, rebinned
    onto the shared binned wavelength grid restricted to the requested line
    range, and plotted (out-of-transit in blue, in-transit in red, the
    latter offset by -0.04 for visual comparison).

    :param config_in: dictionary with the parsed configuration file
    :param lines_label: key of the spectral-line set in the configuration
    """
    subroutine_name = 'quick_transmission'

    night_dict = from_config_get_nights(config_in)
    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]

    shared_data = load_from_cpickle('shared', config_in['output'])

    binned_selection = (shared_data['binned']['wave'] >= lines_dict['range'][0]) \
        & (shared_data['binned']['wave'] < lines_dict['range'][1])

    transmission_shared = {
        'subroutine': subroutine_name,
        'binned_wave': shared_data['binned']['wave'][binned_selection],
        'binned_step': shared_data['binned']['step'][binned_selection],
        # np.int was deprecated in NumPy 1.20 and removed in 1.24:
        # the builtin int is the drop-in replacement
        'binned_size': int(np.sum(binned_selection))
    }

    import matplotlib.pyplot as plt

    for night in night_dict:

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        if config_in['master-out'].get('use_composite', False):
            master_out = load_from_cpickle('master_out_composite', config_in['output'], night)
            print(' Using composite master-out from all nights')
        else:
            master_out = load_from_cpickle('master_out', config_in['output'], night)

        if config_in['master-out'].get('use_smoothed', False):
            master_out['rescaled'] = master_out['smoothed']
            master_out['rescaled_err'] = master_out['smoothed_err']
            print(' Using smoothed master-out')

        quick_transmission = {
            'subroutine': subroutine_name,
        }

        # grid properties are taken from the first observation of the night
        first_obs = lists['observations'][0]
        quick_transmission['n_pixels'] = input_data[first_obs]['n_pixels']
        quick_transmission['n_orders'] = input_data[first_obs]['n_orders']
        quick_transmission['wave'] = input_data[first_obs]['wave']
        quick_transmission['step'] = input_data[first_obs]['step']

        # out-of-transit master: plain coadd of the raw e2ds spectra
        master_raw = np.zeros([quick_transmission['n_orders'], quick_transmission['n_pixels']])
        for obs in lists['transit_out']:
            master_raw += input_data[obs]['e2ds']

        quick_transmission['master_raw'] = master_raw.copy()
        quick_transmission['master'] = master_raw / np.nanmedian(master_raw)

        # unit blaze: the observation/master ratio already cancels the blaze
        blaze = np.ones([quick_transmission['n_orders'], quick_transmission['n_pixels']])

        for obs in lists['observations']:
            quick_transmission[obs] = {}
            transmission_raw = input_data[obs]['e2ds'] / quick_transmission['master']
            quick_transmission[obs]['transmission_raw'] = transmission_raw.copy()
            quick_transmission[obs]['transmission'] = transmission_raw / np.nanmedian(transmission_raw)
            quick_transmission[obs]['wave'] = input_data[obs]['wave']  # Added for plotting purpose only

            quick_transmission[obs]['binned'] = \
                rebin_2d_to_1d(quick_transmission['wave'],
                               quick_transmission['step'],
                               quick_transmission[obs]['transmission'],
                               blaze,
                               transmission_shared['binned_wave'],
                               transmission_shared['binned_step'],
                               rv_shift=0,
                               preserve_flux=False)

        for obs in lists['transit_out']:
            plt.scatter(transmission_shared['binned_wave'],
                        quick_transmission[obs]['binned'],
                        c='b', s=1, zorder=10, alpha=0.5)
        for obs in lists['transit_full']:
            # in-transit spectra are shifted down by 0.04 for visual comparison
            plt.scatter(transmission_shared['binned_wave'],
                        quick_transmission[obs]['binned']-0.04,
                        c='r', s=1, zorder=10, alpha=0.5)

        plt.xlim(transmission_shared['binned_wave'][0], transmission_shared['binned_wave'][-1])
        plt.show()

        print()

        # Keep going from here after preparation, unless the subroutine has
        # been called just to perform the data preparation step
| 5,497 | 40.651515 | 103 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_v1.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_molecfit_v1",
"plot_telluric_molecfit_v1"]
def compute_telluric_molecfit_v1(config_in):
    """
    Telluric correction through ESO molecfit+calctrans (legacy v1 interface).

    For each night the routine writes a rebinned 1D spectrum and a molecfit
    parameter file per observation, runs molecfit+calctrans on them, and
    stores the resulting telluric spectra and corrected e2ds frames.

    :param config_in: dictionary with the parsed configuration file
    :return:
    """
    # deliberate safety guard left by the author: this legacy procedure is
    # disabled and exits immediately, making the rest of the body unreachable
    print('UNTESTED PROCEDURE - I QUIT')
    quit()

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)

    for night in night_dict:

        instrument_name = night_dict[night]['instrument']
        template_dict = instrument_dict[instrument_name]['telluric_template']

        print()
        print("compute_telluric_molecfit Night: ", night)
        print()

        try:
            # skip the night entirely when the result is already on disk
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            continue
        except Exception:
            print(" No telluric correction file found, computing now ")
            print()

        print(' instrument :', instrument_name)
        print(' template :', template_dict['file'])
        print(' fit_range :', template_dict['fit_range'])
        print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': 'telluric_molecfit',
            'n_orders': 0,
            'n_pixels': 0,
        }

        telluric = {
            'subroutine': 'telluric_molecfit',
            'reference_frame': 'observer'
        }

        processed['airmass_ref'] = 0.000
        processed['telluric'] = {}
        processed['rebin'] = {}

        """
        Molecfit works on pixel grid, so we must ensure that the spectra are rebinned always on the same wavelength
        scale and same wavelength step. We use local arrays for this purpose
        """
        rebin_step_unit = 0.01000000
        processed['rebin']['wave'] = np.arange(input_data['coadd']['wavelength_range'][0],
                                               input_data['coadd']['wavelength_range'][1],
                                               rebin_step_unit,
                                               dtype=np.double)
        processed['rebin']['size'] = np.size(processed['rebin']['wave'])
        processed['rebin']['step'] = np.ones(processed['rebin']['size'], dtype=np.double) * rebin_step_unit

        # NOTE(review): the assignment below discards the fixed-step grid just
        # built above and replaces it with the coadd grid — confirm which of
        # the two grids molecfit is actually meant to receive
        processed['rebin'] = {
            'wave': input_data['coadd']['wave'],
            'size': input_data['coadd']['size'],
            'step': input_data['coadd']['step'],
        }

        print(' Writing data and configuration files for molecfit+calctrans')
        print()

        """
        We store all the molecfit files in a subdirectory
        We save the path of the main directory to a temporary file
        """
        os.system('mkdir -p molecfit_'+night)
        os.system('mkdir -p molecfit_'+night + '/output/')
# There must be a more elegant way to do this, but I'm, not aware of it
for n_obs, obs in enumerate(lists['observations']):
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebin_ORF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift=0.00)
""" Molecfit analysis is skipped if the telluric computation has been computed already"""
if os.path.isfile('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat'):
print(' molecfit+calctrans results for ' + obs + ' already available')
continue
""" the spectra is save onto an ASCII file in a format suitable for molecfit """
fileout = open('./molecfit_'+night +'/'+obs+'_ORF_s1d.dat', 'w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_ORF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
"""
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['rebin_SRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_rescaled'],
calib_data['blaze'],
processed['rebin']['wave'],
processed['rebin']['step'],
preserve_flux=preserve_flux,
rv_shift = observational_pams[obs]['rv_shift_ORF2SRF'])
fileout = open('./molecfit_'+night +'/'+obs+'_SRF_s1d.dat','w')
for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_SRF']):
fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
fileout.close()
"""
# TODO: input from configuration file for molecfit installation path
bash_script = open('./molecfit_'+night +'/molecfit_exec_' + obs + '.source', 'w')
bash_script.write('#!/bin/bash \n')
bash_script.write('echo " " executing molecfit+calctrans on '+obs+' \n')
bash_script.write('/usr/local/eso/bin/molecfit '+obs+'.par > ' + obs +'_molecfit.log\n')
bash_script.write('/usr/local/eso/bin/calctrans '+obs+'.par > ' + obs +'_calctrans.log\n')
bash_script.close()
fileout = open('./molecfit_'+night +'/'+obs+'.par', 'w')
fileout.write("### Driver for MOLECFIT\n")
# user working directory only important for REFLEX workflow and GUI
# not used by molecfit itself.
fileout.write("user_workdir:./\n")
## INPUT DATA
# Data file name (path relative to the current directory or absolute path)
fileout.write("filename: " + obs +"_ORF_s1d.dat\n")
# ASCII list of files to be corrected for telluric absorption using the
# transmission curve derived from the input reference file (path of list and
# listed files relative to the current directory or absolute path; default: "none")
fileout.write("listname: none\n")
# Type of input spectrum -- 1 = transmission (default); 0 = emission
fileout.write("trans: 1\n")
# Names of the file columns (table) or extensions (image) containing:
# Wavelength Flux Flux_Err Mask
# - Flux_Err and/or Mask can be avoided by writing 'NULL'
# - 'NULL' is required for Wavelength if it is given by header keywords
# - parameter list: col_lam, col_flux, col_dflux, and col_mask
fileout.write("columns: Wavelength Flux NULL NULL\n")
# Default error relative to mean for the case that the error column is missing
fileout.write("default_error: 0.001\n")
# Multiplicative factor to convert wavelength to micron
# (e.g. nm -> wlgtomicron = 1e-3)
fileout.write("wlgtomicron: 0.0001\n")
# Wavelengths in vacuum (= vac) or air (= air)
fileout.write("vac_air: air\n")
# TODO: input from configuration file for molecfit installation path
# ASCII or FITS table for wavelength ranges in micron to be fitted
# (path relative to the current directory or absolute path; default: "none")
fileout.write("wrange_include: include_"+night+".dat\n")
# ASCII or FITS table for wavelength ranges in micron to be excluded from the
# fit (path relative to the current directory or absolute path; default: "none")
# wrange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_w.dat
# ASCII or FITS table for pixel ranges to be excluded from the fit
# (path relative to the current directory or absolute path; default: "none")
# prange_exclude: /Users/malavolta/Astro/ExoAtmospheres/molecfit_test//HIP63901_exclude_p.dat
## RESULTS
# Directory for output files (path relative to the current directory or absolute path)
fileout.write("output_dir:./\n")
# Name for output files
# (supplemented by "_fit" or "_tac" as well as ".asc", ".atm", ".fits",
# ".par, ".ps", and ".res")
fileout.write("output_name: "+ obs + "\n")
# Plot creation: gnuplot is used to create control plots
# W - screen output only (incorporating wxt terminal in gnuplot)
# X - screen output only (incorporating x11 terminal in gnuplot)
# P - postscript file labelled '<output_name>.ps', stored in <output_dir>
# combinations possible, i.e. WP, WX, XP, WXP (however, keep the order!)
# all other input: no plot creation is performed
fileout.write("plot_creation: none\n")
# Create plots for individual fit ranges? -- 1 = yes; 0 = no
fileout.write("plot_range: 0\n")
## FIT PRECISION
# Relative chi2 convergence criterion
fileout.write("ftol: " + input_data[obs]['molecfit']['ftol'] + "\n")
# Relative parameter convergence criterion
fileout.write("xtol: " + input_data[obs]['molecfit']['xtol'] + "\n")
## MOLECULAR COLUMNS
# List of molecules to be included in the model
# (default: 'H2O', N_val: nmolec)
molecules_list = "list_molec:"
for mol in input_data[obs]['molecfit']['molecules']:
molecules_list += " " + mol
fileout.write(molecules_list + "\n")
# Fit flags for molecules -- 1 = yes; 0 = no (N_val: nmolec)
fileout.write("fit_molec: 1 1\n")
# Values of molecular columns, expressed relatively to the input ATM profile
# columns (N_val: nmolec) [1 = 100%]
fileout.write("relcol: 1.0 1.0\n")
## BACKGROUND AND CONTINUUM
# Conversion of fluxes from phot/(s*m2*mum*as2) (emission spectrum only) to
# flux unit of observed spectrum:
# 0: phot/(s*m^2*mum*as^2) [no conversion]
# 1: W/(m^2*mum*as^2)
# 2: erg/(s*cm^2*A*as^2)
# 3: mJy/as^2
# For other units the conversion factor has to be considered as constant term
# of the continuum fit.
fileout.write("flux_unit: 0\n")
# Fit of telescope background -- 1 = yes; 0 = no (emission spectrum only)
fileout.write("fit_back: 0\n")
# Initial value for telescope background fit (range: [0,1])
fileout.write("telback: 0.1\n")
# Polynomial fit of continuum --> degree: cont_n
fileout.write("fit_cont: 1\n")
# Degree of coefficients for continuum fit
fileout.write("cont_n: {0:1.0f}".format(input_data[obs]['molecfit']['cont_n']) + "\n")
# Initial constant term for continuum fit (valid for all fit ranges)
# (emission spectrum: about 1 for correct flux_unit)
fileout.write("cont_const: {0:1.0f}".format(input_data[obs]['molecfit']['cont_const']) + "\n")
## WAVELENGTH SOLUTION
# Refinement of wavelength solution using a polynomial of degree wlc_n
fileout.write("fit_wlc: 1\n")
# Polynomial degree of the refined wavelength solution
fileout.write("wlc_n: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_n']) + "\n")
# Initial constant term for wavelength correction (shift relative to half
# wavelength range)
fileout.write("wlc_const: {0:1.0f}".format(input_data[obs]['molecfit']['wlc_const']) + "\n")
## RESOLUTION
# Fit resolution by boxcar -- 1 = yes; 0 = no
fileout.write("fit_res_box: 0\n")
# Initial value for FWHM of boxcar relative to slit width (>= 0. and <= 2.)
fileout.write("relres_box: 0.0\n")
# Voigt profile approximation instead of independent Gaussian and Lorentzian
# kernels? -- 1 = yes; 0 = no
fileout.write("kernmode: 0\n")
# Fit resolution by Gaussian -- 1 = yes; 0 = no
fileout.write("fit_res_gauss: 1\n")
# Initial value for FWHM of Gaussian in pixels
fileout.write("res_gauss: {0:3.1f}".format(input_data[obs]['molecfit']['res_gauss']) + "\n")
# Fit resolution by Lorentzian -- 1 = yes; 0 = no
fileout.write("fit_res_lorentz: 0\n")
# Initial value for FWHM of Lorentzian in pixels
fileout.write("res_lorentz: 0.0\n")
# Size of Gaussian/Lorentzian/Voigtian kernel in FWHM
fileout.write("kernfac: {0:3.0f}".format(input_data[obs]['molecfit']['kernfac']) + "\n")
# Variable kernel (linear increase with wavelength)? -- 1 = yes; 0 = no
fileout.write("varkern: 0\n")
# ASCII file for kernel elements (one per line; normalisation not required)
# instead of synthetic kernel consisting of boxcar, Gaussian, and Lorentzian
# components (path relative to the current directory or absolute path; default: "none\n")
fileout.write("kernel_file: none\n")
## AMBIENT PARAMETERS
# If the input data file contains a suitable FITS header, the keyword names of
# the following parameters will be read, but the corresponding values will not
# be used. The reading of parameter values from this file can be forced by
# setting keywords to NONE.
# Observing date in years or MJD in days
fileout.write("obsdate: {0:13.5f}".format(input_data[obs]['MJD']) + "\n")
fileout.write("obsdate_key: NONE\n")
# UTC in s
fileout.write("utc: {0:8.1f}".format(input_data[obs]['UTC']) + "\n")
fileout.write("utc_key: NONE\n")
# Telescope altitude angle in deg
fileout.write("telalt: {0:13.5f}".format(input_data[obs]['ELEVATION']) + "\n")
fileout.write("telalt_key: NONE\n")
# Humidity in %
fileout.write("rhum: {0:13.5f}".format(input_data[obs]['HUMIDITY']) + "\n")
fileout.write("rhum_key: NONE\n")
# Pressure in hPa
fileout.write("pres: {0:5.1f}".format(input_data[obs]['PRESSURE']) + "\n")
fileout.write("pres_key: NONE\n")
# Ambient temperature in deg C
# temp: 15.0
fileout.write("temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_EN']) + "\n")
fileout.write("temp_key: NONE\n")
# Mirror temperature in deg C
# m1temp: 15.0
fileout.write("m1temp: {0:4.1f}".format(input_data[obs]['TEMPERATURE_M1']) + "\n")
fileout.write("m1temp_key: NONE\n")
# Elevation above sea level in m (default is Paranal: 2635m)
# geoelev: 2387.2
fileout.write("geoelev: {0:4.0f}".format(input_data[obs]['GEOELEV']) + "\n")
fileout.write("geoelev_key: NONE\n")
# Longitude (default is Paranal: -70.4051)
# longitude: -17.889
fileout.write("longitude: {0:9.4f}".format(input_data[obs]['GEOLONG']) + "\n")
fileout.write("longitude_key: NONE\n")
# Latitude (default is Paranal: -24.6276)
# latitude: 28.754
fileout.write("latitude: {0:9.4f}".format(input_data[obs]['GEOLAT']) + "\n")
fileout.write("latitude_key: NONE\n")
## INSTRUMENTAL PARAMETERS
# Slit width in arcsec (taken from FITS header if present)
fileout.write("slitw: {0:3.1f}".format(input_data[obs]['molecfit']['slitwidth']) + "\n")
fileout.write("slitw_key: NONE\n")
# Pixel scale in arcsec (taken from this file only)
fileout.write("pixsc: {0:4.2f}".format(input_data[obs]['molecfit']["pixelscale"]) + "\n")
fileout.write("pixsc_key: NONE\n")
## ATMOSPHERIC PROFILES
# Reference atmospheric profile
fileout.write("ref_atm: equ.atm\n")
# Specific GDAS-like input profile (P[hPa] HGT[m] T[K] RELHUM[%]) (path
# relative to the installation directory or absolute path). In the case of "none", no GDAS
# profiles will be considered. The default "auto" performs an automatic
# retrieval.
fileout.write("gdas_dir: data/profiles/grib\n")
fileout.write("gdas_prof: auto\n")
# Grid of layer heights for merging ref_atm and GDAS profile. Fixed grid = 1
# (default) and natural grid = 0.
fileout.write("layers: 0\n")
# Upper mixing height in km (default: 5) for considering data of a local meteo
# station. If emix is below geoelev, rhum, pres, and temp are not used for
# modifying the corresponding profiles.
fileout.write("emix: 5.0\n")
# PWV value in mm for the input water vapour profile. The merged profile
# composed of ref_atm, GDAS, and local meteo data will be scaled to this value
# if pwv > 0 (default: -1 -> no scaling).
fileout.write("pwv: -1.\n")
# internal GUI specific parameter
fileout.write("clean_mflux: 1\n")
fileout.write("end\n")
fileout.close()
os.system('cd molecfit_' + night + '/ && . ./molecfit_exec_' + obs + '.source')
print()
print(' molecfit+calcatrans completed')
# apply the molecfit transmission to every observation of the night
for n_obs, obs in enumerate(lists['observations']):

    telluric[obs] = {}

    """ Loading the telluric spectrum from the output directory of molecfit """
    telluric_molecfit = np.genfromtxt('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat', usecols=2)

    """ rebinning onto the e2ds wave scale"""
    # a transmission spectrum is a ratio, so flux must NOT be preserved
    telluric[obs]['spectrum'] = \
        rebin_1d_to_2d(processed['rebin']['wave'],
                       processed['rebin']['step'],
                       telluric_molecfit,
                       input_data[obs]['wave'],
                       input_data[obs]['step'],
                       preserve_flux=False)

    try:
        # BUGFIX: the array to sanitize was missing from the call, so
        # np.nan_to_num raised a TypeError on every run
        telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'],
                                                  nan=1.0, posinf=1.0, neginf=1.0)
    except TypeError:
        # fallback for NumPy < 1.17, which lacks the nan/posinf/neginf
        # keywords. BUGFIX: the mask was inverted — only the NON-finite
        # values must be replaced by 1.0
        temp = ~np.isfinite(telluric[obs]['spectrum'])
        telluric[obs]['spectrum'][temp] = 1.0

    telluric[obs]['airmass'] = input_data[obs]['AIRMASS']

    # for compatibility with some plots, even if it doesn't make any sense
    telluric[obs]['airmass_ref'] = 0.000
    telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
                                                   telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])

    """ Set anomalously low points to one (e.g. when the template is not computed)"""
    telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
    telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

    # we just copy the spectrum, since it is a model itself
    telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()

    processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
    processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']

save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)

print()
print("Night ", night, " completed")
#
#""" After being rescaled for the proper factor, the template telluric spectrum is rebinned onto the 2D
#scale of the observations """
#
#telluric['template']['rebinned']['flux'] = \
# rebin_1d_to_2d(telluric['template']['input']['wave'],
# telluric['template']['input']['step'],
# telluric['template']['input']['flux'],
# telluric['template']['rebinned']['wave'],
# telluric['template']['rebinned']['step'],
# preserve_flux=False)
#
#telluric['template']['rebinned']['ferr'] = \
# rebin_1d_to_2d(telluric['template']['input']['wave'],
# telluric['template']['input']['step'],
# telluric['template']['input']['ferr'],
# telluric['template']['rebinned']['wave'],
# telluric['template']['rebinned']['step'],
# preserve_flux=False,
# is_error=True)
#
#
#sel_out_of_range = ~((telluric['template']['rebinned']['wave'] > telluric['template']['input']['range'][0]+1.) \
# & (telluric['template']['rebinned']['wave'] < telluric['template']['input']['range'][1]-1.))
#telluric['template']['rebinned']['flux'][sel_out_of_range] = 1.
#telluric['template']['rebinned']['ferr'][sel_out_of_range] = 0.1
#
#processed['telluric']['spectrum_noairmass'] = \
# (telluric['template']['rebinned']['flux'] - 1.) * telluric_factor + 1.0
#
#telluric['airmass_ref'] = processed['airmass_ref']
#
#for obs in lists['observations']:
# """ Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
# processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / \
# np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
# processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / \
# np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
#for obs in lists['observations']:
# # Correction of telluric lines
#
# telluric[obs] = {}
#
# telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
#
# telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
# telluric[obs]['airmass_ref'] = processed['airmass_ref']
#
# """ Set anomalosly low point to one (e.g. when the template is not computed)"""
# telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
# telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
#
# telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
# telluric[obs]['spline_noairmass'] = telluric[obs]['spectrum_noairmass'].copy()
#
# """ No need to compute the spline approximation since we are already dealing with a very high SNR template"""
# telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
# input_data[obs]['AIRMASS'] - processed['airmass_ref'])
#
# """ copy the keyword for future use"""
# telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
#
# telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
# telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
#
# save_to_cpickle('telluric', telluric, config_in['output'], night)
# save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
quit()
def plot_telluric_molecfit_v1(config_in, night_input=''):
    """ Plot the molecfit (legacy v1) telluric correction, night by night.

    Top panel: rescaled uncorrected spectra (lines) and their
    telluric-corrected counterparts (dots), color-coded by BJD.
    Bottom panel: the telluric transmission spectrum itself, sharing the
    wavelength axis with the top panel. An optional external comparison
    spectrum is over-plotted when configured for the instrument.

    :param config_in: dictionary with the parsed configuration file
    :param night_input: restrict plotting to a single night; all nights if ''
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_telluric_template Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except Exception:
            # a missing pickle just means the correction step was never run for this night
            print()
            print("No telluric correction, no plots")
            continue

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            color_array = cmap(i / len(lists['observations']))

            _, e2ds_rescaled, _ = \
                perform_rescaling(processed[obs]['wave'],
                                  processed[obs]['e2ds'],
                                  processed[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']

            for order in range(0, processed[obs]['n_orders']):

                # attach the legend labels only once, on the very first order
                # of the very first observation, to avoid a bloated legend
                if order == 0 and i == 0:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array))

                ax2.plot(processed[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)

        # single unit-transmission reference line, drawn once instead of
        # once per order/observation as in the original loop
        ax2.axhline(1.00, c='k')

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        # raw string: '\l' and '\A' are invalid escape sequences otherwise
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        try:
            instrument = night_dict[night]['instrument']
            comparison_file = config_in['instruments'][instrument]['telluric_comparison']
            comparison_data = np.genfromtxt(comparison_file, skip_header=1)
            # heuristic: wavelengths below 1000 are assumed to be in nm
            if comparison_data[0, 0] < 1000.0:
                nm2Ang = 10.
            else:
                nm2Ang = 1.
            ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
            ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
        except Exception:
            # the comparison spectrum is optional: skip silently when absent
            pass

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
SLOPpy | SLOPpy-main/SLOPpy/second_telluric_correction_on_transmission.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from scipy.stats import linregress
__all__ = ["compute_second_telluric_correction_on_transmission", "plot_second_telluric_correction_on_transmission"]
subroutine_name = 'second_telluric_correction_on_transmission'
def compute_second_telluric_correction_on_transmission(config_in):
    """Apply a second-pass telluric correction to the planetRF transmission spectra.

    For each night: the telluric spectrum of every observation is shifted from
    the observer rest frame to the planet rest frame and rebinned onto the
    transmission wavelength grid; a linear relation between telluric absorption
    depth and transmission residuals is fitted on selected points; the fitted
    trend is then removed from each rescaled transmission spectrum. Weighted
    in-transit and out-of-transit averages are recomputed on the corrected
    spectra and everything is saved to cpickle files.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
    """
    night_dict = from_config_get_nights(config_in)
    shared_data = load_from_cpickle('shared', config_in['output'])

    for night in night_dict:

        # Skip the night if the second-pass correction has already been computed
        try:
            transmission = load_from_cpickle('transmission_planetRF_second_correction', config_in['output'], night)
            continue
        except:
            print("No transmission spectra with second correction found, computing now ")
            print()

        print()
        print("compute_second_telluric_correction_on_transmission Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        transmission = load_from_cpickle('transmission_planetRF', config_in['output'], night)
        telluric = load_from_cpickle('telluric', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': subroutine_name
        }

        for obs in lists['observations']:

            processed[obs] = {}

            # Telluric spectrum moved from the observer RF to the planet RF
            # (rv_shift_ORF2PRF) and rebinned onto the 1D transmission grid
            processed[obs]['telluric_shifted'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               telluric[obs]['spectrum'],
                               telluric[obs]['spectrum'],
                               transmission['wave'],
                               transmission['step'],
                               rv_shift=observational_pams[obs]['rv_shift_ORF2PRF'],
                               preserve_flux=False,
                               skip_blaze_correction=True)

            # Fit only points close to the continuum of the transmission
            # (within 2 sigma of unity) but with noticeable (>2%) telluric
            # absorption, so that the slope traces residual telluric signal
            processed[obs]['selection'] = (np.abs(1.0000-transmission[obs]['rescaled']) < 2*np.std(transmission[obs]['rescaled'])) \
                & (np.abs(1.0000-processed[obs]['telluric_shifted']) > 0.02)

            # Linear fit: transmission residuals as a function of telluric depth
            transmission[obs]['slope'], \
                transmission[obs]['intercept'], \
                transmission[obs]['rvalue'], \
                transmission[obs]['pvalue'], \
                transmission[obs]['stderr'], = linregress(processed[obs]['telluric_shifted'][processed[obs]['selection']],
                                                          transmission[obs]['rescaled'][processed[obs]['selection']])

            # Remove the fitted telluric-correlated trend (in place), keeping
            # the spectrum normalized to unity
            transmission[obs]['rescaled'] += 1.000 - (transmission[obs]['intercept'] +
                                                      transmission[obs]['slope']*processed[obs]['telluric_shifted'])

        # Weighted average of the corrected in-transit spectra
        array_average = np.zeros([len(lists['transit_in']), transmission['size']])
        weights_average = np.zeros([len(lists['transit_in']), transmission['size']])
        for i, obs in enumerate(lists['transit_in']):
            array_average[i, :] = transmission[obs]['rescaled'][:]
            weights_average[i, :] = 1./(transmission[obs]['rescaled_err']**2.)

        transmission['average'], transmission['sum_weights'] = np.average(
            array_average, axis=0, weights=weights_average, returned=True)
        transmission['average_err'] = 1./np.sqrt(transmission['sum_weights'])

        # Weighted average of the corrected out-of-transit spectra
        array_average = np.zeros([len(lists['transit_out']), transmission['size']])
        weights_average = np.zeros([len(lists['transit_out']), transmission['size']])
        for i, obs in enumerate(lists['transit_out']):
            array_average[i, :] = transmission[obs]['rescaled'][:]
            weights_average[i, :] = 1./(transmission[obs]['rescaled_err']**2.)

        transmission['average_out'], transmission['sum_weights_out'] = np.average(
            array_average, axis=0, weights=weights_average, returned=True)
        transmission['average_out_err'] = 1./np.sqrt(transmission['sum_weights_out'])

        save_to_cpickle('transmission_planetRF_second_correction_processed', processed, config_in['output'], night)
        save_to_cpickle('transmission_planetRF_second_correction', transmission, config_in['output'], night)
def plot_second_telluric_correction_on_transmission(config_in, night_input=''):
    """Diagnostic plots for the second telluric correction.

    For each selected night, two figures are produced:
    1. transmission vs telluric depth before (top) and after (bottom) the
       second correction, one vertical offset per observation;
    2. the corrected transmission spectra, progressively shifted downwards,
       with a colorbar keyed on the observation BJD.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
        night_input (str): restrict the plots to a single night; all nights
            are plotted when empty.
    """
    night_dict = from_config_get_nights(config_in)

    night_list = night_dict if night_input == '' else np.atleast_1d(night_input)

    for night in night_list:

        # Observation lists and transmission products of this night
        lists = load_from_cpickle('lists', config_in['output'], night)
        previous_transmission = load_from_cpickle('transmission_planetRF', config_in['output'], night)
        transmission = load_from_cpickle('transmission_planetRF_second_correction', config_in['output'], night)
        processed = load_from_cpickle('transmission_planetRF_second_correction_processed', config_in['output'], night)

        # --- Figure 1: correlation with the telluric spectrum -------------
        fig_corr, (panel_before, panel_after) = plt.subplots(2, sharex=True, sharey=True)
        cmap = plt.cm.Spectral
        obs_colors = cmap(np.linspace(0, 1, len(lists['observations'])))

        for idx, obs in enumerate(lists['observations']):
            offset = idx / 10.
            panel_before.axhline(1. + offset, c='k', zorder=0)
            panel_after.axhline(1. + offset, c='k', zorder=0)
            panel_before.scatter(processed[obs]['telluric_shifted'],
                                 previous_transmission[obs]['rescaled'] + offset,
                                 s=2, c=np.atleast_2d(obs_colors[idx]), zorder=1)
            panel_after.scatter(processed[obs]['telluric_shifted'],
                                transmission[obs]['rescaled'] + offset,
                                s=2, c='r', zorder=2)

        panel_before.set_xlim(0.80, 1.02)
        plt.show()

        # --- Figure 2: corrected spectra, color-coded by BJD ---------------
        bjd = [transmission[obs]['BJD'] - 2450000.0 for obs in lists['observations']]
        am = [transmission[obs]['AIRMASS'] for obs in lists['observations']]
        colors = np.asarray(bjd)
        cmap = plt.cm.Spectral
        obs_colors = cmap(np.linspace(0, 1, len(lists['observations'])))

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])
        ax = plt.subplot(gs[0, 0])
        cbax1 = plt.subplot(gs[:, 1])

        running_shift = 0.00
        for idx, obs in enumerate(lists['observations']):
            ax.errorbar(transmission['wave'],
                        transmission[obs]['rescaled'] - running_shift,
                        yerr=transmission[obs]['rescaled_err'],
                        marker='o', c=obs_colors[idx], ms=1, alpha=0.5)
            running_shift += 0.05

        ax.set_ylim(0.0 - running_shift, 1.2)
        ax.set_xlabel('$\lambda$ [$\AA$]')
        ax.legend(loc=3)
        ax.set_title('Night: ' + night)

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # dummy array required by ScalarMappable
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 7,683 | 40.76087 | 132 | py |
SLOPpy | SLOPpy-main/SLOPpy/clv_rm_modelling.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.kepler_exo import *
from SLOPpy.subroutines.plot_subroutines import *
from astropy.convolution import Gaussian1DKernel, convolve
__all__ = ['compute_clv_rm_modelling', 'plot_clv_rm_modelling']
subroutine_name = 'clv_rm_modelling'
def compute_clv_rm_modelling(config_in):
    """Model the Center-to-Limb Variation (CLV) and Rossiter-McLaughlin (RM)
    distortion of the stellar spectrum during the planetary transit.

    The stellar disk is sampled on a square cell grid. Each cell inside the
    disk is assigned a synthetic spectrum, interpolated in limb angle from the
    spectral-synthesis output (SME format), and a rotational velocity computed
    with the Cegla+2016 geometry. For every observation the planet position is
    time-oversampled within the exposure; the flux of the cells obscured by
    the planet is integrated to obtain the "missing flux" and the resulting
    distorted stellar spectrum. Results are stored in cpickle files.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    planet_dict = from_config_get_planet(config_in)
    star_dict = from_config_get_star(config_in)
    clv_rm_dict = from_config_get_clv_rm(config_in)

    # Skip the whole computation if the output files are already present
    try:
        synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
        star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
        if not config_in['settings'].get('full_output', False):
            for night in night_dict:
                clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
        print("{0:45s} {1:s}".format(subroutine_name, 'Retrieved'))
        return
    except:
        print("{0:45s} {1:s}".format(subroutine_name, 'Computing'))
        print()

    """
    Loading the spectral synthesis results, at the moment only SME output is supported.
    Properties of the synthesis data files
    - limb_angles: this is an input to SME, so it is specific on how the synthesis has been performed
    - spectra: stellar spectrum as a function of the limb angle, sampled near the spectral lines
    - model: integrated spectrum of the star
    """
    synthesis_data_limb_angles = np.genfromtxt(clv_rm_dict['synthesis_files'] + '_muvals.txt', dtype=np.double)
    synthesis_data_spectra = np.genfromtxt(clv_rm_dict['synthesis_files'] + '_spectra.txt', dtype=np.double)
    synthesis_data_model = np.genfromtxt(clv_rm_dict['synthesis_files'] + '_model.txt', dtype=np.double)

    synthesis = {
        'surface': {
            'wave': synthesis_data_spectra[:, 0],
            'flux': synthesis_data_spectra[:, 1:],
            'n_mu': np.size(synthesis_data_limb_angles),
            'mu': synthesis_data_limb_angles
        },
        'total': {
            'wave': synthesis_data_model[:, 0],
            'norm': synthesis_data_model[:, 1],
        }
    }

    """ Setting up the array for model computation """
    # Wavelength step of the input grids; first element copied from the second
    synthesis['total']['step'] = synthesis['total']['wave'] * 0.0
    synthesis['total']['step'][1:] = synthesis['total']['wave'][1:] - synthesis['total']['wave'][:-1]
    synthesis['total']['step'][0] = synthesis['total']['step'][1]

    synthesis['surface']['step'] = synthesis['surface']['wave'] * 0.0
    synthesis['surface']['step'][1:] = synthesis['surface']['wave'][1:] - synthesis['surface']['wave'][:-1]
    synthesis['surface']['step'][0] = synthesis['surface']['step'][1]

    # Uniform output wavelength grid onto which every spectrum is rebinned
    synthesis['surface']['wave_out'] = np.arange(synthesis['surface']['wave'][0],
                                                 synthesis['surface']['wave'][-1],
                                                 clv_rm_dict['rebinning_step'])
    synthesis['surface']['size_out'] = np.size(synthesis['surface']['wave_out'], axis=0)
    synthesis['surface']['step_out'] = np.ones(synthesis['surface']['size_out']) * clv_rm_dict['rebinning_step']

    synthesis['total']['norm_out'] = rebin_1d_to_1d(synthesis['total']['wave'],
                                                    synthesis['total']['step'],
                                                    synthesis['total']['norm'],
                                                    synthesis['surface']['wave_out'],
                                                    synthesis['surface']['step_out'],
                                                    method='exact_flux',
                                                    preserve_flux=False)

    """ Check if the number of spectra corresponds to the number of limb angle values """
    if np.size(synthesis['surface']['flux'], axis=1) != synthesis['surface']['n_mu']:
        print('ERROR in loading the stellar spectra')

    """
    Setting up the grid of stellar spectra for the CLV and RM computation
    odd number of points to include the zero value
    """
    star_grid = {
        'n_grid': clv_rm_dict['n_gridpoints'],
        'half_grid': int((clv_rm_dict['n_gridpoints'] - 1) / 2)
    }

    """ Coordinates of the centers of each grid cell (add offset) """
    star_grid['xx'] = np.linspace(-1.0, 1.0, star_grid['n_grid'], dtype=np.double)
    star_grid['xc'], star_grid['yc'] = np.meshgrid(star_grid['xx'], star_grid['xx'], indexing='xy')
    # check the Note section of the wiki page of meshgrid
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html

    """ Distance of each grid cell from the center of the stellar disk """
    star_grid['rc'] = np.sqrt(star_grid['xc'] ** 2 + star_grid['yc'] ** 2)
    star_grid['inside'] = star_grid['rc'] <= 1.0  # Must avoid negative numbers inside the square root
    star_grid['outside'] = star_grid['rc'] > 1.0  # Must avoid negative numbers inside the square root

    """ Determine the mu angle for each grid cell, as a function of radius. """
    star_grid['mu'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
                               dtype=np.double)  # initialization of the matrix with the mu values
    star_grid['mu'][star_grid['inside']] = np.sqrt(1. - star_grid['rc'][star_grid['inside']] ** 2)

    """ 2.2 Determine the Doppler shift to apply to the spectrum of each grid cell, from Cegla+2016 """
    star_grid['x_ortho'] = star_grid['xc'] * np.cos(star_dict['lambda'][0] * deg2rad) \
        - star_grid['yc'] * np.sin(
        star_dict['lambda'][0] * deg2rad)  # orthogonal distances from the spin-axis
    star_grid['y_ortho'] = star_grid['xc'] * np.sin(star_dict['lambda'][0] * deg2rad) \
        + star_grid['yc'] * np.cos(star_dict['lambda'][0] * deg2rad)
    star_grid['r_ortho'] = np.sqrt(star_grid['x_ortho'] ** 2 + star_grid['y_ortho'] ** 2)
    star_grid['z_ortho'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
                                    dtype=np.double)  # initialization of the matrix
    star_grid['z_ortho'][star_grid['inside']] = np.sqrt(1 - star_grid['r_ortho'][star_grid['inside']] ** 2)

    """ rotate the coordinate system around the x_ortho axis by an angle: """
    star_grid['beta'] = (np.pi / 2.) - star_dict['inclination'][0] * deg2rad

    """ orthogonal distance from the stellar equator """
    star_grid['yp_ortho'] = star_grid['z_ortho'] * np.sin(star_grid['beta']) + star_grid['y_ortho'] * np.cos(
        star_grid['beta'])

    """ stellar rotational velocity for a given position """
    # vsini modulated by differential rotation (alpha) as in Cegla+2016
    star_grid['v_star'] = star_grid['x_ortho'] * star_dict['vsini'][0] * (
        1 - star_dict['alpha'][0] * star_grid['yp_ortho'] ** 2)
    star_grid['v_star'][star_grid['outside']] = 0.0  # Null velocity for points outside the stellar surface

    """ Associate a synthetic spectrum to each cell """
    star_grid['spectra_mu'] = [[0] * star_grid['n_grid'] for i in range(star_grid['n_grid'])]

    for x in range(0, star_grid['n_grid']):
        for y in range(0, star_grid['n_grid']):
            if star_grid['outside'][y, x]: continue
            index_closer = np.abs(
                synthesis['surface']['mu'] - star_grid['mu'][y, x]).argmin()  # take the index of the closer value
            if star_grid['mu'][y, x] in synthesis['surface']['mu']:
                # Exact match: use the synthesized spectrum directly
                star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, index_closer]
                continue
            elif index_closer == synthesis['surface']['n_mu'] - 1 or \
                    synthesis['surface']['mu'][index_closer] > star_grid['mu'][y, x]:
                # Bracket the cell mu between two tabulated limb angles
                mu_ind0 = index_closer - 1
                mu_ind1 = index_closer
            else:
                mu_ind0 = index_closer
                mu_ind1 = index_closer + 1
            # Linear interpolation in mu between the two bracketing spectra
            diff_mu = synthesis['surface']['mu'][mu_ind1] - synthesis['surface']['mu'][mu_ind0]
            star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, mu_ind0] \
                + (star_grid['mu'][y, x] - synthesis['surface']['mu'][mu_ind0]) / diff_mu \
                * (synthesis['surface']['flux'][:, mu_ind1]
                   - synthesis['surface']['flux'][:, mu_ind0])

    """ Computation of the continuum level (total flux is already normalized)"""
    star_grid['continuum'] = [[0] * star_grid['n_grid'] for i in range(star_grid['n_grid'])]
    spectra_window = ((synthesis['surface']['wave'] > clv_rm_dict['continuum_range'][0]) &
                      (synthesis['surface']['wave'] < clv_rm_dict['continuum_range'][1]))
    for x in range(0, star_grid['n_grid']):
        for y in range(0, star_grid['n_grid']):
            if star_grid['outside'][y, x]: continue
            star_grid['continuum'][x][y] = np.median(star_grid['spectra_mu'][x][y][spectra_window])
    # Total continuum of the visible disk, used later to normalize the missing flux
    star_grid['continuum_level'] = np.sum(star_grid['continuum'])

    for night in night_dict:

        """ Retrieving the list of observations"""
        print()
        print('compute_CLV_RM_modelling Night: ', night)

        # Skip the night if the correction has already been computed
        try:
            clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
            continue
        except:
            print()
            print(' No CLV & RM correction files found, computing now ')

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        instrument = night_dict[night]['instrument']

        clv_rm_modelling = {
            'common': {
                'wave': synthesis['surface']['wave_out'],
                'step': synthesis['surface']['step_out'],
                'norm': synthesis['total']['norm_out'],
                'continuum_level': star_grid['continuum_level']
            }
        }

        # Gaussian kernel width matched to the instrumental resolution,
        # expressed in pixels of the common output grid
        clv_rm_modelling['common']['convolution_dlambda'] = \
            np.median(clv_rm_modelling['common']['wave']) / instrument_dict[instrument]['resolution']
        clv_rm_modelling['common']['convolution_sigma'] = \
            clv_rm_modelling['common']['convolution_dlambda'] / np.median(clv_rm_modelling['common']['step'])

        gaussian = Gaussian1DKernel(stddev=clv_rm_modelling['common']['convolution_sigma'])
        clv_rm_modelling['common']['norm_convolved'] = convolve(clv_rm_modelling['common']['norm'], gaussian)

        processed = {}

        print()
        for obs in lists['observations']:
            print(' Computing CLV+RM correction for ', obs)

            processed[obs] = {}
            clv_rm_modelling[obs] = {}

            # Oversample the exposure in time; force an odd number of
            # sub-exposures so the mid-exposure epoch is included
            n_oversampling = int(observational_pams[obs]['EXPTIME'] / clv_rm_dict['time_step'])
            if n_oversampling % 2 == 0: n_oversampling += 1
            half_time = observational_pams[obs]['EXPTIME'] / 2 / 86400.

            processed[obs]['bjd_oversampling'] = np.linspace(observational_pams[obs]['BJD'] - half_time,
                                                             observational_pams[obs]['BJD'] + half_time,
                                                             n_oversampling, dtype=np.double)

            if planet_dict['orbit'] == 'circular':
                # Time of pericenter concides with transit time, if we assume e=0 and omega=np.pi/2.
                eccentricity = 0.00
                omega_rad = np.pi / 2.
                # Tcent is assumed as reference time
                Tref = planet_dict['reference_time_of_transit'][0]
                Tcent_Tref = 0.000
            else:
                omega_rad = planet_dict['omega'][0] * deg2rad
                Tref = planet_dict['reference_time']
                Tcent_Tref = planet_dict['reference_time_of_transit'][0] - Tref
                eccentricity = planet_dict['eccentricity'][0]

            inclination_rad = planet_dict['inclination'][0] * deg2rad

            true_anomaly, orbital_distance_ratio = kepler_true_anomaly_orbital_distance(
                processed[obs]['bjd_oversampling'] - Tref,
                Tcent_Tref,
                planet_dict['period'][0],
                eccentricity,
                omega_rad,
                planet_dict['semimajor_axis_ratio'][0])

            """ planet position during its orbital motion, in unit of stellar radius"""
            # Following Murray+Correia 2011 , with the argument of the ascending node set to zero.
            # 1) the ascending node coincide with the X axis
            # 2) the reference plance coincide with the plane of the sky
            processed[obs]['planet_position'] = {
                'xp': -orbital_distance_ratio * (np.cos(omega_rad + true_anomaly)),
                'yp': orbital_distance_ratio * (np.sin(omega_rad + true_anomaly) * np.cos(inclination_rad)),
                'zp': orbital_distance_ratio * (np.sin(inclination_rad) * np.sin(omega_rad + true_anomaly))
            }

            # projected distance of the planet's center to the stellar center
            processed[obs]['planet_position']['rp'] = np.sqrt(processed[obs]['planet_position']['xp'] ** 2 \
                                                              + processed[obs]['planet_position']['yp'] ** 2)

            # obscured flux integrated over the full epoch
            clv_rm_modelling[obs]['missing_flux'] = np.zeros(synthesis['surface']['size_out'], dtype=np.double)

            # iterating on the sub-exposures
            for j, zeta in enumerate(processed[obs]['planet_position']['zp']):
                if zeta > 0 and processed[obs]['planet_position']['rp'][j] < 1 + planet_dict['radius_ratio'][0]:
                    # the planet is in the foreground or inside the stellar disk, continue
                    # adjustment: computation is performed even if only part of the planet is shadowing the star
                    rd = np.sqrt((processed[obs]['planet_position']['xp'][j] - star_grid['xc']) ** 2 + \
                                 (processed[obs]['planet_position']['yp'][j] - star_grid['yc']) ** 2)

                    # iterating on the cell grid
                    for x in range(0, star_grid['n_grid']):
                        for y in range(0, star_grid['n_grid']):
                            # skip the step if the cell is outside the stellar disk
                            # or if the cell is not shadowed by the planet
                            if star_grid['outside'][y, x] or rd[y, x] > planet_dict['radius_ratio'][0]: continue

                            # Local spectrum Doppler-shifted by the cell
                            # velocity and rebinned onto the output grid
                            flux_tmp = rebin_1d_to_1d(synthesis['surface']['wave'],
                                                      synthesis['surface']['step'],
                                                      star_grid['spectra_mu'][x][y],
                                                      clv_rm_modelling['common']['wave'],
                                                      clv_rm_modelling['common']['step'],
                                                      rv_shift=star_grid['v_star'][y, x],
                                                      method='exact_flux',
                                                      preserve_flux=False)

                            # fixing zero values that may have been introduced by
                            # the rebinning process from an extremely irregular sampling
                            ind_sel = np.where(flux_tmp < 0.)[0]
                            for ii in ind_sel:
                                if ii == 0:
                                    flux_tmp[ii] = flux_tmp[ii + 1]
                                elif ii == np.size(flux_tmp) - 1:
                                    flux_tmp[ii] = flux_tmp[ii - 1]
                                else:
                                    flux_tmp[ii] = (flux_tmp[ii - 1] + flux_tmp[ii + 1]) / 2.

                            clv_rm_modelling[obs]['missing_flux'] += flux_tmp

            # Average the accumulated obscured flux over the sub-exposures
            clv_rm_modelling[obs]['missing_flux'] /= n_oversampling

            # Distorted stellar spectrum: integrated spectrum minus the
            # normalized flux obscured by the planet (CLV + RM effect)
            clv_rm_modelling[obs]['stellar_spectra'] = clv_rm_modelling['common']['norm'] \
                - (clv_rm_modelling[obs]['missing_flux']
                   / clv_rm_modelling['common']['continuum_level'])
            clv_rm_modelling[obs]['stellar_spectra_convolved'] = \
                convolve(clv_rm_modelling[obs]['stellar_spectra'], gaussian)

        save_to_cpickle('clv_rm_modelling', clv_rm_modelling, config_in['output'], night)

    if not config_in['settings'].get('full_output', False):
        # spectra_mu is by far the heaviest item: drop it before saving
        del star_grid['spectra_mu']

    save_to_cpickle('clv_rm_star_grid', star_grid, config_in['output'])
    save_to_cpickle('clv_rm_synthesis', synthesis, config_in['output'])
def plot_clv_rm_modelling(config_in, night_input=''):
    """Plot the CLV+RM modelling products.

    When no night is specified, the stellar limb-angle map and the radial
    velocity field of the star grid are shown first. For each selected night,
    the modelled in-transit stellar spectra (with the normalized missing flux)
    and the out-of-transit spectra are plotted, color-coded by BJD.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
        night_input (str): restrict the plots to a single night; all nights
            are plotted when empty.
    """
    night_dict = from_config_get_nights(config_in)
    synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
    star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])

    if night_input == '':
        # Visualize the mu of star
        fig = plt.figure(figsize=(8, 6.5))
        plt.title('Limb angle')
        plt.contourf(star_grid['xx'], star_grid['xx'], star_grid['mu'], 60, cmap=plt.cm.viridis)
        plt.colorbar(label='$\mu$')  # draw colorbar
        # plot data points.
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.xlabel('x [R_s]')
        plt.ylabel('y [R_s]')
        plt.show()

        # Visualize the RV of star
        fig = plt.figure(figsize=(8, 6.5))
        # CS = plt.contour(xx,xx,v_star,50,linewidths=0.5,colors='k')
        plt.title('Radial velocity field')
        plt.contourf(star_grid['xx'], star_grid['xx'], star_grid['v_star'], 100, cmap=plt.cm.seismic)
        plt.colorbar(label='v_star')  # draw colorbar
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.xlabel('x [R_s]')
        plt.ylabel('y [R_s]')
        plt.show()

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        lists = load_from_cpickle('lists', config_in['output'], night)
        clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        # Color coding keyed on the modified BJD of each observation
        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        # In-transit: distorted stellar spectrum and normalized missing flux
        for obs in lists['transit_in']:
            ax1.plot(clv_rm_modelling['common']['wave'],
                     clv_rm_modelling[obs]['stellar_spectra'],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
            ax1.plot(clv_rm_modelling['common']['wave'],
                     clv_rm_modelling[obs]['missing_flux'] / clv_rm_modelling['common']['continuum_level'],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

        # Out-of-transit spectra (no planet shadow)
        for obs in lists['transit_out']:
            ax2.plot(clv_rm_modelling['common']['wave'],
                     clv_rm_modelling[obs]['stellar_spectra'],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

        ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
        ax2.set_title('Out of transit spectra')
        ax2.set_xlabel('$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
plt.show() | 20,737 | 48.971084 | 119 | py |
SLOPpy | SLOPpy-main/SLOPpy/clv_rm_models.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.kepler_exo import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.math_functions import *
from astropy.convolution import Gaussian1DKernel, convolve
__all__ = ['compute_clv_rm_models', 'plot_clv_rm_models']
subroutine_name = 'clv_rm_models'
def compute_clv_rm_models(config_in):
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
planet_dict = from_config_get_planet(config_in)
star_dict = from_config_get_star(config_in)
clv_rm_dict = from_config_get_clv_rm(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
# un-convolved portion of the spectrum given by range_boundaries +-
wave_fix_convo = 1.0
# Added back-compatibility to old or "wrong" keys
norm_dict = clv_rm_dict.get('normalization', {})
norm_pams={}
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
try:
synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
if not config_in['settings'].get('full_output', False):
for night in night_dict:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
print("{0:45s} {1:s}".format(
subroutine_name, 'Retrieved'))
except:
print("{0:45s} {1:s}".format(
subroutine_name, 'Computing'))
print()
"""
Loading the spectral synthesis results, at the moment only SME output is supported.
Properties of the synthesis data files
- limb_angles: this is an input to SME, so it is specific on how the synthesis has been performed
- spectra: stellar spectrum as a function of the limb angle, sampled near the spectral lines
- model: integrated spectrum of the star
"""
synthesis_data_limb_angles = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_muvals.txt', dtype=np.double)
synthesis_data_spectra = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_spectra.txt', dtype=np.double)
synthesis_data_model = np.genfromtxt(
clv_rm_dict['synthesis_files'] + '_model.txt', dtype=np.double)
synthesis = {
'surface': {
'wave': synthesis_data_spectra[:, 0],
'flux': synthesis_data_spectra[:, 1:],
'n_mu': np.size(synthesis_data_limb_angles),
'mu': synthesis_data_limb_angles
},
'total': {
'wave': synthesis_data_model[:, 0],
'norm': synthesis_data_model[:, 1],
}
}
""" Setting up the array for model computation """
synthesis['total']['step'] = synthesis['total']['wave'] * 0.0
synthesis['total']['step'][1:] = synthesis['total']['wave'][1:] - \
synthesis['total']['wave'][:-1]
synthesis['total']['step'][0] = synthesis['total']['step'][1]
synthesis['surface']['step'] = synthesis['surface']['wave'] * 0.0
synthesis['surface']['step'][1:] = synthesis['surface']['wave'][1:] - \
synthesis['surface']['wave'][:-1]
synthesis['surface']['step'][0] = synthesis['surface']['step'][1]
synthesis['surface']['wave_out'] = np.arange(synthesis['surface']['wave'][0],
synthesis['surface']['wave'][-1],
clv_rm_dict['rebinning_step'])
synthesis['surface']['size_out'] = np.size(
synthesis['surface']['wave_out'], axis=0)
synthesis['surface']['step_out'] = np.ones(
synthesis['surface']['size_out']) * clv_rm_dict['rebinning_step']
synthesis['total']['norm_out'] = rebin_1d_to_1d(synthesis['total']['wave'],
synthesis['total']['step'],
synthesis['total']['norm'],
synthesis['surface']['wave_out'],
synthesis['surface']['step_out'],
method='exact_flux',
preserve_flux=False)
""" Check if the number of spectra corresponds to the number of limb angle values """
if np.size(synthesis['surface']['flux'], axis=1) != synthesis['surface']['n_mu']:
print('ERROR in loading the stellar spectra')
"""
Setting up the grid of stellar spectra for the CLV and RM computation
odd number of points to include the zero value
"""
star_grid = {
'n_grid': clv_rm_dict['n_gridpoints'],
'half_grid': int((clv_rm_dict['n_gridpoints'] - 1) / 2)
}
""" Coordinates of the centers of each grid cell (add offset) """
star_grid['xx'] = np.linspace(-1.0000000000000, 1.0000000000000,
star_grid['n_grid'], dtype=np.double)
star_grid['xc'], star_grid['yc'] = np.meshgrid(
star_grid['xx'], star_grid['xx'], indexing='xy')
# check the Note section of the wiki page of meshgrid
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html
""" Distance of each grid cell from the center of the stellar disk """
star_grid['rc'] = np.sqrt(star_grid['xc'] ** 2 + star_grid['yc'] ** 2)
# Must avoid negative numbers inside the square root
star_grid['inside'] = star_grid['rc'] < 1.0000000000000
# Must avoid negative numbers inside the square root
star_grid['outside'] = star_grid['rc'] >= 1.00000000000000
""" Determine the mu angle for each grid cell, as a function of radius. """
star_grid['mu'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix with the mu values
star_grid['mu'][star_grid['inside']] = np.sqrt(
1. - star_grid['rc'][star_grid['inside']] ** 2)
""" 2.2 Determine the Doppler shift to apply to the spectrum of each grid cell, from Cegla+2016 """
star_grid['x_ortho'] = star_grid['xc'] * np.cos(star_dict['lambda'][0] * deg2rad) \
- star_grid['yc'] * np.sin(
star_dict['lambda'][0] * deg2rad) # orthogonal distances from the spin-axis
star_grid['y_ortho'] = star_grid['xc'] * np.sin(star_dict['lambda'][0] * deg2rad) \
+ star_grid['yc'] * np.cos(star_dict['lambda'][0] * deg2rad)
star_grid['r_ortho'] = np.sqrt(
star_grid['x_ortho'] ** 2 + star_grid['y_ortho'] ** 2)
star_grid['z_ortho'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
dtype=np.double) # initialization of the matrix
star_grid['z_ortho'][star_grid['inside']] = np.sqrt(
1. -star_grid['r_ortho'][star_grid['inside']] ** 2)
""" rotate the coordinate system around the x_ortho axis by an angle: """
star_grid['beta'] = (np.pi / 2.) - \
star_dict['inclination'][0] * deg2rad
""" orthogonal distance from the stellar equator """
star_grid['yp_ortho'] = star_grid['z_ortho'] * np.sin(star_grid['beta']) + star_grid['y_ortho'] * np.cos(
star_grid['beta'])
""" stellar rotational velocity for a given position """
star_grid['v_star'] = star_grid['x_ortho'] * star_dict['vsini'][0] * (
1. -star_dict['alpha'][0] * star_grid['yp_ortho'] ** 2)
# Null velocity for points outside the stellar surface
star_grid['v_star'][star_grid['outside']] = 0.0
""" Associate a synthetic spectrum to each cell """
""" recomputation of spectra_mu - most likely it has been deleted from the
output file
"""
star_grid['spectra_mu'] = [[0] * star_grid['n_grid']
for i in range(star_grid['n_grid'])]
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]:
continue
index_closer = np.abs(
synthesis['surface']['mu'] - star_grid['mu'][y, x]).argmin() # take the index of the closer value
if star_grid['mu'][y, x] in synthesis['surface']['mu']:
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, index_closer]
continue
elif index_closer == synthesis['surface']['n_mu'] - 1 or \
synthesis['surface']['mu'][index_closer] > star_grid['mu'][y, x]:
mu_ind0 = index_closer - 1
mu_ind1 = index_closer
else:
mu_ind0 = index_closer
mu_ind1 = index_closer + 1
diff_mu = synthesis['surface']['mu'][mu_ind1] - \
synthesis['surface']['mu'][mu_ind0]
star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, mu_ind0] \
+ (star_grid['mu'][y, x] - synthesis['surface']['mu'][mu_ind0]) / diff_mu \
* (synthesis['surface']['flux'][:, mu_ind1]
- synthesis['surface']['flux'][:, mu_ind0])
""" Computation of the continuum level (total flux is already normalized)"""
star_grid['continuum'] = [[0] * star_grid['n_grid']
for i in range(star_grid['n_grid'])]
spectral_window = ((synthesis['surface']['wave'] > clv_rm_dict['continuum_range'][0]) &
(synthesis['surface']['wave'] < clv_rm_dict['continuum_range'][1]))
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
if star_grid['outside'][y, x]:
continue
star_grid['continuum'][x][y] = np.median(
star_grid['spectra_mu'][x][y][spectral_window])
star_grid['continuum_level'] = np.sum(star_grid['continuum'])
"""
Setting up the grid for the rescaling factor of the planetary radius
"""
try:
radius_grid = np.arange(clv_rm_dict['radius_factor'][0],
clv_rm_dict['radius_factor'][1] +
clv_rm_dict['radius_factor'][2],
clv_rm_dict['radius_factor'][2])
except KeyError:
radius_grid = np.arange(0.5, 2.6, 0.1)
for night in night_dict:
""" Retrieving the list of observations"""
print()
print('compute_CLV_RM_models Night: ', night)
try:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
continue
except:
print()
print(' No CLV & RM correction files found, computing now ')
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
instrument = night_dict[night]['instrument']
clv_rm_models = {
'common': {
'wave': synthesis['surface']['wave_out'],
'step': synthesis['surface']['step_out'],
'norm': synthesis['total']['norm_out'],
'continuum_level': star_grid['continuum_level'],
'radius_grid': radius_grid,
'n_radius_grid': len(radius_grid)
}
}
clv_rm_models['common']['convolution_dlambda'] = \
np.median(clv_rm_models['common']['wave']) / \
instrument_dict[instrument]['resolution']
clv_rm_models['common']['convolution_sigma'] = \
clv_rm_models['common']['convolution_dlambda'] / \
np.median(clv_rm_models['common']['step'])
gaussian = Gaussian1DKernel(
stddev=clv_rm_models['common']['convolution_sigma'])
clv_rm_models['common']['norm_convolved'] = convolve(
clv_rm_models['common']['norm'], gaussian)
""" Fixing border effect (we took already wave_extension angstrom outside of the
actual range, so doing it this way is fine)"""
wave_fix_convolution = (clv_rm_models['common']['wave'] > clv_rm_models['common']['wave'][0]+wave_fix_convo) \
| (clv_rm_models['common']['wave'] > clv_rm_models['common']['wave'][-1]-wave_fix_convo)
clv_rm_models['common']['norm_convolved'][wave_fix_convolution] = clv_rm_models['common']['norm'][wave_fix_convolution]
"""
Computation of the first derivative, useful to identify
continuum level. This method is prone to errors for
observational data, but it's quite robust for synthetic spectra
if jumps in wavelngth are small
"""
clv_rm_models['common']['norm_convolved_derivative'] = \
first_derivative(clv_rm_models['common']['wave'],
clv_rm_models['common']['norm_convolved'])
# Using only the 10percentile of values of the derivative around zero
cont_10perc = np.percentile(np.abs(clv_rm_models['common']['norm_convolved_derivative']), norm_pams['percentile_selection'])
clv_rm_models['common']['norm_convolved_bool'] = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold'])
print(' Number of points within 10percentile: {0:10.0f}'.format(np.sum((np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc))))
print(' Number of points above threshold: {0:10.0f}'.format(np.sum( (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold']))))
norm_convolved_bool = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold'])
if np.sum(norm_convolved_bool) < 100:
print(' Lower threshold decreased by 80% to allow point selection ', norm_pams['lower_threshold']*0.80)
clv_rm_models['common']['norm_convolved_bool'] = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
& (clv_rm_models['common']['norm_convolved']> norm_pams['lower_threshold']*0.80)
else:
clv_rm_models['common']['norm_convolved_bool'] = norm_convolved_bool
processed = {}
print()
for obs in lists['observations']:
print(' Computing CLV+RM correction for ', obs)
processed[obs] = {}
clv_rm_models[obs] = {}
n_oversampling = int(
observational_pams[obs]['EXPTIME'] / clv_rm_dict['time_step'])
if n_oversampling % 2 == 0:
n_oversampling += 1
half_time = observational_pams[obs]['EXPTIME'] / 2 / 86400.
processed[obs]['bjd_oversampling'] = np.linspace(observational_pams[obs]['BJD'] - half_time,
observational_pams[obs]['BJD'] + half_time,
n_oversampling, dtype=np.double)
if planet_dict['orbit'] == 'circular':
# Time of pericenter concides with transit time, if we assume e=0 and omega=np.pi/2.
eccentricity = 0.00
omega_rad = np.pi / 2.
# Tcent is assumed as reference time
Tref = planet_dict['reference_time_of_transit'][0]
Tcent_Tref = 0.000
else:
omega_rad = planet_dict['omega'][0] * deg2rad
Tref = planet_dict['reference_time']
Tcent_Tref = planet_dict['reference_time_of_transit'][0] - Tref
eccentricity = planet_dict['eccentricity'][0]
inclination_rad = planet_dict['inclination'][0] * deg2rad
true_anomaly, orbital_distance_ratio = kepler_true_anomaly_orbital_distance(
processed[obs]['bjd_oversampling'] - Tref,
Tcent_Tref,
planet_dict['period'][0],
eccentricity,
omega_rad,
planet_dict['semimajor_axis_ratio'][0])
""" planet position during its orbital motion, in unit of stellar radius"""
# Following Murray+Correia 2011 , with the argument of the ascending node set to zero.
# 1) the ascending node coincide with the X axis
# 2) the reference plance coincide with the plane of the sky
processed[obs]['planet_position'] = {
'xp': -orbital_distance_ratio * (np.cos(omega_rad + true_anomaly)),
'yp': orbital_distance_ratio * (np.sin(omega_rad + true_anomaly) * np.cos(inclination_rad)),
'zp': orbital_distance_ratio * (np.sin(inclination_rad) * np.sin(omega_rad + true_anomaly))
}
# projected distance of the planet's center to the stellar center
processed[obs]['planet_position']['rp'] = np.sqrt(processed[obs]['planet_position']['xp'] ** 2
+ processed[obs]['planet_position']['yp'] ** 2)
# obscured flux integrated over the full epoch
# grid n_radius_grid X size_out (of spectral model)
clv_rm_models[obs]['missing_flux'] = np.zeros(
[len(radius_grid), synthesis['surface']['size_out']], dtype=np.double)
# iterating on the sub-exposures
for j, zeta in enumerate(processed[obs]['planet_position']['zp']):
if zeta > 0 and processed[obs]['planet_position']['rp'][j] < 1. + planet_dict['radius_ratio'][0]:
# the planet is in the foreground or inside the stellar disk, continue
# adjustment: computation is performed even if only part of the planet is shadowing the star
rd = np.sqrt((processed[obs]['planet_position']['xp'][j] - star_grid['xc']) ** 2 +
(processed[obs]['planet_position']['yp'][j] - star_grid['yc']) ** 2)
# iterating on the cell grid
for x in range(0, star_grid['n_grid']):
for y in range(0, star_grid['n_grid']):
# skip the step if the cell is outside the stellar disk
# or if the cell is not shadowed by the planet when the largest possible size is considered
if star_grid['outside'][y, x] or rd[y, x] > planet_dict['radius_ratio'][0]*radius_grid[-1]:
continue
# rescaled planetary radius selection
grid_sel = (
rd[y, x] <= planet_dict['radius_ratio'][0]*radius_grid)
# stellar flux in the masked region
flux_tmp = rebin_1d_to_1d(synthesis['surface']['wave'],
synthesis['surface']['step'],
star_grid['spectra_mu'][x][y],
clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
rv_shift=star_grid['v_star'][y, x],
method='exact_flux',
preserve_flux=False)
# fixing zero values that may have been introduced by
# the rebinning process from an extremely irregular sampling
ind_sel = np.where(flux_tmp < 0.)[0]
for ii in ind_sel:
if ii == 0:
flux_tmp[ii] = flux_tmp[ii + 1]
elif ii == np.size(flux_tmp) - 1:
flux_tmp[ii] = flux_tmp[ii - 1]
else:
flux_tmp[ii] = (
flux_tmp[ii - 1] + flux_tmp[ii + 1]) / 2.
"""
Outer product of the radius selection array (size=M)
and the flux array (N) so that it can be summed
properly to the MxN missing_flux matrix.
"""
clv_rm_models[obs]['missing_flux'] += \
np.outer(grid_sel, flux_tmp)
clv_rm_models[obs]['missing_flux'] /= n_oversampling
clv_rm_models[obs]['stellar_spectra'] = \
np.outer(np.ones(len(radius_grid)), clv_rm_models['common']['norm']) \
- (clv_rm_models[obs]['missing_flux'] /
clv_rm_models['common']['continuum_level'])
clv_rm_models[obs]['stellar_spectra_convolved'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved_derivative'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
clv_rm_models[obs]['clv_rm_model_convolved_continuum_bool'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=bool)
clv_rm_models[obs]['clv_rm_model_convolved_normalized'] = \
np.zeros([len(radius_grid), synthesis['surface']['size_out']],
dtype=np.double)
for ii in range(0, len(radius_grid)):
clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] = \
convolve(clv_rm_models[obs]['stellar_spectra'][ii, :],
gaussian)
clv_rm_models[obs]['stellar_spectra_convolved'][ii, wave_fix_convolution] = \
clv_rm_models[obs]['stellar_spectra'][ii, wave_fix_convolution]
"""
This is the theoretical transmission spectrum in the stellar reference frame
when only CLV and RM effects are present (no atmospheric
transmission)
"""
clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] = \
clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] \
/ clv_rm_models['common']['norm_convolved']
"""
High-resolution transmission spectra are always rescaled for
their continuum because in fiber-fed spectrographs the
information on the absolute flux of the star is lost.
If not using the normalized spectrum, normalization factor must
be included somehow when correcting for the CLV+RM, before
fitting the atomic absoprtion lines
"""
normalization_function = np.polynomial.chebyshev.Chebyshev.fit(
clv_rm_models['common']['wave'][clv_rm_models['common']['norm_convolved_bool']],
clv_rm_models[obs]['clv_rm_model_convolved'][ii, :][clv_rm_models['common']['norm_convolved_bool']],
deg=norm_pams['model_poly_degree']
)
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][ii, :] = clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] / normalization_function(clv_rm_models['common']['wave'])
#plt.plot(clv_rm_models['common']['wave'], clv_rm_models[obs]['clv_rm_model_convolved_normalized'][ii, :])
#plt.plot(clv_rm_models['common']['wave'], clv_rm_models[obs]['clv_rm_model_convolved'][ii, :])
#plt.show()
""" In the planetary reference frame, the corrected transmission
spectrum T_corr is given by
T_corr = T_input * (synthetic_convolved /
stellar_spectra_convolved),
where: T_input: transmission spectrum before the correction
synthetic_convolved: integrated synthetic stellar spectrum,
convolved for the instrumental resolution.
stellar_spectra_convolved: stellar spectrum after removing the
contribute of the stellar surface covered by the planet, convolved
for the instrumental resolution (synthetic_convolved and
stellar_spectra_convolved are in the stellar rest frame must be
rebinned in the planetary rest frame)
Since clv_rm_model_convolved = stellar_spectra_convolved /
synthetic_convolved the observed transmission spectrum must be
DIVIDED by clv_rm_model_convolved
"""
save_to_cpickle('clv_rm_models', clv_rm_models,
config_in['output'], night)
clv_rm_models = None # Forcing memory de-allocation
if not config_in['settings'].get('full_output', False):
del star_grid['spectra_mu']
save_to_cpickle('clv_rm_star_grid', star_grid, config_in['output'])
save_to_cpickle('clv_rm_synthesis', synthesis, config_in['output'])
def plot_clv_rm_models(config_in, night_input=''):
    """Plot diagnostics of the CLV+RM models.

    With no ``night_input`` the stellar-disk maps (limb angle and radial
    velocity field) are shown first, then every night is plotted; with a
    specific night (or list of nights) only those nights are shown.

    Args:
        config_in (dict): parsed configuration, as used by the compute step.
        night_input (str or list): night label(s) to plot; '' means all nights.
    """
    night_dict = from_config_get_nights(config_in)

    # Loaded for consistency with the compute step; also acts as an early
    # check that the synthesis products exist before plotting.
    synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
    star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])

    if night_input == '':
        # Visualize the mu of star
        fig = plt.figure(figsize=(8, 6.5))
        plt.title('Limb angle')
        plt.contourf(star_grid['xx'], star_grid['xx'],
                     star_grid['mu'], 60, cmap=plt.cm.viridis)
        # raw string avoids the invalid-escape SyntaxWarning on '\m'
        plt.colorbar(label=r'$\mu$')  # draw colorbar
        # plot data points.
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.xlabel('x [R_s]')
        plt.ylabel('y [R_s]')
        plt.show()

        # Visualize the RV of star
        fig = plt.figure(figsize=(8, 6.5))
        # CS = plt.contour(xx,xx,v_star,50,linewidths=0.5,colors='k')
        plt.title('Radial velocity field')
        plt.contourf(star_grid['xx'], star_grid['xx'],
                     star_grid['v_star'], 100, cmap=plt.cm.seismic)
        plt.colorbar(label='v_star')  # draw colorbar
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.xlabel('x [R_s]')
        plt.ylabel('y [R_s]')
        plt.show()

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:
        lists = load_from_cpickle('lists', config_in['output'], night)
        clv_rm_models = load_from_cpickle(
            'clv_rm_models', config_in['output'], night)
        observational_pams = load_from_cpickle(
            'observational_pams', config_in['output'], night)

        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(
            lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        # index of the radius-grid point closest to the nominal planet radius
        i0_radius = np.argmin(
            np.abs(clv_rm_models['common']['radius_grid']-1.00))

        for obs in lists['transit_in']:
            # top panel: nominal radius; bottom panel: largest rescaled radius
            ax1.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['stellar_spectra'][i0_radius, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
            ax1.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['missing_flux'][i0_radius, :] /
                     clv_rm_models['common']['continuum_level'],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
            ax2.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['stellar_spectra'][-1, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
            ax2.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['missing_flux'][-1, :] /
                     clv_rm_models['common']['continuum_level'],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

        # for obs in lists['transit_out']:
        #    ax2.plot(clv_rm_models['common']['wave'],
        #             clv_rm_models[obs]['stellar_spectra'],
        #             color=colors_plot['mBJD'][obs], alpha=0.2)
        ax1.set_title(
            'Night: {0:s} \n Input spectra, stellar radius'.format(night))
        ax2.set_title('Stellar radius x {0:2.2f}'.format(
            clv_rm_models['common']['radius_grid'][-1]))
        # raw string avoids invalid-escape warnings on '\l' and '\A'
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(
            cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 30,251 | 46.566038 | 190 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_lightcurve_average.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
# Public API: compute/plot entry points for the averaged transmission
# lightcurve, one pair per rest frame plus the generic versions.
__all__ = ['compute_transmission_lightcurve_average_planetRF',
           'plot_transmission_lightcurve_average_planetRF',
           'compute_transmission_lightcurve_average_stellarRF',
           'plot_transmission_lightcurve_average_stellarRF',
           'compute_transmission_lightcurve_average_observerRF',
           'plot_transmission_lightcurve_average_observerRF',
           'compute_transmission_lightcurve_average',
           'plot_transmission_lightcurve_average'
           ]
def compute_transmission_lightcurve_average_planetRF(config_in, lines_label):
    """Average transmission lightcurve, computed in the planetary rest frame."""
    return compute_transmission_lightcurve_average(config_in, lines_label, reference='planetRF')
def plot_transmission_lightcurve_average_planetRF(config_in, night_input):
    """Plot the averaged transmission lightcurve in the planetary rest frame."""
    return plot_transmission_lightcurve_average(config_in, night_input, reference='planetRF')
def compute_transmission_lightcurve_average_stellarRF(config_in, lines_label):
    """Average transmission lightcurve, computed in the stellar rest frame."""
    return compute_transmission_lightcurve_average(config_in, lines_label, reference='stellarRF')
def plot_transmission_lightcurve_average_stellarRF(config_in, night_input):
    """Plot the averaged transmission lightcurve in the stellar rest frame."""
    return plot_transmission_lightcurve_average(config_in, night_input, reference='stellarRF')
def compute_transmission_lightcurve_average_observerRF(config_in, lines_label):
    """Average transmission lightcurve, computed in the observer rest frame."""
    return compute_transmission_lightcurve_average(config_in, lines_label, reference='observerRF')
def plot_transmission_lightcurve_average_observerRF(config_in, night_input):
    """Plot the averaged transmission lightcurve in the observer rest frame."""
    return plot_transmission_lightcurve_average(config_in, night_input, reference='observerRF')
# Label under which this subroutine's products are saved/loaded.
subroutine_name = 'transmission_lightcurve_average'
# Prefix of the per-night transmission lightcurve files that get averaged.
pick_files = 'transmission_lightcurve'
def compute_transmission_lightcurve_average(config_in, lines_label, reference='planetRF'):
    """Average the per-night transmission lightcurves of a set of lines.

    For every output selection (user / MCMC variants), the per-night
    lightcurves saved by the ``transmission_lightcurve`` step are collected,
    sorted in phase, split into in/full/out-of-transit samples, phase-binned
    with inverse-variance weights, and saved back with ``save_to_cpickle``.

    Args:
        config_in (dict): parsed configuration.
        lines_label (str): key of the spectral-line set to process.
        reference (str): rest frame of the input lightcurves ('planetRF',
            'stellarRF' or 'observerRF').
    """
    night_dict = from_config_get_nights(config_in)
    #instrument_dict = from_config_get_instrument(config_in)
    #system_dict = from_config_get_system(config_in)
    planet_dict = from_config_get_planet(config_in)

    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]  # from_config_get_transmission_lightcurve(config_in)

    # FIX: a comma was missing after 'mcmc_global_MAP', which silently
    # concatenated it with 'user_uncorrected' into one bogus entry.
    output_list = ['user',
                   'mcmc_night_MED',
                   'mcmc_night_MAP',
                   'mcmc_global_MED',
                   'mcmc_global_MAP',
                   'user_uncorrected']

    append_list = ['', '_uncorrected', '_clv_model']

    shared_data = load_from_cpickle('shared', config_in['output'])

    """ Using the line-specific range to define the transmission spectrum region """
    shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
        & (shared_data['coadd']['wave'] < lines_dict['range'][1])
    binned_selection = (shared_data['binned']['wave'] >= lines_dict['range'][0]) \
        & (shared_data['binned']['wave'] < lines_dict['range'][1])

    # np.int was removed in NumPy 1.24; the builtin int is equivalent here
    lightcurve_average_template = {
        'subroutine': subroutine_name,
        'range': lines_dict['range'],
        'wave': shared_data['coadd']['wave'][shared_selection],
        'step': shared_data['coadd']['step'][shared_selection],
        'size': int(np.sum(shared_selection)),
        'binned_wave': shared_data['binned']['wave'][binned_selection],
        'binned_step': shared_data['binned']['step'][binned_selection],
        'binned_size': int(np.sum(binned_selection))
    }

    for output_selection in output_list:
        skip_iteration = False

        try:
            lightcurve_average = load_from_cpickle(subroutine_name+'_'+reference + '_' + output_selection, config_in['output'], lines=lines_label)
            continue
        except (FileNotFoundError, IOError):
            print(" No average transmission lightcurve found for case:{0:s}, computing now ".format(output_selection))
            print()

        lightcurve_average = lightcurve_average_template.copy()

        # doublet sodium in the lab reference frame
        """
        C stands for central
        """
        C_bands = {}
        for passband_key, passband_val in lines_dict['passbands'].items():
            C_bands[passband_key] = {}
            for line_key, line_val in lines_dict['lines'].items():
                # True where the wavelength falls within passband_val of the line center
                C_bands[passband_key][line_key] = (np.abs(lightcurve_average['wave'] - line_val)*2. < passband_val)

        """
        S stands for side
        """
        S_bands = {}
        for band_key, band_val in lines_dict['continuum'].items():
            S_bands[band_key] = (lightcurve_average['wave'] >= band_val[0]) & (lightcurve_average['wave'] <= band_val[1])

        # FIX: the original read planet_dict['total_transit_duration'] here,
        # although the key it tested for was 'full_transit_duration'.
        if 'full_transit_duration' in planet_dict:
            full_transit_duration = planet_dict['full_transit_duration'][0]
        else:
            full_transit_duration = planet_dict['transit_duration'][0]

        if 'total_transit_duration' in planet_dict:
            total_transit_duration = planet_dict['total_transit_duration'][0]
        else:
            total_transit_duration = planet_dict['transit_duration'][0]

        # phase bins spanning the total (T1-T4) and full (T2-T3) transit
        transit_in_bins = np.linspace(
            -total_transit_duration/2./planet_dict['period'][0],
            total_transit_duration/2./planet_dict['period'][0],
            6
        )
        transit_full_bins = np.linspace(
            -full_transit_duration/2./planet_dict['period'][0],
            full_transit_duration/2./planet_dict['period'][0],
            6
        )

        transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
        transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])

        lightcurve_average['transit_in_flag'] = []
        lightcurve_average['transit_full_flag'] = []
        lightcurve_average['transit_out_flag'] = []
        lightcurve_average['transit_in'] = {}
        lightcurve_average['transit_full'] = {}
        lightcurve_average['transit_out'] = {}
        lightcurve_average['observations'] = {'phase': []}
        lightcurve_average['bands_list'] = []
        lightcurve_average['C_bands'] = C_bands
        lightcurve_average['S_bands'] = S_bands
        lightcurve_average['bins'] = {
            'transit_in_bins': transit_in_bins,
            'transit_in_step': transit_in_step,
            'transit_full_bins': transit_full_bins,
            'transit_full_step': transit_full_step
        }

        for band_key in C_bands:
            for name_append in append_list:
                lightcurve_average['observations']['delta_' + band_key + name_append] = []
            lightcurve_average['bands_list'].extend([band_key])

        for night in night_dict:

            try:
                lightcurve = load_from_cpickle(pick_files+'_'+reference + '_' + output_selection, config_in['output'], night, lines_label)
            except Exception:
                # best-effort: a missing night only skips this output selection;
                # narrowed from a bare except so Ctrl-C still propagates
                skip_iteration = True
                continue

            print("compute_transmission_lightcurve Night: ", night)

            lightcurve_average['observations']['phase'].extend(lightcurve['arrays']['observations']['phase'].tolist())
            lightcurve_average['transit_in_flag'].extend(
                lightcurve['arrays']['observations']['transit_in_flag'].tolist())
            lightcurve_average['transit_full_flag'].extend(
                lightcurve['arrays']['observations']['transit_full_flag'].tolist())
            lightcurve_average['transit_out_flag'].extend(
                lightcurve['arrays']['observations']['transit_out_flag'].tolist())

            for band_key in lightcurve_average['bands_list']:
                for name_append in append_list:
                    lightcurve_average['observations']['delta_' + band_key + name_append].extend(
                        lightcurve['arrays']['observations']['delta_' + band_key + name_append].tolist())

        if skip_iteration: continue

        # sort everything in phase and convert the accumulated lists to arrays
        sorting_index = np.argsort(lightcurve_average['observations']['phase'])

        lightcurve_average['observations']['phase'] = np.asarray(lightcurve_average['observations']['phase'])[sorting_index]
        lightcurve_average['transit_in_flag'] = np.asarray(lightcurve_average['transit_in_flag'])[sorting_index]
        lightcurve_average['transit_full_flag'] = np.asarray(lightcurve_average['transit_full_flag'])[sorting_index]
        lightcurve_average['transit_out_flag'] = np.asarray(lightcurve_average['transit_out_flag'])[sorting_index]

        lightcurve_average['transit_in']['phase'] = \
            lightcurve_average['observations']['phase'][lightcurve_average['transit_in_flag']]
        lightcurve_average['transit_full']['phase'] = \
            lightcurve_average['observations']['phase'][lightcurve_average['transit_full_flag']]
        lightcurve_average['transit_out']['phase'] = \
            lightcurve_average['observations']['phase'][lightcurve_average['transit_out_flag']]

        for band_key in lightcurve_average['bands_list']:
            for name_append in append_list:
                lightcurve_average['observations']['delta_' + band_key + name_append] = \
                    np.asarray(lightcurve_average['observations']['delta_' + band_key + name_append])[sorting_index]
                lightcurve_average['transit_in']['delta_' + band_key + name_append] = \
                    lightcurve_average['observations']['delta_' + band_key + name_append][lightcurve_average['transit_in_flag']]
                lightcurve_average['transit_full']['delta_' + band_key + name_append] = \
                    lightcurve_average['observations']['delta_' + band_key + name_append][lightcurve_average['transit_full_flag']]
                lightcurve_average['transit_out']['delta_' + band_key + name_append] = \
                    lightcurve_average['observations']['delta_' + band_key + name_append][lightcurve_average['transit_out_flag']]

        # extend the binning grid to cover the out-of-transit observations
        pre_duration = transit_full_bins[0] - lightcurve_average['transit_out']['phase'][0]
        if pre_duration > 0:
            nsteps_pre = int(pre_duration / transit_full_step)
            if pre_duration % transit_full_step > 0.0:
                nsteps_pre += 1
        else:
            nsteps_pre = 0

        post_duration = lightcurve_average['transit_out']['phase'][-1] - transit_full_bins[-1]
        if post_duration > 0:
            nsteps_post = int(post_duration / transit_full_step)
            if post_duration % transit_full_step > 0.0:
                nsteps_post += 1
        else:
            nsteps_post = 0

        transit_bins = np.arange(transit_full_bins[0] - nsteps_pre * transit_full_step,
                                 transit_full_bins[-1] + (nsteps_post + 1.1) * transit_full_step,
                                 transit_full_step)

        lightcurve_average['binned'] = {
            'observations': {
                'phase': np.zeros(len(transit_bins)),
            },
            'transit_in': {},
            'transit_full': {},
            'transit_step': {},
            'transit_out': {},
        }

        for band_key in C_bands:
            for name_append in append_list:
                lightcurve_average['binned']['observations']['delta_' + band_key + name_append] = np.zeros([len(transit_bins), 2])

        transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
        transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
        transit_full_flag = np.zeros(len(transit_bins), dtype=bool)

        n_a = 0
        for nb in range(0, len(transit_bins) - 1):
            sel = (lightcurve_average['observations']['phase'] >= transit_bins[nb]) \
                & (lightcurve_average['observations']['phase'] < transit_bins[nb + 1])

            if np.sum(sel) <= 0: continue

            lightcurve_average['binned']['observations']['phase'][n_a] = np.average(
                lightcurve_average['observations']['phase'][sel])

            for band_key in C_bands:
                for name_append in append_list:
                    # inverse-variance weighted mean and its formal error
                    lightcurve_average['binned']['observations']['delta_' + band_key + name_append][n_a, 0], sum_weights = np.average(
                        lightcurve_average['observations']['delta_' + band_key + name_append][sel, 0],
                        weights=1. / lightcurve_average['observations']['delta_' + band_key + name_append][sel, 1] ** 2,
                        returned=True)
                    lightcurve_average['binned']['observations']['delta_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)

            if np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
                    total_transit_duration/2./planet_dict['period'][0]:
                transit_out_flag[n_a] = True
            elif np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
                    full_transit_duration/2./planet_dict['period'][0]:
                transit_in_flag[n_a] = True
            else:
                transit_full_flag[n_a] = True

            n_a += 1  # bins actually computed

        # flags index the full-length array, so slice with them before truncating
        lightcurve_average['binned']['transit_in']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_in_flag]
        lightcurve_average['binned']['transit_full']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_full_flag]
        lightcurve_average['binned']['transit_out']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_out_flag]
        lightcurve_average['binned']['observations']['phase'] = lightcurve_average['binned']['observations']['phase'][:n_a]

        for band_key in C_bands:
            for name_append in append_list:
                lightcurve_average['binned']['transit_in']['delta_' + band_key + name_append] = \
                    lightcurve_average['binned']['observations']['delta_' + band_key + name_append][transit_in_flag, :]
                lightcurve_average['binned']['transit_full']['delta_' + band_key + name_append] = \
                    lightcurve_average['binned']['observations']['delta_' + band_key + name_append][transit_full_flag, :]
                lightcurve_average['binned']['transit_out']['delta_' + band_key + name_append] = \
                    lightcurve_average['binned']['observations']['delta_' + band_key + name_append][transit_out_flag, :]
                lightcurve_average['binned']['observations']['delta_' + band_key + name_append] = \
                    lightcurve_average['binned']['observations']['delta_' + band_key + name_append][:n_a, :]

        save_to_cpickle(subroutine_name + '_'+reference + '_' + output_selection, lightcurve_average, config_in['output'], lines=lines_label)
def plot_transmission_lightcurve_average(config_in, night_input='', reference='planetRF'):
    """Plot the averaged transmission lightcurve for every central passband.

    Args:
        config_in (dict): parsed configuration.
        night_input (str): unused here; kept for interface consistency with
            the other plot routines.
        reference (str): rest frame of the lightcurve products to plot.
    """
    import matplotlib.pyplot as plt

    # NOTE(review): the compute step saves its products with an
    # output-selection suffix and a ``lines=`` label, while this loader uses
    # the bare name — confirm the intended file name before relying on this.
    lightcurve_average = load_from_cpickle('transmission_lightcurve_average_'+reference, config_in['output'])

    for band_key in lightcurve_average['C_bands']:

        plt.figure(figsize=(12, 6))
        plt.title('Average transmission lightcurve\n {0:s}'.format(band_key))

        # individual observations (semi-transparent) ...
        plt.errorbar(lightcurve_average['observations']['phase'],
                     lightcurve_average['observations']['delta_' + band_key][:, 0],
                     yerr=lightcurve_average['observations']['delta_' + band_key][:, 1],
                     fmt='.', c='k', alpha=0.25, label='observations')
        # ... and the phase-binned averages
        plt.errorbar(lightcurve_average['binned']['observations']['phase'],
                     lightcurve_average['binned']['observations']['delta_' + band_key][:, 0],
                     yerr=lightcurve_average['binned']['observations']['delta_' + band_key][:, 1],
                     fmt='.', c='k', alpha=1.0, label='observations')

        # shade the regions outside the transit
        plt.axvspan(-1, lightcurve_average['bins']['transit_in_bins'][0], alpha=0.25, color='green')
        plt.axvspan(lightcurve_average['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
        plt.axhline(0, c='C1')
        plt.xlim(lightcurve_average['observations']['phase'][0]-0.01,
                 lightcurve_average['observations']['phase'][-1]+0.01)
        # raw strings avoid invalid-escape SyntaxWarnings on '\l', '\A', '\m'
        plt.xlabel(r'$\lambda$ [$\AA$]')
        plt.ylabel(r'$\mathcal{R}$ - 1.')
        plt.legend()
        plt.show()
| 16,394 | 47.794643 | 145 | py |
SLOPpy | SLOPpy-main/SLOPpy/differential_refraction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.differential_refraction_preparation import compute_differential_refraction_preparation
# Public API: differential-refraction computation/plot, plus the 'update'
# variants that rerun the correction under an appended label.
__all__ = ["compute_differential_refraction",
           "plot_differential_refraction",
           "compute_differential_refraction_update",
           "plot_differential_refraction_update"]
def compute_differential_refraction_update(config_in):
    """Rerun the differential-refraction correction, saving under the 'update' label."""
    return compute_differential_refraction(config_in, append_name='update')
def plot_differential_refraction_update(config_in, night_input=''):
    """Plot the differential-refraction products saved under the 'update' label."""
    return plot_differential_refraction(config_in, night_input, append_name='update')
def compute_differential_refraction(config_in, append_name=None):
    """Compute and store the differential refraction correction per night.

    Each observation is divided by a reference spectrum (the nightly coadd,
    or an externally defined reference) and the resulting flux ratio is
    modelled either over the full spectrum or order-by-order, using splines
    or Chebyshev polynomials with iterative sigma clipping. Results are
    saved as 'refraction*' pickle files.

    Args:
        config_in (dict): parsed configuration dictionary.
        append_name (str | None): optional suffix (e.g. 'update') selecting
            an alternative set of input/output pickle files.
    """
    # The suffix changes both the log label and the pickle file names
    if append_name:
        subroutine_name = 'differential_refraction_' + append_name
        filename = 'refraction_' + append_name
    else:
        subroutine_name = 'differential_refraction'
        filename = 'refraction'

    # Make sure the preparatory products (rescaled spectra, coadd) exist
    compute_differential_refraction_preparation(config_in, append_name)

    night_dict = from_config_get_nights(config_in)

    for night in night_dict:
        # Skip nights whose correction has already been computed and saved
        try:
            refraction = load_from_cpickle(filename, config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()

        refraction_dict = from_config_refraction(config_in, night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving input and calibration data """
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'],
                                           use_refraction=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        preparation = load_from_cpickle(filename + '_preparation', config_in['output'], night)

        # An externally defined reference spectrum may replace the nightly coadd
        defined_reference = night_dict[night]['refraction'].get('reference', False)

        if defined_reference:
            reference = load_from_cpickle(filename + '_reference', config_in['output'])
            preparation['coadd']['wave'] = reference['wave']
            preparation['coadd']['step'] = reference['step']
            preparation['coadd']['rescaled'] = reference['rescaled']

            # The external reference is only usable if the observations were
            # prepared in the absolute stellar reference frame
            if not preparation.get('absolute_SRF', False):
                print(" Observations and reference spectra are in different reference system ")
                quit()

        processed = {
            'subroutine': subroutine_name,
            'coadd': {
                'wave': preparation['coadd']['wave'],
                'step': preparation['coadd']['step'],
                'size': preparation['coadd']['size']
            }
        }

        refraction = {
            'subroutine': subroutine_name,
            'wave': preparation['coadd']['wave'],
            'binned': {}
        }

        # Coarse wavelength grid (20x the coadd step) used to store binned
        # fit residuals for the diagnostic plots
        refraction['binned']['wave'] = np.arange(preparation['coadd']['wave'][0],
                                                 preparation['coadd']['wave'][-11],
                                                 20.*preparation['coadd']['step'][0], dtype=np.double)
        refraction['binned']['size'] = np.size(refraction['binned']['wave'])
        refraction['binned']['step'] = np.ones(refraction['binned']['size'], dtype=np.double) \
            * 20. * preparation['coadd']['step'][0]

        if refraction_dict['approach'] == 'full_spectrum':
            print(" Differential refraction performed over the full spectrum")
        elif refraction_dict['approach'] == 'individual_order':
            print(" Differential refraction performed order-by-order ")
        else:
            raise ValueError("ERROR: fitting approach for differential refraction not implemented")

        if refraction_dict['method'] == 'spline':
            print(" Modelling performed with spline")
            print(" Spline order for differential refraction fit: ",
                  refraction_dict['fit_order'])
            print(" Knots spacing (in Angstrom) for refraction fit: ",
                  refraction_dict['knots_spacing'])
        elif refraction_dict['method'] == 'polynomial':
            print(" Modelling performed with polynomials")
            print(" Chebyshev polynomial order for differential refraction fit: ",
                  refraction_dict['fit_order'])
        else:
            raise ValueError("ERROR: fitting method for differential refraction not implemented")

        print(" Number of iterations: ",
              refraction_dict['fit_iters'])
        print()

        """ Now each observation is divided by the reference spectrum, after being doppler-shifted to the observer RF
            The result is then used to model the flux variation
        """
        approach = refraction_dict.get('approach', 'full_spectrum')

        if approach == 'full_spectrum':

            for obs in lists['observations']:
                print(" Division by reference spectrum and fit of the flux variation: ", obs)

                refraction[obs] = {}
                processed[obs] = {}

                # RV shift used later to bring the fitted model back to the
                # observer reference frame
                if preparation['absolute_SRF']:
                    rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
                else:
                    rv_shift = observational_pams[obs]['rv_shift_ORF2SRF_mod']

                processed[obs]['ratio'] = preparation[obs]['rescaled'] / preparation['coadd']['rescaled']
                processed[obs]['ratio_err'] = processed[obs]['ratio'] * \
                    np.sqrt((preparation[obs]['rescaled_err']/preparation[obs]['rescaled'])**2
                            + (preparation['coadd']['rescaled_err']/preparation['coadd']['rescaled'])**2)

                # Only points with a meaningful flux ratio enter the fit
                refraction[obs]['fit_flag'] = (processed[obs]['ratio'] > 0.01)

                if refraction_dict['method'] == 'spline':

                    for n_iter in range(0, refraction_dict['fit_iters']):
                        wave = processed['coadd']['wave'][refraction[obs]['fit_flag']]

                        """ picking the number of knots """
                        nknots = ((np.amax(wave) - np.amin(wave)) / refraction_dict['knots_spacing'])

                        """ picking the indices of the knots"""
                        idx_knots = (np.arange(1, len(wave) - 1, (len(wave) - 2.) / nknots)).astype('int')

                        """ passing from indices to knots values """
                        processed[obs]['knots'] = wave[idx_knots]

                        refraction[obs]['coeff'] = \
                            sci_int.splrep(
                                processed['coadd']['wave'][refraction[obs]['fit_flag']],
                                processed[obs]['ratio'][refraction[obs]['fit_flag']],
                                task=-1,
                                k=refraction_dict['fit_order'],
                                t=processed[obs]['knots'])

                        refraction[obs]['fit_s1d'] = sci_int.splev(processed['coadd']['wave'], refraction[obs]['coeff'])
                        processed[obs]['residuals'] = processed[obs]['ratio'] - refraction[obs]['fit_s1d']

                        # Sigma-clip outliers before the next iteration
                        if n_iter < refraction_dict['fit_iters'] - 1:
                            std = np.std(processed[obs]['residuals'])
                            refraction[obs]['fit_flag'] = (refraction[obs]['fit_flag']) \
                                & (np.abs(processed[obs]['residuals']) <
                                   refraction_dict['fit_sigma'] * std)

                elif refraction_dict['method'] == 'polynomial':

                    # Exclude the spectrum edges from the polynomial fit
                    refraction[obs]['fit_flag'][:50] = False
                    refraction[obs]['fit_flag'][-50:] = False

                    for n_iter in range(0, refraction_dict['fit_iters']):
                        refraction[obs]['coeff'] = np.polynomial.chebyshev.chebfit(
                            processed['coadd']['wave'][refraction[obs]['fit_flag']],
                            processed[obs]['ratio'][refraction[obs]['fit_flag']],
                            refraction_dict['fit_order'])

                        refraction[obs]['fit_s1d'] = \
                            np.polynomial.chebyshev.chebval(processed['coadd']['wave'], refraction[obs]['coeff'])
                        processed[obs]['residuals'] = processed[obs]['ratio'] - refraction[obs]['fit_s1d']

                        # Sigma-clip outliers before the next iteration
                        if n_iter < refraction_dict['fit_iters'] - 1:
                            std = np.std(processed[obs]['residuals'])
                            refraction[obs]['fit_flag'] = (refraction[obs]['fit_flag']) \
                                & (np.abs(processed[obs]['residuals']) <
                                   refraction_dict['fit_sigma'] * std)

                """ Going back to the observer RF and rebinning the polynomial fit into the observed orders """
                refraction[obs]['fit_e2ds'] = \
                    rebin_1d_to_2d(processed['coadd']['wave'],
                                   input_data['coadd']['step'],
                                   refraction[obs]['fit_s1d'],
                                   input_data[obs]['wave'],
                                   input_data[obs]['step'],
                                   rv_shift=-rv_shift,
                                   preserve_flux=False,
                                   )

                """ Zero or negative values are identified, flagged and substituted with another value """
                refraction[obs]['null'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']],
                                                   dtype=bool)
                for order in range(0, observational_pams['n_orders']):
                    refraction[obs]['fit_e2ds'][order, :], _, refraction[obs]['null'][order, :] = \
                        replace_values_errors_with_interpolation_1d(refraction[obs]['fit_e2ds'][order, :],
                                                                    refraction[obs]['fit_e2ds'][order, :],
                                                                    less_than=0.001)

                processed[obs]['flux_rebinned_stellarRF_corrected'] = preparation[obs]['flux_rebinned_stellarRF'] \
                    / refraction[obs]['fit_s1d']

                # Binned residuals are kept for the diagnostic plots
                refraction[obs]['binned_residuals'] = \
                    rebin_1d_to_1d(processed['coadd']['wave'],
                                   processed['coadd']['step'],
                                   processed[obs]['residuals'],
                                   refraction['binned']['wave'],
                                   refraction['binned']['step'],
                                   preserve_flux=False)

        elif approach == 'individual_order':

            for obs in lists['observations']:
                print(" Division by reference spectrum and fit of the flux variation: ", obs)

                refraction[obs] = {}
                processed[obs] = {}

                if preparation['absolute_SRF']:
                    rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
                else:
                    rv_shift = observational_pams[obs]['rv_shift_ORF2SRF_mod']

                """ Going back to the observer RF and rebinning the spectrum into the observed orders """
                preserve_flux = input_data[obs].get('absolute_flux', True)

                # processed[obs]['master_flux'] = \
                processed[obs]['master_flux'] = \
                    rebin_1d_to_2d(preparation['coadd']['wave'],
                                   preparation['coadd']['step'],
                                   preparation['coadd']['rescaled'],
                                   input_data[obs]['wave'],
                                   input_data[obs]['step'],
                                   preserve_flux=preserve_flux,
                                   rv_shift=-rv_shift)
                # processed[obs]['master_ferr'] = \
                processed[obs]['master_ferr'] = \
                    rebin_1d_to_2d(preparation['coadd']['wave'],
                                   preparation['coadd']['step'],
                                   preparation['coadd']['rescaled_err'],
                                   input_data[obs]['wave'],
                                   input_data[obs]['step'],
                                   preserve_flux=preserve_flux,
                                   rv_shift=-rv_shift,
                                   is_error=True)

                """ Zero or negative values are identified, flagged and substituted with another value """
                processed[obs]['master_flux'], processed[obs]['master_ferr'], processed[obs]['master_null'] = \
                    replace_values_errors_with_interpolation_2d(processed[obs]['master_flux'],
                                                                processed[obs]['master_ferr'],
                                                                less_than=0.001)

                # processed[obs]['ratio'] = preparation[obs]['rescaled_blazed'] / master_flux
                processed[obs]['ratio'] = input_data[obs]['e2ds'] \
                    / preparation[obs]['rescaling'] \
                    / (processed[obs]['master_flux'] * calib_data['blaze'])
                processed[obs]['ratio_err'] = processed[obs]['ratio'] * \
                    np.sqrt(preparation[obs]['rescaling']/input_data[obs]['e2ds']
                            + (processed[obs]['master_ferr']/processed[obs]['master_flux'])**2)

                refraction[obs]['fit_e2ds'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
                processed[obs]['residuals'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
                refraction[obs]['fit_flag'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']],
                                                       dtype=bool)

                # Each echelle order is fitted independently; the coefficients
                # are stored under a per-order key
                for order in range(0, input_data[obs]['n_orders']):
                    order_coeff_name = 'order_' + repr(order)
                    refraction[obs]['fit_flag'][order, :] = (processed[obs]['ratio'][order, :] > 0.1)

                    if refraction_dict['method'] == 'spline':

                        for n_iter in range(0, refraction_dict['fit_iters']):
                            wave = input_data[obs]['wave'][order, refraction[obs]['fit_flag'][order, :]]

                            """ picking the number of knots """
                            nknots = ((np.amax(wave) - np.amin(wave)) / refraction_dict['knots_spacing'])

                            """ picking the indices of the knots"""
                            idx_knots = (np.arange(1, len(wave) - 1, (len(wave) - 2.) / nknots)).astype('int')

                            """ passing from indices to knots values """
                            refraction[obs]['knots'] = wave[idx_knots]

                            refraction[obs][order_coeff_name] = \
                                sci_int.splrep(
                                    input_data[obs]['wave'][order, refraction[obs]['fit_flag'][order, :]],
                                    processed[obs]['ratio'][order, refraction[obs]['fit_flag'][order, :]],
                                    task=-1,
                                    k=refraction_dict['fit_order'],
                                    t=refraction[obs]['knots'])

                            refraction[obs]['fit_e2ds'][order, :] = \
                                sci_int.splev(input_data[obs]['wave'][order, :],
                                              refraction[obs][order_coeff_name])
                            processed[obs]['residuals'][order, :] = processed[obs]['ratio'][order, :] \
                                - refraction[obs]['fit_e2ds'][order, :]

                            # Sigma-clip outliers before the next iteration
                            if n_iter < refraction_dict['fit_iters'] - 1:
                                std = np.std(processed[obs]['residuals'][order, :])
                                refraction[obs]['fit_flag'][order, :] = (refraction[obs]['fit_flag'][order, :]) \
                                    & (np.abs(processed[obs]['residuals'][order, :]) <
                                       refraction_dict['fit_sigma'] * std)

                    elif refraction_dict['method'] == 'polynomial':

                        # Exclude the order edges from the polynomial fit
                        refraction[obs]['fit_flag'][order, :50] = False
                        refraction[obs]['fit_flag'][order, -50:] = False

                        for n_iter in range(0, refraction_dict['fit_iters']):
                            refraction[obs][order_coeff_name] = np.polynomial.chebyshev.chebfit(
                                input_data[obs]['wave'][order, refraction[obs]['fit_flag'][order, :]],
                                processed[obs]['ratio'][order, refraction[obs]['fit_flag'][order, :]],
                                refraction_dict['fit_order'])

                            refraction[obs]['fit_e2ds'][order, :] = \
                                np.polynomial.chebyshev.chebval(input_data[obs]['wave'][order, :],
                                                                refraction[obs][order_coeff_name])
                            processed[obs]['residuals'][order, :] = processed[obs]['ratio'][order, :] \
                                - refraction[obs]['fit_e2ds'][order, :]

                            # Sigma-clip outliers before the next iteration
                            if n_iter < refraction_dict['fit_iters'] - 1:
                                std = np.std(processed[obs]['residuals'][order, :])
                                refraction[obs]['fit_flag'][order, :] = (refraction[obs]['fit_flag'][order, :]) \
                                    & (np.abs(processed[obs]['residuals'][order, :]) <
                                       refraction_dict['fit_sigma'] * std)

                e2ds_corrected = input_data[obs]['e2ds'] / refraction[obs]['fit_e2ds']
                e2ds_corrected_err = input_data[obs]['e2ds_err'] / refraction[obs]['fit_e2ds']

                preserve_flux = input_data[obs].get('absolute_flux', True)
                processed[obs]['flux_rebinned_stellarRF_corrected'] = \
                    rebin_2d_to_1d(input_data[obs]['wave'],
                                   input_data[obs]['step'],
                                   e2ds_corrected,
                                   calib_data['blaze'],
                                   processed['coadd']['wave'],
                                   input_data['coadd']['step'],
                                   preserve_flux=preserve_flux,
                                   rv_shift=rv_shift)

                processed[obs]['err_flux_rebinned_stellarRF_corrected'] = \
                    rebin_2d_to_1d(input_data[obs]['wave'],
                                   input_data[obs]['step'],
                                   e2ds_corrected_err,
                                   calib_data['blaze'],
                                   processed['coadd']['wave'],
                                   input_data['coadd']['step'],
                                   preserve_flux=preserve_flux,
                                   rv_shift=rv_shift,
                                   is_error=True)

                processed[obs]['flux_rebinned_stellarRF_corrected'], \
                    processed[obs]['err_flux_rebinned_stellarRF_corrected'], _ = \
                    replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF_corrected'],
                                                                processed[obs]['err_flux_rebinned_stellarRF_corrected'],
                                                                less_than=0.001)

                # Binned residuals are kept for the diagnostic plots
                refraction[obs]['binned_residuals'] = \
                    rebin_2d_to_1d(input_data[obs]['wave'],
                                   input_data[obs]['step'],
                                   processed[obs]['residuals'],
                                   np.ones_like(processed[obs]['residuals']),
                                   refraction['binned']['wave'],
                                   refraction['binned']['step'],
                                   rv_shift=0.0000,
                                   preserve_flux=False)

        else:
            print(" Please choose either full_spectrum or individual_order as preferred approach")
            quit()

        # Drop bulky intermediate arrays unless full output was requested
        if not config_in['settings'].get('full_output', False):
            for obs in lists['observations']:
                del processed[obs]['ratio_err']
                try:
                    del processed[obs]['err_flux_rebinned_stellarRF_corrected']
                    del processed[obs]['master_flux']
                    del processed[obs]['master_ferr']
                    del processed[obs]['master_null']
                except:
                    pass

        if append_name:
            save_to_cpickle('refraction_' + append_name + '_processed', processed, config_in['output'], night)
            save_to_cpickle('refraction_' + append_name, refraction, config_in['output'], night)
        else:
            save_to_cpickle('refraction_processed', processed, config_in['output'], night)
            save_to_cpickle('refraction', refraction, config_in['output'], night)
def plot_differential_refraction(config_in, night_input='', append_name=None):
    """Diagnostic plots for the differential refraction correction.

    For each requested night shows: input vs corrected spectra, the fitted
    obs/master flux ratios, and the fit residuals. A fourth plot (corrected
    e2ds spectra) exists below but is currently disabled by a 'continue'.

    Args:
        config_in (dict): parsed configuration dictionary.
        night_input (str): a single night to plot, or '' for all nights.
        append_name (str | None): optional suffix (e.g. 'update') selecting
            the alternative pickle set.
    """
    night_dict = from_config_get_nights(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        refraction_dict = from_config_refraction(config_in, night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        input_data = retrieve_observations(config_in['output'], night, lists['observations'],
                                           use_refraction=False, use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        try:
            """ Retrieving the analysis"""
            if append_name:
                processed = load_from_cpickle('refraction_' + append_name + '_processed', config_in['output'], night)
                preparation = load_from_cpickle('refraction_' + append_name + '_preparation', config_in['output'],
                                                night)
                refraction = load_from_cpickle('refraction_' + append_name, config_in['output'], night)
            else:
                processed = load_from_cpickle('refraction_processed', config_in['output'], night)
                preparation = load_from_cpickle('refraction_preparation', config_in['output'], night)
                refraction = load_from_cpickle('refraction', config_in['output'], night)
        except:
            print(" Failed in retrieving processed data")
            return

        approach = refraction_dict.get('approach', 'full_spectrum')

        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)

        offset = 0.10
        y_limits = [0.8, 1.2]

        # Random down-sampling masks so the scatter plots stay light
        flag_e2ds = {}
        flag_coadd = {}
        for i, obs in enumerate(lists['observations']):
            shrink_factor = 4
            if input_data[obs]['n_orders'] > shrink_factor:
                factor = (input_data[obs]['n_orders'] * input_data[obs]['n_pixels']) \
                    // (input_data[obs]['n_pixels'] * shrink_factor)
                flag_e2ds[obs] = (np.random.choice(a=([False] * (factor-1)) + [True],
                                                   size=(input_data[obs]['n_orders'], input_data[obs]['n_pixels'])))
                flag_coadd[obs] = \
                    np.random.choice(a=([False] * factor) + [True], size=input_data['coadd']['size'])
            else:
                flag_e2ds[obs] = np.ones([input_data[obs]['n_orders'], input_data[obs]['n_pixels']], dtype=bool)
                flag_coadd[obs] = np.ones(input_data['coadd']['size'], dtype=bool)

        # --- Plot 1: input spectra (top) vs corrected spectra (bottom) ---
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])

        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):
            """ We slim down the plot """
            # Only the first observation carries the legend label
            if i == 0:
                ax1.scatter(preparation['coadd']['wave'][flag_coadd[obs]],
                            preparation[obs]['flux_rebinned_stellarRF'][flag_coadd[obs]] /
                            preparation[obs]['rescaling'],
                            c=colors_scatter['mBJD'][obs], s=2, alpha=0.2, label='observation (SRF)')
            else:
                ax1.scatter(preparation['coadd']['wave'][flag_coadd[obs]],
                            preparation[obs]['flux_rebinned_stellarRF'][flag_coadd[obs]] /
                            preparation[obs]['rescaling'],
                            c=colors_scatter['mBJD'][obs], s=2, alpha=0.2)

            ax2.scatter(processed['coadd']['wave'][flag_coadd[obs]],
                        processed[obs]['flux_rebinned_stellarRF_corrected'][flag_coadd[obs]] /
                        preparation[obs]['rescaling'],
                        c=colors_scatter['mBJD'][obs], s=3, alpha=0.2)

        ax1.plot(preparation['coadd']['wave'], preparation['coadd']['rescaled'], c='k', lw=1, alpha=0.5,
                 label='reference spectrum')
        ax2.plot(preparation['coadd']['wave'], preparation['coadd']['rescaled'], c='k', lw=1, alpha=0.5)

        ax1.set_xlim(processed['coadd']['wave'][0], processed['coadd']['wave'][-1])
        ax1.set_ylim(y_limits)
        ax2.set_ylim(y_limits)

        ax1.legend(loc=1)
        ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
        ax2.set_title('After differential refraction correction')
        ax2.set_xlabel('$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        """
        PLOT
        """
        # --- Plot 2: obs/master ratio with the fitted model, one offset
        #     trace per observation ---
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])

        ax = plt.subplot(gs[0, 0])
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            # Vertical offset and y-range derived from the first observation
            if i == 0:
                offset = np.std(processed[obs]['ratio'][refraction[obs]['fit_flag']].flatten()) * 6
                average = np.average(processed[obs]['ratio'][refraction[obs]['fit_flag']].flatten())
                y_limits = [average - offset, average + offset]

            if approach == 'full_spectrum':
                flag = flag_coadd[obs] & refraction[obs]['fit_flag']
                wave = refraction['wave']
            elif approach == 'individual_order':
                flag = flag_e2ds[obs] & refraction[obs]['fit_flag']
                wave = input_data[obs]['wave']

            ax.scatter(wave[flag],
                       processed[obs]['ratio'][flag] + offset * i,
                       c=colors_scatter['mBJD'][obs], s=1, alpha=0.50, zorder=2)

            # Points excluded from the fit are shown in black
            ax.scatter(wave[~refraction[obs]['fit_flag']],
                       processed[obs]['ratio'][~refraction[obs]['fit_flag']] + offset * i,
                       c='k', s=2, alpha=0.1, zorder=1)

            for order in range(0, input_data[obs]['n_orders']):
                ax.plot(input_data[obs]['wave'][order, :],
                        refraction[obs]['fit_e2ds'][order, :] + offset * i,
                        c='k', lw=1, alpha=0.5, zorder=5)

        # NOTE(review): 'i' here is the index left over from the loop above
        y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
                           max(y_limits[1] + offset * i, y_limits[1])]
        ax.set_ylim(y_limits_offset)
        ax.set_xlabel('$\lambda$ [$\AA$]')
        # ax.legend(loc=3)
        ax.set_title('Night: {0:s} \n Differential refraction correction - Fit of the ratio obs/master'.format(night))

        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        """
        PLOT: residuals of the fit
        """
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])

        ax = plt.subplot(gs[0, 0])
        cbax1 = plt.subplot(gs[:, 1])

        approach = refraction_dict.get('approach', 'full_spectrum')

        for i, obs in enumerate(lists['observations']):

            if i == 0:
                median = np.median(processed[obs]['residuals'][refraction[obs]['fit_flag']].flatten())
                offset = np.std(processed[obs]['residuals'][refraction[obs]['fit_flag']].flatten()) * 6
                y_limits = [median - offset, median + offset]

            if approach == 'full_spectrum':
                flag = flag_coadd[obs] & refraction[obs]['fit_flag']
                wave = refraction['wave']
            elif approach == 'individual_order':
                flag = flag_e2ds[obs] & refraction[obs]['fit_flag']
                wave = input_data[obs]['wave']

            ax.scatter(wave[flag],
                       processed[obs]['residuals'][flag] + offset * i,
                       c=colors_scatter['mBJD'][obs], s=1, alpha=0.50, zorder=2)

            ax.scatter(wave[~refraction[obs]['fit_flag']],
                       processed[obs]['residuals'][~refraction[obs]['fit_flag']] + offset * i,
                       c='k', s=2, alpha=0.1, zorder=1)

            ax.axhline(offset * i, c='k', zorder=3)

        y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
                           max(y_limits[1] + offset * i, y_limits[1])]
        ax.set_ylim(y_limits_offset)
        ax.set_xlabel('$\lambda$ [$\AA$]')
        # ax.legend(loc=3)
        ax.set_title(
            'Night: {0:s} \n Differential refraction correction - Residuals of the fit on ratio obs/master'.format(
                night))

        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        # NOTE(review): everything below is dead code — the loop restarts here
        continue

        """
        PLOT: corrected e2ds
        """
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])

        ax = plt.subplot(gs[0, 0])
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            e2ds_corrected = input_data[obs]['e2ds'] / refraction[obs]['fit_e2ds']

            ax.scatter(input_data[obs]['wave'],
                       e2ds_corrected / preparation[obs]['rescaling'],
                       c=colors_scatter['mBJD'][obs], s=2, alpha=0.20)

            # for order in range(0, np.size(input_data[obs]['wave'][:, 0])):
            #
            #     ax.plot(input_data[obs]['wave'][order, :],
            #             refraction[obs]['fit_e2ds'][order, :],
            #             c=color_array, lw=1)

        ax.set_xlabel('$\lambda$ [$\AA$]')
        ax.set_title('Night: {0:s} \n Rescaled e2ds spectra after differential refraction correction'.format(night))

        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 32,600 | 48.395455 | 120 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_lightcurve.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
# Public API: one compute/plot pair per reference frame (planet, stellar,
# observer) plus the generic entry points they all delegate to.
__all__ = ['compute_transmission_lightcurve_planetRF',
           'plot_transmission_lightcurve_planetRF',
           'compute_transmission_lightcurve_stellarRF',
           'plot_transmission_lightcurve_stellarRF',
           'compute_transmission_lightcurve_observerRF',
           'plot_transmission_lightcurve_observerRF',
           'compute_transmission_lightcurve',
           'plot_transmission_lightcurve'
           ]
def compute_transmission_lightcurve_planetRF(config_in, lines_label):
    """Compute the transmission lightcurve in the planetary rest frame."""
    compute_transmission_lightcurve(config_in, lines_label, 'planetRF')
def plot_transmission_lightcurve_planetRF(config_in, night_input):
    """Plot the transmission lightcurve in the planetary rest frame."""
    plot_transmission_lightcurve(config_in, night_input, 'planetRF')
def compute_transmission_lightcurve_stellarRF(config_in, lines_label):
    """Compute the transmission lightcurve in the stellar rest frame."""
    compute_transmission_lightcurve(config_in, lines_label, 'stellarRF')
def plot_transmission_lightcurve_stellarRF(config_in, night_input):
    """Plot the transmission lightcurve in the stellar rest frame."""
    plot_transmission_lightcurve(config_in, night_input, 'stellarRF')
def compute_transmission_lightcurve_observerRF(config_in, lines_label):
    """Compute the transmission lightcurve in the observer rest frame."""
    compute_transmission_lightcurve(config_in, lines_label, 'observerRF')
def plot_transmission_lightcurve_observerRF(config_in, night_input):
    """Plot the transmission lightcurve in the observer rest frame."""
    plot_transmission_lightcurve(config_in, night_input, 'observerRF')
# Label used to build this step's output pickle file names
subroutine_name = 'transmission_lightcurve'
# Pickle prefix of the transmission spectra produced by the previous step,
# which this module reads as input
pick_files = 'transmission_spectrum'
def compute_transmission_lightcurve(config_in, lines_label, reference='planetRF'):
do_average_instead_of_sum = True
night_dict = from_config_get_nights(config_in)
#instrument_dict = from_config_get_instrument(config_in)
#system_dict = from_config_get_system(config_in)
planet_dict = from_config_get_planet(config_in)
spectral_lines = from_config_get_spectral_lines(config_in)
#shared_data = load_from_cpickle('shared', config_in['output'])
lines_dict = spectral_lines[lines_label] # from_config_get_transmission_lightcurve(config_in)
if 'full_transit_duration' in planet_dict:
full_transit_duration = planet_dict['total_transit_duration'][0]
else:
full_transit_duration = planet_dict['transit_duration'][0]
if 'total_transit_duration' in planet_dict:
total_transit_duration = planet_dict['total_transit_duration'][0]
else:
total_transit_duration = planet_dict['transit_duration'][0]
transit_in_bins = np.linspace(
-total_transit_duration/2./planet_dict['period'][0],
total_transit_duration/2./planet_dict['period'][0],
6
)
transit_full_bins = np.linspace(
-full_transit_duration/2./planet_dict['period'][0],
full_transit_duration/2./planet_dict['period'][0],
6
)
transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])
output_list = ['user',
'mcmc_night_MED',
'mcmc_night_MAP',
'mcmc_global_MED',
'mcmc_global_MAP']
append_list = ['', '_uncorrected', '_clv_model']
for output_selection in output_list:
skip_iteration = False
for night in night_dict:
print("compute_transmission_lightcurve Night: ", night)
try:
transmission = load_from_cpickle(pick_files+'_'+reference + '_' + output_selection, config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
print('No transmission spectra found for case:{0:s}, be sure to run transmission_spectra before this step'.format(output_selection))
skip_iteration = True
if skip_iteration: continue
try:
lightcurve = load_from_cpickle(subroutine_name +'_'+reference+ '_' + output_selection, config_in['output'], night, lines_label)
print()
continue
except (FileNotFoundError, IOError):
print(" No transmission_lightcurve file found, computing now ")
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
#calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
#input_data = retrieve_observations( config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
# doublet sodium in the lab reference frame
"""
C stands for central
"""
C_bands = {}
for passband_key, passband_val in lines_dict['passbands'].items():
C_bands[passband_key] = {}
for line_key, line_val in lines_dict['lines'].items():
C_bands[passband_key][line_key] = (np.abs(transmission['wave'] - line_val) * 2. < passband_val)
"""
S stands for side
"""
S_bands = {}
for band_key, band_val in lines_dict['continuum'].items():
S_bands[band_key] = (transmission['wave'] >= band_val[0]) & (transmission['wave'] <= band_val[1])
processed = {
'subroutine': subroutine_name
}
lightcurve = {
'subroutine': subroutine_name,
'arrays': {
'observations': {
'obs_name': np.zeros(len(lists['observations']), dtype=str),
'phase': np.zeros(len(lists['observations'])),
},
'transit_in': {},
'transit_full': {},
'transit_out': {},
},
'C_bands': C_bands,
'S_bands': S_bands,
'bins': {
'transit_in_bins': transit_in_bins,
'transit_in_step': transit_in_step,
'transit_full_bins': transit_full_bins,
'transit_full_step': transit_full_step
}
}
""" Adding the C-bands arrays to the dictionary"""
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['observations']['delta_' + band_key + name_append] = np.zeros([len(lists['observations']), 2])
transit_out_flag = np.zeros(len(lists['observations']), dtype=bool)
transit_in_flag = np.zeros(len(lists['observations']), dtype=bool)
transit_full_flag = np.zeros(len(lists['observations']), dtype=bool)
for n_obs, obs in enumerate( lists['observations']):
processed[obs] = {}
lightcurve[obs] = {}
try:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'][0])/planet_dict['period'][0]
except:
phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'])/planet_dict['period'][0]
processed[obs]['bands'] = {
'phase': phase_internal
}
processed[obs]['bands_uncorrected'] = {
'phase': phase_internal
}
processed[obs]['bands_clv_model'] = {
'phase': phase_internal
}
processed[obs]['s_integrated'] = 0.000
processed[obs]['s_integrated_uncorrected'] = 0.000
processed[obs]['s_integrated_clv_model'] = 0.000
processed[obs]['s_sigmaq_sum'] = 0.000
n_bands = 0.0
for band_key, band_val in S_bands.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key] = \
[np.average(transmission[obs]['normalized'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)
/len(transmission[obs]['normalized_err'][band_val])**2]
processed[obs]['bands_uncorrected'][band_key] = \
[np.average(transmission[obs]['normalized_uncorrected'][band_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][band_val])**2)
/len(transmission[obs]['normalized_uncorrected_err'][band_val])**2]
processed[obs]['bands_clv_model'][band_key] = \
[np.average(transmission[obs]['clv_model_rebinned'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)
/len(transmission[obs]['normalized_err'][band_val])**2]
else:
processed[obs]['bands'][band_key] = \
[np.sum(transmission[obs]['normalized'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)]
processed[obs]['bands_uncorrected'][band_key] = \
[np.sum(transmission[obs]['normalized_uncorrected'][band_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][band_val])**2)]
processed[obs]['bands_clv_model'][band_key] = \
[np.sum(transmission[obs]['clv_model_rebinned'][band_val]),
np.sum((transmission[obs]['normalized_err'][band_val])**2)]
processed[obs]['s_integrated'] += processed[obs]['bands'][band_key][0]
processed[obs]['s_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][0]
processed[obs]['s_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][0]
processed[obs]['s_sigmaq_sum'] += processed[obs]['bands'][band_key][1]
n_bands += 1.
processed[obs]['s_integrated'] /= n_bands
processed[obs]['s_integrated_uncorrected'] /= n_bands
processed[obs]['s_integrated_clv_model'] /= n_bands
processed[obs]['s_sigmaq_sum'] /= n_bands**2
#processed[obs]['s_factor'] =np.power(s_integrated, -2.0)
#processed[obs]['s_factor_clv_model'] =np.power(s_integrated, -2.0)
#processed[obs]['s_factor_uncorrected'] = np.power(s_integrated, -2.0)
for band_key, band_dict in C_bands.items():
processed[obs]['bands'][band_key] = {}
processed[obs]['bands_uncorrected'][band_key] = {}
processed[obs]['bands_clv_model'][band_key] = {}
processed[obs]['c_integrated'] = 0.000
processed[obs]['c_integrated_uncorrected'] = 0.000
processed[obs]['c_integrated_clv_model'] = 0.000
processed[obs]['c_sigmaq_sum'] = 0.000
n_bands = 0.0
for line_key, line_val in band_dict.items():
if do_average_instead_of_sum:
processed[obs]['bands'][band_key][line_key] = \
[np.average(transmission[obs]['normalized'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val])**2)
/ len(transmission[obs]['normalized_err'][line_val])**2]
processed[obs]['bands_uncorrected'][band_key][line_key] = \
[np.average(transmission[obs]['normalized_uncorrected'][line_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][line_val])**2)
/ len(transmission[obs]['normalized_uncorrected_err'][line_val])**2]
processed[obs]['bands_clv_model'][band_key][line_key] = \
[np.average(transmission[obs]['clv_model_rebinned'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val])**2)
/ len(transmission[obs]['normalized_err'][line_val])**2]
else:
processed[obs]['bands'][band_key][line_key] = \
[np.sum(transmission[obs]['normalized'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val]) ** 2)]
processed[obs]['bands_uncorrected'][band_key][line_key] = \
[np.sum(transmission[obs]['normalized_uncorrected'][line_val]),
np.sum((transmission[obs]['normalized_uncorrected_err'][line_val]) ** 2)]
processed[obs]['bands_clv_model'][band_key][line_key] = \
[np.sum(transmission[obs]['clv_model_rebinned'][line_val]),
np.sum((transmission[obs]['normalized_err'][line_val]) ** 2)]
processed[obs]['c_integrated'] += processed[obs]['bands'][band_key][line_key][0]
processed[obs]['c_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][line_key][0]
processed[obs]['c_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][line_key][0]
processed[obs]['c_sigmaq_sum'] += processed[obs]['bands'][band_key][line_key][1]
n_bands += 1.
processed[obs]['c_integrated'] /= n_bands
processed[obs]['c_integrated_uncorrected'] /= n_bands
processed[obs]['c_integrated_clv_model'] /= n_bands
processed[obs]['c_sigmaq_sum'] /= n_bands ** 2
for name_append in append_list:
lightcurve[obs]['delta_' + band_key + name_append] = [processed[obs]['c_integrated' + name_append]
- processed[obs]['s_integrated' + name_append],
np.sqrt(processed[obs]['s_sigmaq_sum'] + processed[obs]['c_sigmaq_sum'])]
lightcurve['arrays']['observations']['delta_' + band_key + name_append][n_obs, :] = \
lightcurve[obs]['delta_' + band_key + name_append][:]
lightcurve[obs]['phase'] = processed[obs]['bands']['phase']
lightcurve['arrays']['observations']['obs_name'][n_obs] = obs
lightcurve['arrays']['observations']['phase'][n_obs] = lightcurve[obs]['phase']
if obs in lists['transit_out']:
transit_out_flag[n_obs] = True
if obs in lists['transit_in']:
transit_in_flag[n_obs] = True
if obs in lists['transit_full']:
transit_full_flag[n_obs] = True
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['rescaling_' + band_key + name_append] = \
np.average(lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_out_flag, 0], axis=0)
sorting_index = np.argsort(lightcurve['arrays']['observations']['phase'])
transit_out_flag = transit_out_flag[sorting_index]
transit_in_flag = transit_in_flag[sorting_index]
transit_full_flag = transit_full_flag[sorting_index]
lightcurve['arrays']['observations']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][sorting_index]
lightcurve['arrays']['observations']['phase'] = lightcurve['arrays']['observations']['phase'][sorting_index]
lightcurve['arrays']['transit_in']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_in_flag]
lightcurve['arrays']['transit_in']['phase'] = lightcurve['arrays']['observations']['phase'][transit_in_flag]
lightcurve['arrays']['transit_full']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_full_flag]
lightcurve['arrays']['transit_full']['phase'] = lightcurve['arrays']['observations']['phase'][transit_full_flag]
lightcurve['arrays']['transit_out']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_out_flag]
lightcurve['arrays']['transit_out']['phase'] = lightcurve['arrays']['observations']['phase'][transit_out_flag]
for band_key in C_bands:
for name_append in append_list:
lightcurve['arrays']['observations']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][sorting_index]
# / lightcurve['arrays']['rescaling_' + band_key]
lightcurve['arrays']['transit_in']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_in_flag]
lightcurve['arrays']['transit_full']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_full_flag]
lightcurve['arrays']['transit_out']['delta_' + band_key + name_append] = \
lightcurve['arrays']['observations']['delta_' + band_key + name_append][transit_out_flag]
lightcurve['arrays']['observations']['transit_out_flag'] = transit_out_flag
lightcurve['arrays']['observations']['transit_in_flag'] = transit_in_flag
lightcurve['arrays']['observations']['transit_full_flag'] = transit_full_flag
pre_duration = transit_full_bins[0] - lightcurve['arrays']['transit_out']['phase'][0]
if pre_duration > 0:
nsteps_pre = int(pre_duration/transit_full_step)
if pre_duration % transit_full_step > 0.0:
nsteps_pre += 1
else:
nsteps_pre = 0
post_duration = lightcurve['arrays']['transit_out']['phase'][-1] - transit_full_bins[-1]
if post_duration > 0:
nsteps_post = int(post_duration / transit_full_step)
if post_duration % transit_full_step > 0.0:
nsteps_post += 1
else:
nsteps_post = 0
transit_bins = np.arange(transit_full_bins[0]-nsteps_pre*transit_full_step,
transit_full_bins[-1] + (nsteps_post+1.1) * transit_full_step,
transit_full_step)
lightcurve['binned'] = {
'observations': {
'phase': np.zeros(len(transit_bins)),
},
'transit_in': {},
'transit_full': {},
'transit_out': {},
}
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['observations']['delta_' + band_key + name_append] = np.zeros([len(transit_bins), 2])
transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
transit_full_flag = np.zeros(len(transit_bins), dtype=bool)
n_a = 0
for nb in range(0, len(transit_bins)-1):
sel = (lightcurve['arrays']['observations']['phase'] >= transit_bins[nb]) \
& (lightcurve['arrays']['observations']['phase'] < transit_bins[nb+1])
if np.sum(sel) <= 0: continue
lightcurve['binned']['observations']['phase'][n_a] = np.average(lightcurve['arrays']['observations']['phase'][sel])
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['observations']['delta_' + band_key + name_append][n_a, 0], sum_weights = np.average(
lightcurve['arrays']['observations']['delta_' + band_key + name_append][sel, 0],
weights=1. / lightcurve['arrays']['observations']['delta_' + band_key + name_append][sel, 1]**2,
returned=True)
lightcurve['binned']['observations']['delta_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)
if np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
total_transit_duration/2./planet_dict['period'][0]:
transit_out_flag[n_a] = True
elif np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
full_transit_duration/2./planet_dict['period'][0]:
transit_in_flag[n_a] = True
else:
transit_full_flag[n_a] = True
n_a += 1 # bins actually computed
lightcurve['binned']['transit_in']['phase'] = lightcurve['binned']['observations']['phase'][transit_in_flag]
lightcurve['binned']['transit_full']['phase'] = lightcurve['binned']['observations']['phase'][transit_full_flag]
lightcurve['binned']['transit_out']['phase'] = lightcurve['binned']['observations']['phase'][transit_out_flag]
lightcurve['binned']['observations']['phase'] = lightcurve['binned']['observations']['phase'][:n_a]
for band_key in C_bands:
for name_append in append_list:
lightcurve['binned']['transit_in']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][transit_in_flag, :]
lightcurve['binned']['transit_full']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][transit_full_flag, :]
lightcurve['binned']['transit_out']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][transit_out_flag, :]
lightcurve['binned']['observations']['delta_' + band_key + name_append] = \
lightcurve['binned']['observations']['delta_' + band_key + name_append][:n_a, :]
save_to_cpickle(subroutine_name + '_'+reference + '_' + output_selection +'_processed', processed, config_in['output'], night, lines_label)
save_to_cpickle(subroutine_name + '_'+reference + '_' + output_selection, lightcurve, config_in['output'], night, lines_label)
def plot_transmission_lightcurve(config_in, night_input='', reference='planetRF'):
    """ Plot the transmission lightcurve of each contrast band, night by night.

    For each night (all nights, or only the one given in *night_input*) the
    pickled lightcurve produced by the compute step is loaded; for every C band
    the per-observation and the phase-binned `delta` points are plotted against
    the orbital phase, with the out-of-transit regions shaded.

    Args:
        config_in (dict): parsed configuration file.
        night_input (str): restrict the plots to a single night; when empty,
            every night in the configuration is plotted.
        reference (str): reference-frame suffix of the dataset to load
            (default 'planetRF').

    Fixes with respect to the previous version:
    - x-axis label was '$\\lambda$ [$\\AA$]' (copied from a spectrum plot)
      although the x axis is the orbital phase;
    - both errorbar series carried the label 'observations', producing two
      identical legend entries;
    - non-raw label strings used invalid escape sequences ('\\m'), which raise
      SyntaxWarning on recent Python versions;
    - bare `except:` narrowed to `except Exception:`;
    - removed the `observational_pams` pickle load, whose result was unused.
    """
    import matplotlib.pyplot as plt
    night_dict = from_config_get_nights(config_in)
    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)
    for night in night_list:
        print("plot_transmission_lightcurve Night: ", night)
        """ Retrieving the analysis"""
        try:
            # NOTE(review): `subroutine_name` is not defined inside this
            # function - it must be provided at module level, otherwise this
            # raises NameError. Confirm against the full file.
            lightcurve = load_from_cpickle(subroutine_name + '_' + reference, config_in['output'], night)
        except Exception:
            # Narrowed from a bare `except:` so that KeyboardInterrupt and
            # SystemExit are not swallowed while skipping missing datasets.
            print()
            print("No transmission lightcurve dataset, no plots")
            continue
        C_bands = lightcurve['C_bands']
        for band_key in C_bands:
            plt.figure(figsize=(12, 6))
            plt.title('Transmission lightcurve - night {0:s} \n {1:s}'.format(night, band_key))
            # Individual observations: semi-transparent points
            plt.errorbar(lightcurve['arrays']['observations']['phase'],
                         lightcurve['arrays']['observations']['delta_' + band_key][:, 0],
                         yerr=lightcurve['arrays']['observations']['delta_' + band_key][:, 1],
                         fmt='.', c='k', alpha=0.25, label='observations')
            # Phase-binned points: opaque, with their own legend entry
            plt.errorbar(lightcurve['binned']['observations']['phase'],
                         lightcurve['binned']['observations']['delta_' + band_key][:, 0],
                         yerr=lightcurve['binned']['observations']['delta_' + band_key][:, 1],
                         fmt='.', c='k', alpha=1.0, label='binned')
            # Shade the out-of-transit regions on both sides of the transit
            plt.axvspan(-1, lightcurve['bins']['transit_in_bins'][0], alpha=0.25, color='green')
            plt.axvspan(lightcurve['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
            plt.axhline(0, c='C1')
            plt.xlim(lightcurve['arrays']['observations']['phase'][0] - 0.01,
                     lightcurve['arrays']['observations']['phase'][-1] + 0.01)
            plt.xlabel('Orbital phase')
            plt.ylabel(r'$\mathcal{R}$ - 1.')
            plt.legend()
            plt.show()
| 25,556 | 51.58642 | 151 | py |
SLOPpy | SLOPpy-main/SLOPpy/differential_refraction_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
__all__ = ["compute_differential_refraction_preparation"]
def compute_differential_refraction_preparation(config_in, append_name=None):
    """ Rebin every observation of each night onto a common wavelength grid
    and build the (optionally cross-night) reference spectrum used by the
    differential refraction correction.

    One '<filename>_preparation' pickle is saved per night; when the
    configuration designates a reference (a night name, an instrument name,
    or 'all'), a '<filename>_reference' pickle shared by all nights is
    saved as well.

    Args:
        config_in (dict): parsed configuration file.
        append_name (str, optional): suffix appended to the subroutine name
            and to the output pickle file names.
    """
    # Output file names depend on the optional suffix
    if append_name:
        subroutine_name = 'differential_refraction_preparation_' + append_name
        filename = 'refraction_'+append_name
    else:
        subroutine_name = 'differential_refraction_preparation'
        filename = 'refraction'
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    shared_data = load_from_cpickle('shared', config_in['output'])
    # Per-night coadded spectra that may contribute to the global reference:
    # one row per night, on the shared coadd wavelength grid. Masked entries
    # default to True (excluded) until a night fills its row.
    reference_flux = np.empty([len(night_dict), shared_data['coadd']['size']], dtype=np.double)
    reference_wght = np.zeros([len(night_dict), shared_data['coadd']['size']], dtype=np.double)
    reference_mask = np.ones([len(night_dict), shared_data['coadd']['size']], dtype=bool)
    compute_reference = False
    """ make sure that all the spectra are computed in the same reference system if cross-calibrations is used"""
    absolute_SRF = False
    for n_night, night in enumerate(night_dict):
        if night_dict[night]['refraction'].get('reference_night', False) \
                or night_dict[night]['refraction'].get('reference_instrument', False):
            absolute_SRF = True
    for n_night, night in enumerate(night_dict):
        try:
            # Skip the night if the preparation already exists on disk
            preparation = load_from_cpickle(filename +'_preparation', config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()
        instrument = night_dict[night]['instrument']
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'],
                                           use_refraction=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        defined_reference = night_dict[night]['refraction'].get('reference', False)
        # With a defined reference the shared coadd grid is used, so that
        # different nights/instruments live on the same wavelength scale;
        # otherwise the night-specific coadd grid is sufficient.
        if defined_reference:
            preparation = {
                'subroutine': 'differential_refraction_preparation',
                'coadd': {
                    'wave': shared_data['coadd']['wave'],
                    'step': shared_data['coadd']['step'],
                    'size': shared_data['coadd']['size']
                },
                'absolute_SRF': True,
                'reference_coadd': True
            }
        else:
            preparation = {
                'subroutine': 'differential_refraction_preparation',
                'coadd': {
                    'wave': input_data['coadd']['wave'],
                    'step': input_data['coadd']['step'],
                    'size': input_data['coadd']['size'],
                },
                'absolute_SRF': absolute_SRF,
                'reference_coadd': False
            }
        # Per-observation rebinned spectra for the night coadd (masked
        # weighted average below)
        total_flux = np.empty([len(lists['observations']), preparation['coadd']['size']], dtype=np.double)
        total_wght = np.zeros([len(lists['observations']), preparation['coadd']['size']], dtype=np.double)
        total_mask = np.ones([len(lists['observations']), preparation['coadd']['size']], dtype=bool)
        """ Rebinning of all the spectra """
        for n_obs, obs in enumerate(lists['observations']):
            print("  Spectral rebinning - Processing: ", obs)
            preparation[obs] = {}
            """ Rebinning of the spectra in the SRF, except for a fixed constant in order to minimize
                the difference between
            """
            if preparation['absolute_SRF']:
                rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
            else:
                rv_shift = observational_pams[obs]['rv_shift_ORF2SRF_mod']
            preserve_flux = input_data[obs].get('absolute_flux', True)
            # e2ds fluxes and errors are rebinned from the 2D echelle format
            # to the 1D coadd grid, shifted to the stellar reference frame
            preparation[obs]['flux_rebinned_stellarRF'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds'],
                               calib_data['blaze'],
                               preparation['coadd']['wave'],
                               preparation['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=rv_shift)
            err_flux_rebinned_SRF = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds_err'],
                               calib_data['blaze'],
                               preparation['coadd']['wave'],
                               preparation['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=rv_shift,
                               is_error=True)
            """ Zero or negative values are identified, flagged and substituted with another value """
            #processed[obs]['flux_rebinned_stellarRF'], \
            #processed[obs]['err_flux_rebinned_SRF'], \
            #processed[obs]['flux_rebinned_SRF_null'] = \
            #    replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF'],
            #                                                processed[obs]['err_flux_rebinned_SRF'],
            #                                                force_positive=True)
            #processed[obs]['rescaling'], processed[obs]['rescaled'], processed[obs]['rescaled_err'] = \
            #    perform_rescaling(processed['coadd']['wave'],
            #                      processed[obs]['flux_rebinned_stellarRF'],
            #                      processed[obs]['err_flux_rebinned_SRF'],
            #                      observational_pams['wavelength_rescaling'])
            #
            """ Zero or negative values are identified, flagged and substituted
                with another value """
            preparation[obs]['flux_rebinned_stellarRF'], \
                err_flux_rebinned_SRF, \
                flux_rebinned_SRF_null = \
                replace_values_errors_with_interpolation_1d(preparation[obs]['flux_rebinned_stellarRF'],
                                                            err_flux_rebinned_SRF,
                                                            force_positive=True)
            preparation[obs]['rescaling'], preparation[obs]['rescaled'], preparation[obs]['rescaled_err'] = \
                perform_rescaling(preparation['coadd']['wave'],
                                  preparation[obs]['flux_rebinned_stellarRF'],
                                  err_flux_rebinned_SRF,
                                  observational_pams['wavelength_rescaling'])
            #if instrument_dict[instrument]['refraction'].get('reference_night', False):
            #    if night != instrument_dict[instrument]['refraction']['reference_night']:
            #        continue
            #
            #if instrument_dict[instrument]['refraction'].get('reference_instrument', False):
            #    if instrument != instrument_dict[instrument]['refraction']['reference_instrument']:
            #        continue
            # Only telluric-list observations enter the night coadd, unless
            # the configuration asks to use all of them
            if night_dict[night]['refraction'].get('use_all_observations', False) or obs in lists['telluric']:
                total_flux[n_obs, :] = preparation[obs]['rescaled']
                total_mask[n_obs, :] = flux_rebinned_SRF_null
                total_wght[n_obs, :] = 1. / (preparation[obs]['rescaled_err'] ** 2)
                print("   Observation added to reference spectrum")
        # Inverse-variance weighted average of the selected observations,
        # ignoring the flagged (null) pixels
        masked_array = np.ma.array(total_flux, mask=total_mask)
        rescaled_mask, sum_weights = np.ma.average(masked_array,
                                                   weights=total_wght,
                                                   axis=0,
                                                   returned=True)
        preparation['coadd']['rescaled'] = rescaled_mask.filled(0.00)
        # Avoid division by zero where no observation contributed
        sum_weights[sum_weights <= 0.0] = 1.0
        preparation['coadd']['rescaled_err'] = 1. / np.sqrt(sum_weights)
        preparation['coadd']['rescaled'], preparation['coadd']['rescaled_err'], preparation['coadd']['null'] = \
            replace_values_errors_with_interpolation_1d(preparation['coadd']['rescaled'],
                                                        preparation['coadd']['rescaled_err'],
                                                        force_positive=True)
        save_to_cpickle(filename + '_preparation', preparation, config_in['output'], night)
        # The night contributes to the global reference when it matches the
        # requested reference (by night name, instrument name, or 'all')
        if defined_reference == night \
                or defined_reference == instrument \
                or defined_reference == 'all' :
            compute_reference = True
            reference_flux[n_night, :] = preparation['coadd']['rescaled']
            reference_mask[n_night, :] = preparation['coadd']['null']
            reference_wght[n_night, :] = 1. / (preparation['coadd']['rescaled_err'] ** 2)
    if compute_reference:
        # Cross-night reference: weighted average of the night coadds on the
        # shared wavelength grid
        reference = {
            'wave': shared_data['coadd']['wave'],
            'step': shared_data['coadd']['step'],
            'size': shared_data['coadd']['size'],
        }
        masked_array = np.ma.array(reference_flux, mask=reference_mask)
        rescaled_mask, sum_weights = np.ma.average(masked_array,
                                                   weights=reference_wght,
                                                   axis=0,
                                                   returned=True)
        reference['rescaled'] = rescaled_mask.filled(0.00)
        sum_weights[sum_weights <= 0.0] = 1.0
        reference['rescaled_err'] = 1. / np.sqrt(sum_weights)
        reference['rescaled'], reference['rescaled_err'], reference['null'] = \
            replace_values_errors_with_interpolation_1d(reference['rescaled'],
                                                        reference['rescaled_err'],
                                                        force_positive=True)
        save_to_cpickle(filename + '_reference', reference, config_in['output'])
    print()
| 10,775 | 47.981818 | 113 | py |
SLOPpy | SLOPpy-main/SLOPpy/sysrem_correction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.pca_preparation import compute_pca_preparation
from PyAstronomy import pyasl
from scipy.interpolate import UnivariateSpline
__all__ = ['compute_sysrem_correction',
'plot_sysrem_correction']
def compute_sysrem_correction(config_in):
    """ Remove systematics from the stacked e2ds spectra with SysRem and save
    the per-iteration models and observation/model ratios.

    For each night, SysRem is run order by order on the stacked spectra; the
    cumulative model after each iteration is stored, together with the
    ratio (and propagated ratio error) of every observation to that model,
    in the 'transmission_preparation' pickle.

    Args:
        config_in (dict): parsed configuration file.
    """
    subroutine_name = 'sysrem_correction'
    # The PCA/stacking preparation must exist before SysRem can run
    compute_pca_preparation(config_in)
    print()
    night_dict = from_config_get_nights(config_in)
    pca_parameters = from_config_get_pca_parameters(config_in)
    for night in night_dict:
        try:
            # Skip the night if the SysRem output already exists on disk
            sysrem_output = load_from_cpickle('transmission_preparation',
                                              config_in['output'],
                                              night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()
        n_iter = pca_parameters.get('iterations',5 )
        # NOTE(review): ref_iter is only stored in the output dictionary here;
        # presumably it selects the iteration used downstream - confirm.
        ref_iter = pca_parameters.get('ref_iteration',3 )
        sysrem_output = {
            'subroutine': subroutine_name,
            'iterations': n_iter,
            'pca_output': True,
            'ref_iteration': ref_iter
        }
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        preparation = load_from_cpickle('pca_preparation', config_in['output'], night)
        n_obs, n_orders, n_pixels = np.shape(preparation['stack_e2ds'])
        # model_iter: incremental model removed at each SysRem iteration;
        # model_out: cumulative model up to each iteration (filled below)
        model_iter = np.ones([n_iter, n_obs, n_orders, n_pixels], dtype=np.double)
        model_out = np.ones([n_iter, n_obs, n_orders, n_pixels], dtype=np.double)
        for order in range(0, observational_pams['n_orders']):
            obs = preparation['stack_e2ds'][:,order,:]
            sigs = preparation['stack_e2ds_err'][:,order,:]
            sr = pyasl.SysRem(obs, sigs)
            previous_residuals = obs.copy()
            for it in range(0, n_iter):
                # sr.iterate() returns the residuals and the two SysRem
                # vectors; the removed model is the residual difference
                r, a, c = sr.iterate()
                model_iter[it,:,order,:] = previous_residuals - r
                previous_residuals = r.copy()
        for it in range(0, n_iter):
            # Full model is the sum of all the models until the given iteration
            model_out[it,:, :, :] = np.sum(model_iter[:it+1,:, :, :], axis=0)
            #import matplotlib.pyplot as plt
            #plt.figure()
            #plt.title("Model " + repr(it))
            #plt.imshow( model_out[it,:, 10, :], origin='lower', aspect="auto")
            #plt.show()
            # Iterations are keyed with zero-padded strings ('00', '01', ...)
            it_string = str(it).zfill(2)
            sysrem_output[it_string] = {
                'model': model_out[it,:, :, :]
            }
            for i_obs, obs in enumerate(lists['observations']):
                sysrem_output[it_string][obs] = {}
                sysrem_output[it_string][obs]['ratio'] = preparation['stack_e2ds'][i_obs, :, :]/model_out[it, i_obs, :, :]
                sysrem_output[it_string][obs]['ratio_err'] = preparation['stack_e2ds_err'][i_obs, :, :]/model_out[it, i_obs, :, :]
        save_to_cpickle('transmission_preparation', sysrem_output, config_in['output'], night)
        print()
    """ Keep going from here after preparation, unless the subroutine has been called just
    to perform the data preparation step
    """
def plot_sysrem_correction(config_in, night_input=''):
    """ Diagnostic plots for the SysRem/PCA transmission preparation.

    For each night: a 2D phase-vs-wavelength map of a single hard-coded
    echelle order, the spline-detrended residual map of the same order, and
    a scatter plot of the rescaled in-transit spectra colour-coded by BJD.
    Contains debug code explicitly marked for removal.

    Args:
        config_in (dict): parsed configuration file.
        night_input (str): restrict the plots to a single night; when empty,
            every night in the configuration is plotted.
    """
    subroutine_name = 'transmission_spectrum_preparation'
    night_dict = from_config_get_nights(config_in)
    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)
    for night in night_list:
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        # ! To be removed when testing is done
        # ! This plots do not make any sense anymore
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        """ Retrieving the analysis"""
        try:
            preparation = load_from_cpickle('transmission_preparation', config_in['output'], night)
        except:
            print("No transmission spectrum results, no plots")
            print()
            continue
        #from SLOPpy.subroutines.lines_fit_functions import logprob_case12
        from matplotlib.colors import BoundaryNorm
        from matplotlib.ticker import MaxNLocator
        len_y = len(lists['observations'])
        # NOTE(review): pixel count and echelle order are hard-coded here
        # (debug values) - they assume 4096-pixel orders.
        len_x = 4096
        order= 11
        time_from_transit = np.empty(len_y, dtype=np.double)
        plot_data = np.empty([len_y, len_x], dtype=np.double)
        for i_obs, obs in enumerate(lists['observations']):
            print(np.median(preparation[obs]['deblazed'][order ,:]))
            time_from_transit[i_obs] = input_data[obs]['BJD'] - observational_pams['time_of_transit']
            plot_data[i_obs, :] = preparation[obs]['deblazed'][order ,:]/ np.median(preparation[obs]['ratio'][order ,:])
            # wave keeps the grid of the last observation; presumably the
            # wavelength solution is shared across the night - confirm.
            wave = preparation[obs]['wave'][order, :]
        wave_meshgrid, time_meshgrid = np.meshgrid(wave, time_from_transit)
        # First map: deblazed flux of the selected order vs time and wavelength
        print('COOLWARM')
        cmap = plt.get_cmap('coolwarm')
        levels = MaxNLocator(nbins=15).tick_values(
            plot_data.min(), plot_data.max())
        norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
        plt.figure(figsize=(15, 10))
        PCF = plt.contourf(wave_meshgrid, time_meshgrid,
                           plot_data, levels=levels, cmap=cmap)
        cbar = plt.colorbar(PCF)
        cbar.ax.set_ylabel('Intensity')
        plt.show()
        # Second map: fractional residuals after a per-pixel spline fit in time
        res = plot_data * 1.
        from scipy.interpolate import UnivariateSpline
        for ii in range(0,4096):
            spl = UnivariateSpline(time_from_transit, plot_data[:, ii])
            val = spl(time_from_transit)
            res[:,ii] -= val
            res[:,ii] /= val
        print('COOLWARM')
        cmap = plt.get_cmap('coolwarm')
        levels = MaxNLocator(nbins=10).tick_values(
            -0.05, 0.05)
        norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
        plt.figure(figsize=(15, 10))
        PCF = plt.contourf(wave_meshgrid, time_meshgrid,
                           res, levels=levels, cmap=cmap)
        cbar = plt.colorbar(PCF)
        cbar.ax.set_ylabel('Intensity')
        plt.show()
        """ Creation of the color array, based on the BJD of the observations
        """
        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        #ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])
        for obs in lists['transit_in']:
            # Rescale after removing the (normalized) step size from flux and error
            preparation[obs]['rescaling'], \
            preparation[obs]['rescaled'], \
            preparation[obs]['rescaled_err'] = perform_rescaling(
                preparation[obs]['wave'],
                preparation[obs]['deblazed'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
                preparation[obs]['deblazed_err'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
                observational_pams['wavelength_rescaling'])
            ax1.scatter(preparation[obs]['wave'],
                        preparation[obs]['rescaled'],
                        s=1, alpha=0.25,
                        color=colors_plot['mBJD'][obs])
        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 8,372 | 34.935622 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_template_alternative.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_template_alternative", "plot_telluric_template_alternative"]
def compute_telluric_template_alternative(config_in):
    """ Entry point for the template-based telluric correction.

    Delegates to the generic routine with the standard settings: a single
    iteration, no BERV usage, no reference airmass, template enabled.

    Args:
        config_in (dict): parsed configuration file.
    """
    routine_options = {
        'n_iterations': 1,
        'use_berv': False,
        'use_reference_airmass': False,
        'use_template': True,
        'subroutine_name': 'telluric_template',
    }
    compute_telluric_template_alternative_routine(config_in, **routine_options)
def compute_telluric_template_alternative_routine(config_in, **kwargs):
"""
Lazy workaround
:param config_in:
:param kwargs:
:return:
"""
night_dict = from_config_get_nights(config_in)
instrument_dict = from_config_get_instrument(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
for night in night_dict:
instrument_name = night_dict[night]['instrument']
print()
print("compute_telluric_template Night: ", night)
print()
try:
telluric = load_from_cpickle('telluric', config_in['output'], night)
continue
except:
print(" No telluric correction file found, computing now ")
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
""" Retrieving the observations"""
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
processed = {
'subroutine': kwargs['subroutine_name'],
'n_orders': 0,
'n_pixels': 0,
'telluric': {}
}
telluric = {
'subroutine': kwargs['subroutine_name'],
'reference_frame': 'observer',
'template': {},
'linear' : {}
}
# There must be a more elegant way to do this, but I'm, not aware of it
""" computation of the rescaled spectra, for later use in the analysis and in the plotting subroutines"""
for obs in lists['observations']:
processed[obs] = {
'n_orders': input_data[obs]['n_orders'],
'n_pixels': input_data[obs]['n_pixels']
}
""" for plotting purpose only"""
processed[obs]['wave'] = input_data[obs]['wave']
processed[obs]['e2ds'] = input_data[obs]['e2ds']
processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
perform_rescaling(input_data[obs]['wave'],
input_data[obs]['e2ds'],
input_data[obs]['e2ds_err'],
observational_pams['wavelength_rescaling'])
""" Reference airmass for iterative correction of airmass - disabled here"""
processed['airmass_ref'] = 0.000
"""
Definiton of the wavelength scale for the output telluric spectrum
We assume that the wavelength solution did not change during the night
"""
obs_reference = lists['observations'][0]
telluric['rebinned'] = {
'wave': input_data[obs_reference]['wave'],
'step': input_data[obs_reference]['step'],
'flux': np.ones(input_data[obs_reference]['step'].shape),
'ferr': np.ones(input_data[obs_reference]['step'].shape)*0.001
}
processed['telluric']['spectrum_noairmass'] = telluric['rebinned']['flux'].copy()
processed['telluric']['spectrum_noairmass_err'] = telluric['rebinned']['ferr'].copy()
processed['n_orders'] = input_data[obs_reference]['orders']
processed['n_pixels'] = input_data[obs_reference]['wave_size']
"""
Computation of the rescaling factor for the telluric template. This factor has no physical value, it's just
the ratio of the observed telluric features (in a normalized spectra) to the input template.
"""
try:
if 'telluric_template' not in instrument_dict[instrument_name]:
raise MissingKeywordException()
template_dict = instrument_dict[instrument_name]['telluric_template']
if template_dict['fit_range'][0] > shared_data['coadd']['wavelength_range'][1] or \
template_dict['fit_range'][1] < shared_data['coadd']['wavelength_range'][0]:
raise OutOfRangeException()
print(' rescaled telluric template')
print(' instrument :', instrument_name)
print(' template :', template_dict['file'])
print(' fit_range :', template_dict['fit_range'])
print()
""" Retrieving the template data"""
telluric_template_data = np.genfromtxt(template_dict['file'])
telluric['template']['input'] = {
'range': [np.amin(telluric_template_data[:, 0]), np.amax(telluric_template_data[-1, 0])],
'wave': telluric_template_data[:, 0],
'flux': telluric_template_data[:, 1],
'ferr': telluric_template_data[:, 2],
'step': telluric_template_data[:, 3]
}
processed['template'] = {
'array_wave': [],
'array_data': {},
'array_move': {}
}
""" Again we assume that the wavelength solution did not change during the night """
tel_selection = (input_data[obs_reference]['wave'] > template_dict['fit_range'][0]) & \
(input_data[obs_reference]['wave'] < template_dict['fit_range'][1])
processed['template']['wave_sel'] = input_data[obs_reference]['wave'][tel_selection]
processed['template']['step_sel'] = input_data[obs_reference]['step'][tel_selection]
for obs in lists['telluric']:
processed['template'][obs] = processed[obs]['e2ds_rescaled'][tel_selection]
""" saving the wavelength array for plotting purpose"""
processed['template']['array_wave'].extend(processed['template']['wave_sel'])
rescaling_array = np.arange(0.05, 2.0, 0.05)
computed_std = np.zeros(len(rescaling_array))
for n_rescaling_factor, rescaling_factor in enumerate(rescaling_array):
"""
Template spectrum is rebinned onto the observations wavelength scale, using only the wavelength
range selected for the computation of the rescaling factor
"""
template_rebinned_flux = \
rebin_1d_to_1d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['flux'],
processed['template']['wave_sel'],
processed['template']['step_sel'],
preserve_flux=False)
""" Saving the outcome to dictionary """
template_rebinned_flux -= 1.00
template_rebinned_flux *= rescaling_factor
template_rebinned_flux += 1.00
e2ds_corrected = []
for obs in lists['telluric']:
e2ds_corrected.extend(processed['template'][obs] /
np.power(template_rebinned_flux, input_data[obs]['AIRMASS']))
computed_std[n_rescaling_factor] = np.nanstd(e2ds_corrected)
#print(n_rescaling_factor, rescaling_factor, computed_std[n_rescaling_factor])
#plt.scatter(processed['template']['array_wave'], e2ds_corrected)
#plt.plot(processed['template']['wave_sel'], template_rebinned_flux)
#plt.show()
label_dict = '{0}'.format(rescaling_factor)
processed['template']['array_data'][label_dict] = e2ds_corrected
processed['template']['array_move'][label_dict] = rescaling_factor
#plt.scatter(rescaling_array, computed_std)
#plt.show()
""" Selection of the rescaling factor with the lowest scatter """
ind_factor = np.argmin(computed_std)
ind_range = 3
if ind_factor < ind_range:
sel_factor = rescaling_array[0:ind_factor+ind_range]
sel_stdev = computed_std[0:ind_factor+ind_range]
elif ind_factor > len(rescaling_array) - ind_range:
sel_factor = rescaling_array[ind_factor-ind_range:]
sel_stdev = computed_std[ind_factor-ind_range:]
else:
sel_factor = rescaling_array[ind_factor-ind_range:ind_factor+ind_range]
sel_stdev = computed_std[ind_factor-ind_range:ind_factor+ind_range]
coeff = np.polyfit(sel_factor, sel_stdev, 2)
telluric_factor = - coeff[1] / (2*coeff[0])
print(' telluric factor: {0:7f}'.format(telluric_factor))
print()
processed['template']['telluric_factor'] = telluric_factor
processed['template']['rescaling_factor'] = sel_factor
processed['template']['computed_std'] = computed_std
processed['template']['polyfit'] = {
'package': 'numpy', # in case we forget what we used...
'order': 2,
'coeff': coeff,
'sel_factor': sel_factor,
'sel_stdev': sel_stdev
}
"""
The template telluric spectrum is rebinned onto the 2D scale of the observations.
Then it is rescaled according to the computed factor
We assume that the wavelength solution did not change during the night
"""
processed['template']['rebinned'] = {}
processed['template']['rebinned']['flux'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['flux'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False)
processed['template']['rebinned']['ferr'] = \
rebin_1d_to_2d(telluric['template']['input']['wave'],
telluric['template']['input']['step'],
telluric['template']['input']['ferr'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False,
is_error=True)
sel_out_of_range = ~((telluric['rebinned']['wave'] > telluric['template']['input']['range'][0]+1.) \
& (telluric['rebinned']['wave'] < telluric['template']['input']['range'][1]-1.))
processed['template']['rebinned']['flux'][sel_out_of_range] = 1.
processed['template']['rebinned']['ferr'][sel_out_of_range] = 0.1
processed['telluric']['spectrum_noairmass'] = \
(processed['template']['rebinned']['flux'] - 1.) * telluric_factor + 1.0
processed['telluric']['spectrum_noairmass_err'] = processed['template']['rebinned']['ferr'] * telluric_factor
except MissingFileException:
print(' *** Missing telluric_template keyword in configuration file ***')
print()
except OutOfRangeException:
print(' *** Wavelength range for the calculation of the rescaling factor is outside the boundaries ***')
print(' Rescaling factor wavelength range: {0:7.2f} to {1:7.2f}'.format(
template_dict['fit_range'][0], template_dict['fit_range'][1]))
print(' Shared data wavelength range : {0:7.2f} to {1:7.2f}'.format(
shared_data['coadd']['wavelength_range'][0],
shared_data['coadd']['wavelength_range'][1]))
print()
"""
Introduction of a second telluric correction, where the telluric spectrum depends linearly on the precipitable
water vapour (PWV). As such, the values stored in the files are the coefficient of the PWV term, while the
baseline is given by the outcome of the previous step, i.e., the spectrum_noairmass array
"""
try:
"""
The algorith is essentially the same as in the previous step, with the exception of the calculation
of the telluric spectrum at each iteration of the chi-square minimization
"""
if 'telluric_linear_term' not in instrument_dict[instrument_name]:
raise MissingKeywordException()
linear_dict = instrument_dict[instrument_name]['telluric_linear_term']
if linear_dict['fit_range'][0] > shared_data['coadd']['wavelength_range'][1] or \
linear_dict['fit_range'][1] < shared_data['coadd']['wavelength_range'][0]:
raise OutOfRangeException()
print(' PWV-dependent telluric spectrum')
print(' instrument :', instrument_name)
print(' linear :', linear_dict['file'])
print(' fit_range :', linear_dict['fit_range'])
print()
""" Retrieving the linear data"""
telluric_linear_data = np.genfromtxt(linear_dict['file'])
telluric['linear']['input'] = {
'range': [np.amin(telluric_linear_data[:, 0]), np.amax(telluric_linear_data[-1, 0])],
'wave': telluric_linear_data[:, 0],
'coef': telluric_linear_data[:, 1],
'cerr': telluric_linear_data[:, 2],
'step': telluric_linear_data[:, 3]
}
processed['linear'] = {
'array_wave': [],
'array_data': {},
'array_move': {}
}
""" Again we assume that the wavelength solution did not change during the night """
tel_selection = (input_data[obs_reference]['wave'] > linear_dict['fit_range'][0]) & \
(input_data[obs_reference]['wave'] < linear_dict['fit_range'][1])
processed['linear']['wave_sel'] = input_data[obs_reference]['wave'][tel_selection]
processed['linear']['step_sel'] = input_data[obs_reference]['step'][tel_selection]
for obs in lists['telluric']:
processed['linear'][obs] = processed[obs]['e2ds_rescaled'][tel_selection]
""" saving the wavelength array for plotting purpose"""
processed['linear']['array_wave'].extend(processed['linear']['wave_sel'])
rescaling_array = 10**np.arange(-1, np.log10(50), 0.1)
computed_std = np.zeros(len(rescaling_array))
for n_rescaling_factor, rescaling_factor in enumerate(rescaling_array):
"""
Template spectrum is rebinned onto the observations wavelength scale, using only the wavelength
range selected for the computation of the rescaling factor
"""
linear_rebinned_flux = \
rebin_1d_to_1d(telluric['linear']['input']['wave'],
telluric['linear']['input']['step'],
telluric['linear']['input']['coef'],
processed['linear']['wave_sel'],
processed['linear']['step_sel'],
preserve_flux=False)
""" Saving the outcome to dictionary """
linear_rebinned_flux *= rescaling_factor
linear_rebinned_flux += processed['telluric']['spectrum_noairmass'][tel_selection]
e2ds_corrected = []
for obs in lists['telluric']:
e2ds_corrected.extend(processed['linear'][obs] /
np.power(linear_rebinned_flux, input_data[obs]['AIRMASS']))
computed_std[n_rescaling_factor] = np.nanstd(e2ds_corrected)
label_dict = '{0}'.format(rescaling_factor)
processed['linear']['array_data'][label_dict] = e2ds_corrected
processed['linear']['array_move'][label_dict] = rescaling_factor
#print(n_rescaling_factor, rescaling_factor, computed_std[n_rescaling_factor])
#plt.scatter(processed['linear']['array_wave'], e2ds_corrected)
#plt.plot(processed['linear']['wave_sel'], linear_rebinned_flux)
#plt.show()
#plt.scatter(rescaling_array, computed_std)
#plt.show()
""" Selection of the PWV value with the lowest scatter """
ind_factor = np.argmin(computed_std)
ind_range = 3
if ind_factor < ind_range:
sel_factor = rescaling_array[0:ind_factor+ind_range]
sel_stdev = computed_std[0:ind_factor+ind_range]
elif ind_factor > len(rescaling_array) - ind_range:
sel_factor = rescaling_array[ind_factor-ind_range:]
sel_stdev = computed_std[ind_factor-ind_range:]
else:
sel_factor = rescaling_array[ind_factor-ind_range:ind_factor+ind_range]
sel_stdev = computed_std[ind_factor-ind_range:ind_factor+ind_range]
coeff = np.polyfit(sel_factor, sel_stdev, 2)
PWV_value = - coeff[1] / (2*coeff[0])
print(' PWV value : {0:7f}'.format(PWV_value))
print()
processed['linear']['PWV_value'] = PWV_value
processed['linear']['PWV_closest'] = sel_factor
processed['linear']['computed_std'] = computed_std
processed['linear']['polyfit'] = {
'package': 'numpy', # in case we forget what we used...
'order': 2,
'coeff': coeff,
'sel_factor': sel_factor,
'sel_stdev': sel_stdev
}
"""
The linear coefficient for the PWV-dependent part are rebinned onto the 2D scale of the observations.
Then the teluric spectrum is computed, using also the baseline from the previous step
"""
processed['linear']['rebinned'] = {}
processed['linear']['rebinned']['coef'] = \
rebin_1d_to_2d(telluric['linear']['input']['wave'],
telluric['linear']['input']['step'],
telluric['linear']['input']['coef'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False)
processed['linear']['rebinned']['cerr'] = \
rebin_1d_to_2d(telluric['linear']['input']['wave'],
telluric['linear']['input']['step'],
telluric['linear']['input']['cerr'],
telluric['rebinned']['wave'],
telluric['rebinned']['step'],
preserve_flux=False,
is_error=True)
sel_out_of_range = ~((telluric['rebinned']['wave'] > telluric['linear']['input']['range'][0]+1.) \
& (telluric['rebinned']['wave'] < telluric['linear']['input']['range'][1]-1.))
processed['linear']['rebinned']['coef'][sel_out_of_range] = 0.
processed['linear']['rebinned']['cerr'][sel_out_of_range] = 0.1
processed['telluric']['spectrum_noairmass'] += processed['linear']['rebinned']['coef'] * PWV_value
processed['telluric']['spectrum_noairmass_err'] = np.sqrt(
processed['telluric']['spectrum_noairmass_err']**2 +
(processed['linear']['rebinned']['cerr'] * PWV_value)**2)
except MissingFileException:
print(' *** Missing telluric_linear_coeff keyword in configuration file ***')
print()
except OutOfRangeException:
print(' *** Wavelength range for the calculation of the PWV-dependent telluric spectrum is outside the boundaries ***')
print()
for obs in lists['observations']:
""" Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'])
processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / \
np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'])
for obs in lists['observations']:
# Correction of telluric lines
telluric[obs] = {}
telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['airmass_ref'] = processed['airmass_ref']
""" Set anomalosly low point to one (e.g. when the template is not computed)"""
telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0
telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
telluric[obs]['spline_noairmass'] = telluric[obs]['spectrum_noairmass'].copy()
""" No need to compute the spline approximation since we are already dealing with a very high SNR template"""
telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
input_data[obs]['AIRMASS'] - processed['airmass_ref'])
""" copy the keyword for future use"""
telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']
save_to_cpickle('telluric', telluric, config_in['output'], night)
save_to_cpickle('telluric_processed', processed, config_in['output'], night)
print()
print("Night ", night, " completed")
def plot_telluric_template_alternative(config_in, night_input=''):
    """Plot telluric-corrected spectra against the telluric model, night by night.

    For each night, the top panel shows the rescaled spectra before and after
    the telluric correction (colour-coded by BJD through a colorbar), while the
    bottom panel shows the airmass-scaled telluric spectrum. When the
    instrument configuration provides a ``telluric_comparison`` file, that
    reference spectrum is overplotted on both panels.

    Args:
        config_in (dict): parsed configuration dictionary.
        night_input (str): name of a single night to plot; an empty string
            (the default) plots every night in the configuration.
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)

    # An empty night_input means "plot all the nights in the configuration"
    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_telluric_template Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except Exception:
            # Narrowed from a bare except: a missing pickle means the telluric
            # step was not run for this night, so there is nothing to plot
            print()
            print("No telluric correction, no plots")
            continue

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])

        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            color_array = cmap(i / len(lists['observations']))

            _, e2ds_rescaled, _ = \
                perform_rescaling(processed[obs]['wave'],
                                  processed[obs]['e2ds'],
                                  processed[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']

            for order in range(0, processed[obs]['n_orders']):

                # Attach the legend labels only once (first order of the first
                # observation) to avoid one legend entry per order
                if order == 0 and i == 0:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array))

                ax2.plot(processed[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)
                ax2.axhline(1.00, c='k')

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        # Raw string avoids the invalid-escape-sequence warning for \l and \A
        # while producing exactly the same label text
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        try:
            instrument = night_dict[night]['instrument']
            comparison_file = config_in['instruments'][instrument]['telluric_comparison']
            comparison_data = np.genfromtxt(comparison_file, skip_header=1)
            # Comparison files may be in nm: wavelengths below 1000 are assumed
            # to be nm and converted to Angstrom
            if comparison_data[0, 0] < 1000.0:
                nm2Ang = 10.
            else:
                nm2Ang = 1.
            ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
            ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
        except Exception:
            # The comparison spectrum is optional: a missing keyword or file is
            # not an error (narrowed from a bare except)
            pass

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
SLOPpy | SLOPpy-main/SLOPpy/prepare_datasets.py | from __future__ import print_function, division
from SLOPpy.instruments.get_data import *
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["prepare_datasets", "plot_dataset"]
def _check_wavelength_rescaling(wave_rescaling, wave_observations):
if wave_rescaling[0] < wave_observations[0] or \
wave_rescaling[1] > wave_observations[1]:
warnings.warn("Valid wavelength rescaling window must be between {0:8.2f} and {1:8.2f}".format(
wave_observations[0], wave_observations[1]))
return False
else:
return True
def _check_coadd_in_shared_data(shared_data, wavelength_range):
if 'coadd' not in shared_data:
shared_data['coadd'] = {
'wavelength_range': wavelength_range[:]
}
shared_data['binned'] = {
'wavelength_range': wavelength_range[:]
}
else:
shared_data['coadd']['wavelength_range'][0] = min(shared_data['coadd']['wavelength_range'][0],
wavelength_range[0])
shared_data['coadd']['wavelength_range'][1] = max(shared_data['coadd']['wavelength_range'][1],
wavelength_range[1])
shared_data['binned']['wavelength_range'][0] = min(shared_data['binned']['wavelength_range'][0],
wavelength_range[0])
shared_data['binned']['wavelength_range'][1] = max(shared_data['binned']['wavelength_range'][1],
wavelength_range[1])
return shared_data
def prepare_datasets(config_in):
    """
    FITS files, telluric list etc. are retrieved at the beginning and converted to a pickle object
    to be processed to the next steps in the pipeline
    In this way all the changes performed on the fits files are preserved (sky correction, differential correction)
    """

    """ config_dictionary: dictionary with all the configuration parameters from config_in
        lists_dictionary: for each night, the list of files
    """

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)

    #try:
    #    shared_data = load_from_cpickle('shared', config_in['output'])
    #    loaded_shared_data = True
    #except:
    #    shared_data = {}
    #    loaded_shared_data = False

    # NOTE(review): when the 'shared' pickle already exists, shared_data is left
    # undefined here; the `continue` / `return` guards further down ensure it is
    # only referenced when loaded_shared_data is False
    if check_existence_cpickle('shared', config_in['output']):
        loaded_shared_data = True
    else:
        shared_data = {}
        loaded_shared_data = False

    # Sanity-check flags: turned to False by the checks below, and an exception
    # is raised at the very end if any of them failed
    pass_wavelength_rescaling = True
    pass_wavelength_master_out = True

    for night in night_dict:

        print('Processing data for night: ', night)
        print()

        """ List files are supposed to be in the same directory of the yaml file,
            NOT on the archive directory: in this way it is possible to try different
            combinations of nights and files without making a mess in the archive """

        files_list, files_transit_out, files_transit_in, files_transit_full, files_telluric, files_star_telluric = get_filelists(
            night_dict[night])

        lists_dictionary = {
            'observations': files_list,
            'star_telluric': files_star_telluric,
            'n_observations': len(files_list),
        }

        try:
            lists_dictionary['transit_out'] = files_transit_out
            lists_dictionary['transit_in'] = files_transit_in
            lists_dictionary['transit_full'] = files_transit_full
            lists_dictionary['n_transit_out'] = len(files_transit_out)
            lists_dictionary['n_transit_in'] = len(files_transit_in)
            lists_dictionary['n_transit_full'] = len(files_transit_full)
            lists_dictionary['telluric'] = files_telluric
            lists_dictionary['n_tellurics'] = len(files_telluric)
            write_transit_list = False
        except:
            # The in/out/full transit lists are missing: they will be computed
            # from the transit ephemeris and written to disk later on
            print(' Input lists for transit in/out not found, proceeding to automatic selection and writing ')
            print()
            write_transit_list = True

        """ Retrieval on instrument characteristics """
        instrument = night_dict[night]['instrument']
        mask = night_dict[night]['mask']
        archive_dir = instrument_dict[instrument]['data_archive']
        order_selection = instrument_dict[instrument]['orders']
        wavelength_rescaling = instrument_dict[instrument]['wavelength_rescaling']
        # time_of_transit is not used directly below; the transit-window
        # computation happens inside _write_transit_list via night_dict[night]
        time_of_transit = night_dict[night]['time_of_transit']
        planet_dict = from_config_get_planet(config_in)
        star_dict = from_config_get_star(config_in)

        print(" # observations: ", lists_dictionary['n_observations'])
        try:
            print(" # out-transit obs: ", lists_dictionary['n_transit_out'])
            print(" # in-transit obs: ", lists_dictionary['n_transit_in'])
        except:
            pass
        print(" Instrument: ", instrument)
        print(" Archive DIR: ", archive_dir)
        print(" Night: ", night)
        print(" Mask: ", mask)
        print(" WL rescaling: ", wavelength_rescaling)
        print()

        try:
            # Shortcut path: if the night has been processed already, refresh the
            # lists / observational parameters if needed and skip to the next night
            if not check_existence_cpickle('input_dataset_fibA', config_in['output'], night):
                raise ValueError()

            #pass_wavelength_rescaling = _check_wavelength_rescaling(wavelength_rescaling,
            #                                                        observations_A['coadd']['wavelength_range']
            #                                                        )

            #shared_data = _check_coadd_in_shared_data(shared_data,
            #                                          observations_A['coadd']['wavelength_range'])

            print(" Input data for night {0:s} successfully retrieved".format(night))

            if write_transit_list:
                observations_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
                lists_dictionary = _write_transit_list(observations_A,
                                                       lists_dictionary,
                                                       night_dict[night],
                                                       planet_dict)
                save_to_cpickle('lists', lists_dictionary, config_in['output'], night)
                print(" List rewritten, be careful however that you may incur in extra problems if they have changed")

            try:
                observational_parameters = load_from_cpickle('observational_pams', config_in['output'], night)
            except:
                # Observational parameters missing: recompute them from the stored dataset
                observations_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
                observational_parameters = _get_observational_parameters(observations_A,
                                                                         lists_dictionary,
                                                                         night_dict[night],
                                                                         instrument_dict[instrument],
                                                                         star_dict,
                                                                         planet_dict)
                save_to_cpickle('observational_pams', observational_parameters, config_in['output'], night)

            print(" New observational parameters loaded successfully")

            for key_name, key_val in observational_parameters['RV_star'].items():
                print(" RV star {0:s}: {1}".format(key_name, key_val))
            print()

            continue
        except ValueError:
            # Pickles not found: fall through to the full extraction below
            pass

        observations_A = {}
        observations_s1d_A = {}
        observations_B = {}
        calib_data_A = {}
        calib_data_B = {}

        for obs in lists_dictionary['observations']:

            print(" Reading ", obs, " associated files")
            observations_A[obs], observations_s1d_A[obs] = \
                get_input_data(instrument, archive_dir + night, obs, mask, skip_s1d=False,
                               order_selection=order_selection)

            #""" Zero or negative values are identified, flagged and substituted with another value """
            #replacement = 0.01
            #observations_A[obs]['null'] = (observations_A[obs]['e2ds'] <= replacement)
            #observations_A[obs]['e2ds'][observations_A[obs]['null']] = replacement

            """ Negative values are just statistical noise around the null flux points,
                removing them would bias the flux level of the sky towards
                higher values
                We proceed in this way:
                - identify the negative values and make a statistics of their
                  average value
                - if in relevant number, we assume that the median of their absolute
                  values corresponds to the noise floor
                - we add the noise floor to the error estimate
            """

            observations_A[obs]['null'] = (observations_A[obs]['e2ds'] <= 0.0)

            if (np.sum(observations_A[obs]['null']) > 30):
                observations_A[obs]['noise_floor'] = np.median(np.abs(
                    observations_A[obs]['e2ds'][observations_A[obs]['null']]))
            else:
                # Too few negative pixels for a meaningful statistic: fall back to
                # a fixed floor whose scale depends on the flux normalization
                if observations_A[obs].get('absolute_flux', True):
                    observations_A[obs]['noise_floor'] = 1.0000
                else:
                    observations_A[obs]['noise_floor'] = 0.00001

            # Noise floor added in quadrature to the formal error
            observations_A[obs]['e2ds_err'] = np.sqrt(observations_A[obs]['e2ds_err']**2 + observations_A[obs]['noise_floor']**2)
            #observations_A[obs]['e2ds_err'] = np.sqrt(observations_A[obs]['e2ds'])

            if 'n_orders' not in observations_A or 'n_pixels' not in observations_A:
                # First observation of the night: store the grid size and fetch the
                # calibration data once
                # NOTE(review): the calibration fetch is grouped with this
                # first-observation branch — confirm against the upstream source
                observations_A['n_orders'] = observations_A[obs]['n_orders']
                observations_A['n_pixels'] = observations_A[obs]['n_pixels']
                calib_data_A = get_calib_data(instrument, archive_dir + night, obs,
                                              order_selection=order_selection)

            """ Updating info on shared data """
            if 'coadd' not in observations_A:
                observations_A['coadd'] = {
                    'wavelength_range': [np.min(observations_A[obs]['wave'][0, :]),
                                         np.max(observations_A[obs]['wave'][-1, :])]
                }
            else:
                observations_A['coadd']['wavelength_range'][0] = min(observations_A['coadd']['wavelength_range'][0],
                                                                     np.min(observations_A[obs]['wave'][0, :]))
                observations_A['coadd']['wavelength_range'][1] = max(observations_A['coadd']['wavelength_range'][1],
                                                                     np.max(observations_A[obs]['wave'][-1, :]))

            """ Reading the fiber B counter part
                If the target has been observed in ThAr or FP mode, fiber B data will not be accessible
            """
            has_fiber_B = False
            try:
                observations_B[obs], _ = get_input_data(instrument, archive_dir + night, obs, mask,
                                                        fiber='B', order_selection=order_selection)

                """ Negative values are just statistical noise around the null flux points,
                    removing them would bias the flux level of the sky towards
                    higher values
                    We proceed in this way:
                    - identify the negative values and make a statistics of their
                      average value
                    - if in relevant number, we assume that the their median value
                      corresponds to the noise floor
                    - we add the noise floor to the error estimate
                """
                #""" Zero or negative values are identified, flagged and substituted with another value """
                #replacement = 0.01

                observations_B[obs]['null'] = (observations_B[obs]['e2ds'] <= 0.0)

                if (np.sum(observations_B[obs]['null']) > 30):
                    observations_B[obs]['noise_floor'] = np.median(np.abs(
                        observations_B[obs]['e2ds'][observations_B[obs]['null']]))
                else:
                    observations_B[obs]['noise_floor'] = 1.

                #observations_B[obs]['e2ds'][observations_B[obs]['null']] = replacement
                observations_B[obs]['e2ds_err'] = np.sqrt(np.abs(observations_B[obs]['e2ds'])) + observations_B[obs]['noise_floor']

                if 'n_orders' not in observations_B or 'n_pixels' not in observations_B:
                    observations_B['n_orders'] = observations_B[obs]['n_orders']
                    observations_B['n_pixels'] = observations_B[obs]['n_pixels']
                    calib_data_B = get_calib_data(instrument, archive_dir + night, obs,
                                                  fiber='B', order_selection=order_selection)

                has_fiber_B = True
            except:
                # Fiber B not available (e.g. simultaneous ThAr/FP calibration):
                # proceed with fiber A only
                pass

        """ Building the base (array of wavelengths) for coadded spectra within the same night """
        observations_A['coadd']['wave'] = np.arange(observations_A['coadd']['wavelength_range'][0],
                                                    observations_A['coadd']['wavelength_range'][1],
                                                    instrument_dict[instrument]['wavelength_step'], dtype=np.double)
        observations_A['coadd']['size'] = np.size(observations_A['coadd']['wave'])
        observations_A['coadd']['step'] = np.ones(observations_A['coadd']['size'], dtype=np.double) * \
            instrument_dict[instrument]['wavelength_step']

        print()
        print(" Fixing the observation lists if they are missing")
        if write_transit_list:
            lists_dictionary = _write_transit_list(observations_A,
                                                   lists_dictionary,
                                                   night_dict[night],
                                                   planet_dict)

        print()
        print(" Computing the RV shift outside the transit, and store it to an additional file for quick access ")
        observational_parameters = _get_observational_parameters(observations_A,
                                                                 lists_dictionary,
                                                                 night_dict[night],
                                                                 instrument_dict[instrument],
                                                                 star_dict,
                                                                 planet_dict)
        print()
        for key_name, key_val in observational_parameters['RV_star'].items():
            print(" RV star {0:s}: {1:f}".format(key_name, key_val))
        print()

        print(" Writing dataset files for night ", night, " fiber A")
        save_to_cpickle('lists', lists_dictionary, config_in['output'], night)
        save_to_cpickle('input_dataset_fibA', observations_A, config_in['output'], night)
        save_to_cpickle('input_dataset_s1d_fibA', observations_s1d_A, config_in['output'], night)
        save_to_cpickle('calibration_fibA', calib_data_A, config_in['output'], night)
        save_to_cpickle('observational_pams', observational_parameters, config_in['output'], night)

        if has_fiber_B:
            # Fiber B reuses the fiber A coadd wavelength grid
            observations_B['coadd'] = {}
            observations_B['coadd']['wave'] = observations_A['coadd']['wave'].copy()
            observations_B['coadd']['size'] = np.size(observations_B['coadd']['wave'])
            observations_B['coadd']['step'] = np.ones(observations_B['coadd']['size'], dtype=np.double) * \
                instrument_dict[instrument]['wavelength_step']

            print(" Writing dataset files for night ", night, " fiber B")
            save_to_cpickle('input_dataset_fibB', observations_B, config_in['output'], night)
            save_to_cpickle('calibration_fibB', calib_data_B, config_in['output'], night)

        """ Running some checks to see if input parameters have been configured properly """
        wavelength_rescaling = instrument_dict[instrument]['wavelength_rescaling']
        pass_wavelength_rescaling = _check_wavelength_rescaling(
            wavelength_rescaling,
            observations_A['coadd']['wavelength_range']
        )
        print()

        if loaded_shared_data: continue

        """ Setting up the base arrays for all the nights"""
        shared_data = _check_coadd_in_shared_data(shared_data, observations_A['coadd']['wavelength_range'])

    # When the shared arrays were computed in a previous run, nothing else to do
    if loaded_shared_data: return

    """ Building the base (array of wavelengths) for master-out and coadd spectra
        We do it now to be sure that it will be the same for the whole pipeline
    """
    print(" Creating the shared arrays")
    print()

    # Shrink the shared range by 2.0 on each side (edge buffer, presumably in
    # Angstrom — the rest of the pipeline labels wavelengths in \AA)
    shared_data['coadd']['wavelength_range'][0] += 2.0
    shared_data['coadd']['wavelength_range'][1] -= 2.0

    shared_data['coadd']['wave'] = np.arange(shared_data['coadd']['wavelength_range'][0],
                                             shared_data['coadd']['wavelength_range'][1],
                                             config_in['instruments']['shared']['wavelength_step'], dtype=np.double)
    shared_data['coadd']['size'] = np.size(shared_data['coadd']['wave'])
    shared_data['coadd']['step'] = np.ones(shared_data['coadd']['size'], dtype=np.double) * \
        config_in['instruments']['shared']['wavelength_step']

    # The binned grid is the coadd grid with a coarser step (binning_factor)
    shared_data['binned']['wave'] = np.arange(shared_data['coadd']['wavelength_range'][0],
                                              shared_data['coadd']['wavelength_range'][1],
                                              config_in['instruments']['shared']['wavelength_step']
                                              * config_in['master-out']['binning_factor'], dtype=np.double)
    shared_data['binned']['size'] = np.size(shared_data['binned']['wave'])
    shared_data['binned']['step'] = np.ones(shared_data['binned']['size'], dtype=np.double) * \
        config_in['instruments']['shared']['wavelength_step'] * \
        config_in['master-out']['binning_factor']

    if config_in['master-out']['wavelength_range'][0] < shared_data['coadd']['wavelength_range'][0] or \
            config_in['master-out']['wavelength_range'][1] > shared_data['coadd']['wavelength_range'][1]:
        warnings.warn("ERROR: Valid master_out wavelength window must be between {0:8.2f} and {1:8.2f}".format(
            shared_data['coadd']['wavelength_range'][0],
            shared_data['coadd']['wavelength_range'][1]))
        pass_wavelength_master_out = False

    shared_data['master-out'] = {}
    shared_data['master-out']['wave'] = np.arange(config_in['master-out']['wavelength_range'][0],
                                                  config_in['master-out']['wavelength_range'][1],
                                                  config_in['master-out']['wavelength_step'], dtype=np.double)
    shared_data['master-out']['size'] = np.size(shared_data['master-out']['wave'])
    shared_data['master-out']['step'] = np.ones(shared_data['master-out']['size'], dtype=np.double) * \
        config_in['master-out']['wavelength_step']

    # Fail loudly if any of the configuration checks above emitted a warning
    if not (pass_wavelength_master_out and pass_wavelength_rescaling):
        raise ValueError("ERROR: check the previous warnings to see where you are doing it worng")

    print(" COADD wavelength range between {0:8.2f} and {1:8.2f}".format(
        shared_data['coadd']['wavelength_range'][0], shared_data['coadd']['wavelength_range'][1]))
    print(" COADD wavelength step: {0:5.3f}".format(config_in['instruments']['shared']['wavelength_step']))
    print()

    print("Saving shared data")
    save_to_cpickle('shared', shared_data, config_in['output'])
    print()
def _write_transit_list(observations_A, lists_dict, night_dict_key, planet_dict):
    """Classify observations as in/out/full transit and write the list files.

    The transit windows are computed from the night's time of transit and the
    planet's transit durations; each observation is assigned by comparing its
    mid-exposure BJD (+/- half the exposure time) against the window
    boundaries. The three file names come from ``night_dict_key``
    ('in_transit', 'out_transit', 'full_transit'); after writing, the lists
    are read back through ``get_filelists`` so that ``lists_dict`` mirrors
    the files on disk.

    Args:
        observations_A (dict): per-observation data; each observation must
            provide 'BJD' (days, mid-exposure) and 'EXPTIME' (seconds).
        lists_dict (dict): dictionary of observation lists, updated in place.
        night_dict_key (dict): configuration of the night (output file names,
            time of transit).
        planet_dict (dict): planet parameters with the transit duration(s).

    Returns:
        dict: the updated ``lists_dict``.
    """
    try:
        # Total (T1-T4) and full (T2-T3) transit windows centred on the time of transit
        total_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['total_transit_duration'])[0] / 2.
        total_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['total_transit_duration'])[0] / 2.
        full_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['full_transit_duration'])[0] / 2.
        full_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['full_transit_duration'])[0] / 2.
    except KeyError:
        # Only a generic 'transit_duration' is available: use it for both windows
        total_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
        total_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
        full_transit_start = np.atleast_1d(night_dict_key['time_of_transit'])[0] - np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
        full_transit_end = np.atleast_1d(night_dict_key['time_of_transit'])[0] + np.atleast_1d(planet_dict['transit_duration'])[0] / 2.
        print('*** unclear transit duration, ingress/egress observations will be considered full-transit')

    # Context managers guarantee the three list files are closed (and flushed)
    # even if something fails while writing — this matters because they are
    # read back through get_filelists() right below
    with open(night_dict_key['in_transit'], 'w') as fileout_transit_in_list, \
            open(night_dict_key['out_transit'], 'w') as fileout_transit_out_list, \
            open(night_dict_key['full_transit'], 'w') as fileout_transit_full_list:

        for obs in lists_dict['observations']:

            # EXPTIME is in seconds: convert to days to match the BJD time
            # scale (renamed from the misleading 'exptime_seconds')
            exptime_days = observations_A[obs]['EXPTIME'] / 86400.

            """BJD times have been already corrected to match mid-exposure epochs """
            if observations_A[obs]['BJD'] + exptime_days / 2. < total_transit_start \
                    or observations_A[obs]['BJD'] - exptime_days / 2. > total_transit_end:
                fileout_transit_out_list.write('{0:s}\n'.format(obs))
            else:
                fileout_transit_in_list.write('{0:s}\n'.format(obs))

                # Only exposures entirely inside the T2-T3 window count as full transit
                if observations_A[obs]['BJD'] - exptime_days / 2. > full_transit_start \
                        and observations_A[obs]['BJD'] + exptime_days / 2. < full_transit_end:
                    fileout_transit_full_list.write('{0:s}\n'.format(obs))

    files_list, files_transit_out, files_transit_in, files_transit_full, files_telluric, files_star_telluric = get_filelists(night_dict_key)

    lists_dict['transit_out'] = files_transit_out
    lists_dict['transit_in'] = files_transit_in
    lists_dict['transit_full'] = files_transit_full
    lists_dict['n_transit_out'] = np.size(files_transit_out)
    lists_dict['n_transit_in'] = np.size(files_transit_in)
    lists_dict['n_transit_full'] = np.size(files_transit_full)

    try:
        lists_dict['telluric'] = files_telluric
        lists_dict['n_tellurics'] = np.size(files_telluric)
    except Exception:
        # No dedicated telluric list: default to the out-of-transit observations
        lists_dict['telluric'] = files_transit_out.copy()
        lists_dict['n_tellurics'] = np.size(files_transit_out)

    print(" # observations: ", lists_dict['n_observations'])
    print(" # out-transit obs: ", lists_dict['n_transit_out'])
    print(" # in-transit obs: ", lists_dict['n_transit_in'])
    print(" # full-transit obs: ", lists_dict['n_transit_full'])

    return lists_dict
def _get_observational_parameters(observations_A, lists_dict, night_dict_key, instrument_dict_key, star_dict, planet_dict):
    """Collect per-night observational parameters and per-observation RV shifts.

    Builds a dictionary holding instrument/night settings, a linear fit of the
    out-of-transit stellar RVs versus time, and — for every observation — the
    radial-velocity shifts needed to move between reference frames:
    ORF (observer), BRF (Solar System barycenter), SRF (stellar) and
    PRF (planetary).
    """
    observational_parameters = {
        'instrument': night_dict_key['instrument'],
        'mask': night_dict_key['mask'],
        'archive_dir': instrument_dict_key['data_archive'],
        'wavelength_rescaling': instrument_dict_key['wavelength_rescaling'],
        'time_of_transit': np.atleast_1d(night_dict_key['time_of_transit'])[0],
        'refraction_method': instrument_dict_key['refraction']['method'],
        'refraction_fit_order': instrument_dict_key['refraction']['fit_order'],
        'refraction_fit_iters': instrument_dict_key['refraction']['fit_iters'],
        'refraction_fit_sigma': instrument_dict_key['refraction']['fit_sigma'],
        'refraction_knots_spacing': instrument_dict_key['refraction']['knots_spacing'],
        'linear_fit_method': instrument_dict_key['linear_fit_method'],
        'n_orders': observations_A['n_orders'],
        'n_pixels': observations_A['n_pixels'],
        'RV_star': {}
    }

    # linear fit of the out-of-transit RVs as a function of time since mid-transit
    rv_out = []
    bjd0_out = []
    for obs in lists_dict['transit_out']:
        bjd0_out.extend([observations_A[obs]['BJD'] - observational_parameters['time_of_transit']])
        rv_out.extend([observations_A[obs]['RVC']])

    observational_parameters['RV_star']['slope'], \
        observational_parameters['RV_star']['intercept'], \
        observational_parameters['RV_star']['r_value'], \
        observational_parameters['RV_star']['p_value'], \
        observational_parameters['RV_star']['std_err'] = sci_stats.linregress(bjd0_out, rv_out)

    # per-observation bookkeeping: BJD, RV, airmass, BERV, exposure time
    berv_list = []
    rvc_stack = np.zeros(len(lists_dict['observations']))
    for i, obs in enumerate(lists_dict['observations']):
        berv_list.extend([observations_A[obs]['BERV']])
        observational_parameters[obs] = {
            'BJD': observations_A[obs]['BJD'],
            'mBJD': observations_A[obs]['BJD']-2450000.0000,
            'RVC': observations_A[obs]['RVC'],
            'AIRMASS': observations_A[obs]['AIRMASS'],
            'BERV': observations_A[obs]['BERV'],
            'EXPTIME': observations_A[obs]['EXPTIME']
        }
        rvc_stack[i] = observations_A[obs]['RVC']

    observational_parameters['BERV_avg'] = np.average(berv_list)

    # systemic RV: from the average of the CCF RVs, from the analytical orbital
    # solution, or from the intercept of the linear fit (default)
    observational_parameters['RV_star']['RV_from_CCF'] = False
    observational_parameters['RV_star']['RV_from_analytical_solution'] = False
    if night_dict_key['use_rv_from_ccf']:
        observational_parameters['RV_star']['RV_from_CCF'] = True
        rvc_systemic = np.average(rvc_stack)
    elif night_dict_key['use_analytical_rvs']:
        observational_parameters['RV_star']['RV_from_analytical_solution'] = True
        rvc_systemic = star_dict['RV_gamma'][0]
        observational_parameters['RV_star']['RV_semiamplitude'] = star_dict['RV_semiamplitude'][0]
    else:
        rvc_systemic = observational_parameters['RV_star']['intercept']
    observational_parameters['RV_star']['RV_systemic'] = rvc_systemic

    for obs in lists_dict['observations']:

        # stellar RV at this epoch, relative to the systemic value:
        # measured CCF value, circular-orbit sinusoid, or linear-fit prediction
        if night_dict_key['use_rv_from_ccf']:
            rvc_bjdshift = observations_A[obs]['RVC']
        elif night_dict_key['use_analytical_rvs']:
            rvc_bjdshift = - observational_parameters['RV_star']['RV_semiamplitude'] * np.sin(2 * np.pi * \
                (observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit'])/planet_dict['period'][0])
        else:
            rvc_bjdshift = observational_parameters['RV_star']['slope'] * \
                (observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit'])

        observational_parameters[obs]['RV_bjdshift'] = rvc_bjdshift

        # observer frame -> stellar rest frame
        observational_parameters[obs]['rv_shift_ORF2SRF'] = observational_parameters[obs]['BERV'] - \
            (rvc_systemic + rvc_bjdshift)

        """ Old definition
        observational_parameters[obs]['rv_shift_ORF2SRF'] = observational_parameters[obs]['BERV'] - \
                                                  (observational_parameters['RV_star']['intercept'] +
                                                   observational_parameters['RV_star']['slope'] *
                                                   (observational_parameters[obs][
                                                        'BJD'] - time_of_transit))
        """

        """ Slight modification of the RV shift to minimize the rebinning error at the wings of the spectra
            BRF = Solar System Barycentric Reference frame
            rv_shift_ORF2BRF = rv_shift_ORF2SRF_mod + rv_shift_ORF2SRF_res
        """
        observational_parameters[obs]['rv_shift_ORF2BRF'] = \
            observational_parameters[obs]['BERV']
        observational_parameters[obs]['rv_shift_ORF2BRF_mod'] = \
            observational_parameters[obs]['BERV'] - observational_parameters['BERV_avg']

        """ Slight modification of the RV shift to minimize the rebinning error at the wings of the spectra
            rv_shift_ORF2SRF = rv_shift_ORF2SRF_mod + rv_shift_ORF2SRF_res
        """
        observational_parameters[obs]['rv_shift_ORF2SRF_mod'] = \
            observational_parameters[obs]['BERV'] - observational_parameters['BERV_avg'] - rvc_bjdshift
        # residual (night-constant) part of the ORF->SRF shift
        observational_parameters[obs]['rv_shift_ORF2SRF_res'] = \
            observational_parameters['BERV_avg'] - rvc_systemic

        """ RV shift from the observer RF to the planet RF
            STRONG ASSUMPTIONS:
                - there is only the transiting planet in the system
                - the planet has null eccentricity
                - linear approximation or the orbit near the transit event

            Computation is performed by moving to the Solar Barycenter, than to the Stellar System Barycenter
            and finally onto the planet
        """
        observational_parameters[obs]['rv_shift_ORF2PRF'] = \
            observational_parameters[obs]['BERV'] \
            - rvc_systemic \
            - planet_dict['RV_semiamplitude'][0] \
            * (observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit']) \
            / planet_dict['period'][0] * 2 * np.pi

        """ RV shift from Stellar Rest Frame to Planetary Rest Frame
            We have to take into account the RV of star relatively to the Barycenter
        """
        observational_parameters[obs]['rv_shift_SRF2PRF'] = \
            + rvc_bjdshift \
            - planet_dict['RV_semiamplitude'][0] \
            * (observational_parameters[obs]['BJD'] - observational_parameters['time_of_transit']) \
            / planet_dict['period'][0] * 2 * np.pi

    # night-level copy of the residual shift (same value as the per-obs one)
    observational_parameters['rv_shift_ORF2SRF_res'] = \
        observational_parameters['BERV_avg'] - rvc_systemic

    return observational_parameters
def plot_dataset(config_in, night_input=''):
    """Plot the rescaled e2ds spectra of every observation, color-coded by BJD.

    Bug fix: the loop now iterates over ``night_list`` (the nights selected
    through ``night_input``); previously it always looped over every night in
    the configuration, silently ignoring ``night_input``.
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print()
        print("Plotting dataset Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        instrument = night_dict[night]['instrument']
        wavelength_rescaling = instrument_dict[instrument]['wavelength_rescaling']

        """ Retrieving the observations"""
        input_data = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
        """ Retrieving the calibration data """
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)

        colors, cmap, line_colors = make_color_array(lists, input_data)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):
            rescaling = compute_rescaling(input_data[obs]['wave'], input_data[obs]['e2ds'], wavelength_rescaling)
            for order in range(0, input_data[obs]['n_orders']):
                ax1.plot(input_data[obs]['wave'][order, :], input_data[obs]['e2ds'][order, :] / rescaling,
                         zorder=i, lw=1, c=line_colors[i], alpha=0.5)
                ax2.plot(input_data[obs]['wave'][order, :], input_data[obs]['e2ds'][order, :] / rescaling,
                         zorder=-i, lw=1, c=line_colors[i], alpha=0.5)

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        ax2.set_xlabel('$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # a dummy array is required by ScalarMappable
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 33,511 | 49.092676 | 144 | py |
SLOPpy | SLOPpy-main/SLOPpy/master_out.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_master_out', 'plot_master_out', 'plot_compare_master_out']
subroutine_name = 'master_out'
def compute_master_out(config_in):
    """Compute the per-night master-out spectra and the composite master-out.

    For every night, the out-of-transit observations are rebinned onto the
    shared wavelength grid, coadded, rescaled, boxcar-smoothed and corrected
    with an iterative sigma-clipped spline.  The night master-outs are then
    combined through a weighted mean in the stellar rest frame (SRF) into a
    composite master-out, which is finally shifted back to the observer
    reference frame of each night and saved.

    Fixes over the previous revision:
    - the weighted-mean weights are now computed with true division
      (``1. / err**2``); the previous floor division (``//``) zeroed the
      weight of any pixel whose error exceeded one;
    - bare ``except`` clauses narrowed to ``except Exception``.
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    shared_data = load_from_cpickle('shared', config_in['output'])

    master_out_composite = {
        'subroutine': 'master_out',
        'wave': shared_data['coadd']['wave'],
        'step': shared_data['coadd']['step'],
        'size': shared_data['coadd']['size'],
    }

    # accumulators for the weighted mean over the nights (stellar rest frame)
    wmean_wflux = np.zeros(master_out_composite['size'])
    wmean_weight = np.zeros(master_out_composite['size'])

    box_kernel = Box1DKernel(config_in['master-out'].get('boxcar_smoothing', 1))

    for night in night_dict:

        try:
            master_out = load_from_cpickle('master_out', config_in['output'], night)
            # NOTE(review): this branch accumulates the observer-frame 'rescaled'
            # spectrum while the computing branch below accumulates the SRF one —
            # confirm the asymmetry is intended
            wmean_wflux += master_out['rescaled'] / master_out['rescaled_err'] ** 2
            wmean_weight += 1. / master_out['rescaled_err'] ** 2
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except Exception:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': 'master_out'
        }

        master_out = {
            'subroutine': subroutine_name,
            'wave': shared_data['coadd']['wave'],
            'step': shared_data['coadd']['step'],
            'size': shared_data['coadd']['size'],
            'total_flux': np.zeros(shared_data['coadd']['size'], dtype=np.double),
            'total_flux_err': np.zeros(shared_data['coadd']['size'], dtype=np.double)
        }

        # coadd of the out-of-transit observations on the shared grid
        for obs in lists['transit_out']:

            processed[obs] = {}

            processed[obs]['rescaling'], \
                processed[obs]['rescaled'], \
                processed[obs]['rebinned_err'] = perform_rescaling(
                    input_data[obs]['wave'], input_data[obs]['e2ds'], input_data[obs]['e2ds_err'],
                    observational_pams['wavelength_rescaling'])

            preserve_flux = input_data[obs].get('absolute_flux', True)

            processed[obs]['rebinned'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds'],
                               calib_data['blaze'],
                               master_out['wave'],
                               master_out['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])

            processed[obs]['rebinned_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds_err'],
                               calib_data['blaze'],
                               master_out['wave'],
                               master_out['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])

            master_out['total_flux'] += processed[obs]['rebinned']
            master_out['total_flux_err'] += processed[obs]['rebinned_err'] ** 2.0

        master_out['total_flux_err'] = np.sqrt(master_out['total_flux_err'])

        master_out['rescaling'], \
            master_out['rescaled'], \
            master_out['rescaled_err'] = perform_rescaling(
                master_out['wave'], master_out['total_flux'],
                master_out['total_flux_err'],
                observational_pams['wavelength_rescaling'])

        master_out['rescaled'], master_out['rescaled_err'], master_out['null'] = \
            replace_values_errors(master_out['rescaled'], master_out['rescaled_err'],
                                  threshold=0.0001, replacement=1.0000)

        master_out['smoothed'] = convolve(master_out['rescaled'].copy(), box_kernel)
        master_out['smoothed_err'] = np.sqrt(convolve((master_out['rescaled_err']) ** 2, box_kernel))

        # sanitize unphysical smoothed values
        sel = (master_out['smoothed'] < 0.01) | (master_out['smoothed'] > 1.5)
        master_out['smoothed'][sel] = 1.0
        master_out['smoothed_err'][sel] = 1.0

        # iterative sigma-clipped spline fit of the rescaled/smoothed residuals
        selection = (master_out['wave'] > 0)
        spline_iter = 5
        for n_iter in range(0, spline_iter):
            residuals = master_out['rescaled'] / master_out['smoothed']
            wave = master_out_composite['wave']

            """ picking the number of knots """
            nknots = ((np.amax(wave) - np.amin(wave)) / config_in['master-out'].get('spline_step', 0.10))
            """ picking the indices of the knots"""
            idx_knots = (np.arange(1, len(wave[selection]) - 1, (len(wave[selection]) - 2.) / nknots)).astype('int')
            """ passing from indices to knots values """
            knots = wave[selection][idx_knots]

            coeff = sci_int.splrep(wave[selection], residuals[selection], task=-1, k=2, t=knots)
            spline = sci_int.splev(wave, coeff)

            dif = residuals - spline
            std = np.std(dif)
            selection = np.where(np.abs(dif) < 4 * std)  # & (refraction[obs]['flag'])

        master_out['spline'] = spline
        master_out['smoothed'] *= spline
        master_out['smoothed_err'] *= spline

        # master-out in the stellar rest frame (SRF)
        master_out['SRF'] = {}
        master_out['SRF']['rescaled'] = \
            rebin_1d_to_1d(master_out['wave'],
                           master_out['step'],
                           master_out['rescaled'],
                           master_out['wave'], master_out['step'],
                           rv_shift=observational_pams['rv_shift_ORF2SRF_res'],
                           preserve_flux=False)

        master_out['SRF']['rescaled_err'] = \
            rebin_1d_to_1d(master_out['wave'],
                           master_out['step'],
                           master_out['rescaled_err'],
                           master_out['wave'], master_out['step'],
                           rv_shift=observational_pams['rv_shift_ORF2SRF_res'],
                           preserve_flux=False,
                           is_error=True)

        wmean_wflux += master_out['SRF']['rescaled'] / master_out['SRF']['rescaled_err'] ** 2
        wmean_weight += 1. / master_out['SRF']['rescaled_err'] ** 2

        """
        rv_shift = observational_pams['BERV_avg'] - observational_pams['RV_star']['intercept']
        # bringing the master-out to the aboslute reference system
        wave_shifted, _ = shift_wavelength(master_out['wave'], master_out['step'], rv_shift)

        # master-out is printed to .dat for compatibility with other programs
        master_data_out = get_filename('master_out', config_in['output'], night, extension=".dat")
        file_out = open(master_data_out, 'w')
        for w, f, e in zip(wave_shifted, master_out['rescaled'], master_out['rescaled_err']):
            file_out.write('{0:10.4f} {1:f} {2:f}\n'.format(w,f,e))
        file_out.close()
        print()
        print("NON OPTIMAL MASTER-OUT DAT FILE!!!!")
        print()
        """

        save_to_cpickle('master_out_processed', processed, config_in['output'], night)
        save_to_cpickle('master_out', master_out, config_in['output'], night)

    # composite master-out from the weighted mean over all nights
    master_out_composite['SRF'] = {}
    master_out_composite['SRF']['rescaled'] = wmean_wflux / wmean_weight
    master_out_composite['SRF']['rescaled_err'] = np.sqrt(1. / wmean_weight)
    master_out_composite['SRF']['smoothed'] = convolve(master_out_composite['SRF']['rescaled'].copy(), box_kernel)
    master_out_composite['SRF']['smoothed_err'] = \
        np.sqrt(convolve((master_out_composite['SRF']['rescaled_err']) ** 2, box_kernel))

    print()

    for night in night_dict:

        try:
            master_out_composite = load_from_cpickle('master_out_composite', config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format('master_out_composite', night, 'Retrieved'))
            continue
        except Exception:
            print("{0:45s} Night:{1:15s} {2:s}".format('master_out_composite', night, 'Computing'))
            print()

        master_out = load_from_cpickle('master_out', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        # shift the composite master-out back to the observer frame of this night
        master_out_composite['rescaled'] = \
            rebin_1d_to_1d(master_out_composite['wave'],
                           master_out_composite['step'],
                           master_out_composite['SRF']['rescaled'],
                           master_out_composite['wave'],
                           master_out_composite['step'],
                           rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
                           preserve_flux=False)

        master_out_composite['rescaled_err'] = \
            rebin_1d_to_1d(master_out_composite['wave'],
                           master_out_composite['step'],
                           master_out_composite['SRF']['rescaled_err'],
                           master_out_composite['wave'],
                           master_out_composite['step'],
                           rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
                           preserve_flux=False,
                           is_error=True)

        master_out_composite['smoothed'] = \
            rebin_1d_to_1d(master_out_composite['wave'],
                           master_out_composite['step'],
                           master_out_composite['SRF']['smoothed'],
                           master_out_composite['wave'],
                           master_out_composite['step'],
                           rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
                           preserve_flux=False)

        master_out_composite['smoothed_err'] = \
            rebin_1d_to_1d(master_out_composite['wave'],
                           master_out_composite['step'],
                           master_out_composite['SRF']['smoothed_err'],
                           master_out_composite['wave'],
                           master_out_composite['step'],
                           rv_shift=-observational_pams['rv_shift_ORF2SRF_res'],
                           preserve_flux=False,
                           is_error=True)

        # master_out_composite['smoothed'] = convolve(master_out_composite['rescaled'].copy(), box_kernel)
        # master_out_composite['smoothed_err'] = \
        #     np.sqrt(convolve((master_out_composite['rescaled_err']) ** 2, box_kernel))

        # sanitize unphysical smoothed values
        sel = (master_out_composite['smoothed'] < 0.01) | (master_out_composite['smoothed'] > 1.5)
        master_out_composite['smoothed'][sel] = 1.0
        master_out_composite['smoothed_err'][sel] = 1.0

        # iterative sigma-clipped spline fit against this night's master-out
        selection = (master_out_composite['wave'] > 0)
        spline_iter = 5
        for n_iter in range(0, spline_iter):
            residuals = master_out['rescaled'] / master_out_composite['smoothed']
            wave = master_out_composite['wave']

            """ picking the number of knots """
            nknots = ((np.amax(wave) - np.amin(wave)) / config_in['master-out'].get('spline_step', 0.10))
            """ picking the indices of the knots"""
            idx_knots = (np.arange(1, len(wave[selection]) - 1, (len(wave[selection]) - 2.) / nknots)).astype('int')
            """ passing from indices to knots values """
            knots = wave[selection][idx_knots]

            coeff = sci_int.splrep(wave[selection], residuals[selection], task=-1, k=2, t=knots)
            spline = sci_int.splev(wave, coeff)

            dif = residuals - spline
            std = np.std(dif)
            selection = np.where(np.abs(dif) < 4 * std)  # & (refraction[obs]['flag'])

        master_out_composite['spline'] = spline
        master_out_composite['smoothed'] *= spline
        master_out_composite['smoothed_err'] *= spline

        save_to_cpickle('master_out_composite', master_out_composite, config_in['output'], night)
def plot_master_out(config_in, night_input=''):
    """Plot, for each selected night, the master-out against the composite one.

    Fix: the ``observational_pams`` pickle was loaded twice and never used —
    both redundant reads have been removed.
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_master_out Night: ", night)

        """ Retrieving the analysis"""
        try:
            master_out = load_from_cpickle('master_out', config_in['output'], night)
            master_out_composite = load_from_cpickle('master_out_composite', config_in['output'], night)
        except Exception:
            print()
            print("No master_out , no plots")
            continue

        plt.figure(figsize=(12, 6))
        plt.title('Master out - night ' + night)

        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])

        # blaze of the first out-of-transit observation, for visual reference
        obs = lists['transit_out'][0]
        plt.scatter(input_data[obs]['wave'],
                    calib_data['blaze'],
                    color='C3', zorder=3., label='blaze', alpha=0.25)

        plt.errorbar(master_out['wave'],
                     master_out['rescaled'],
                     yerr=master_out['rescaled_err'],
                     fmt='.', c='C0', label='master-out ' + night)
        plt.plot(master_out['wave'],
                 master_out['smoothed'],
                 color='C1', zorder=3., label='smoothed master-out ' + night)

        plt.scatter(master_out_composite['wave'],
                    master_out_composite['rescaled'],
                    s=2, c='C3')
        plt.plot(master_out_composite['wave'],
                 master_out_composite['smoothed'],
                 c='C3', label='composite master-out')

        # residuals are lifted by 0.05/0.10 to keep the panel readable
        plt.scatter(master_out['wave'],
                    master_out['rescaled'] / master_out['smoothed'] * master_out['spline'] + 0.05,
                    s=2, c='C4', label='rescaled/smoothed')
        plt.scatter(master_out['wave'],
                    master_out['rescaled'] / master_out_composite['smoothed'] * master_out_composite['spline'] + 0.1,
                    s=2, c='C5', label='rescaled/ comp smoothed')
        plt.plot(master_out['wave'],
                 master_out['spline'] + 0.05,
                 c='C7', label='spline fit of the residuals')
        plt.plot(master_out['wave'],
                 master_out_composite['spline'] + 0.1,
                 c='C7')

        plt.ylim(0, 1.25)
        plt.xlabel('$\lambda$ [$\AA$]')
        plt.ylabel('Rescaled flux')
        plt.legend()
        plt.show()
def plot_compare_master_out(config_in):
    """Overplot the per-night SRF master-outs with the composite master-out.

    Fix: the unused ``observational_pams`` load has been removed.
    """
    plt.figure(figsize=(12, 6))
    plt.title('Master out - comparison between nights ')

    night_dict = from_config_get_nights(config_in)

    for i, night in enumerate(night_dict):

        """ Retrieving the analysis"""
        try:
            master_out = load_from_cpickle('master_out', config_in['output'], night)
            master_out_composite = load_from_cpickle('master_out_composite', config_in['output'], night)
        except Exception:
            # night not processed yet: skip silently
            continue

        plt.errorbar(master_out['wave'],
                     master_out['SRF']['rescaled'],
                     yerr=master_out['SRF']['rescaled_err'],
                     fmt='.', c='C' + repr(i), label='master-out ' + night, alpha=0.5)

        # the composite is night-independent: draw it only once
        if i == 0:
            plt.plot(master_out_composite['wave'],
                     master_out_composite['SRF']['rescaled'],
                     color='k', zorder=10, label='composite master-out')

    plt.ylim(0, 1.25)
    plt.xlabel('$\lambda$ [$\AA$]')
    plt.ylabel('Rescaled flux')
    plt.legend()
    plt.show()
SLOPpy | SLOPpy-main/SLOPpy/telluric_template.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_template",
"plot_telluric_template",
"compute_telluric_template_reference",
"plot_telluric_template_reference"]
def compute_telluric_template(config_in):
    """Telluric correction from a template spectrum, standard airmass scaling."""
    options = dict(n_iterations=1,
                   use_berv=True,
                   use_reference_airmass=False,
                   use_template=True,
                   subroutine_name='telluric_template')
    compute_telluric_template_routine(config_in, **options)
def compute_telluric_template_reference(config_in):
    """Telluric correction from a template spectrum, using a reference airmass."""
    options = dict(n_iterations=1,
                   use_berv=True,
                   use_reference_airmass=True,
                   use_template=True,
                   subroutine_name='telluric_template')
    compute_telluric_template_routine(config_in, **options)
def plot_telluric_template_reference(config_in, night_input=''):
    """The reference-airmass variant shares the standard plotting routine."""
    return plot_telluric_template(config_in, night_input=night_input)
def compute_telluric_template_routine(config_in, **kwargs):
    """Core routine shared by the telluric-template subroutines.

    For every night: fit a global rescaling factor of the telluric template
    against the telluric-reference observations (minimizing the scatter of the
    corrected spectra over ``fit_range``), rebin the rescaled template onto the
    2D e2ds grid, and divide each observation by the template raised to its
    airmass (optionally relative to a reference airmass).  Results are saved as
    the 'telluric' and 'telluric_processed' cpickle files.

    :param config_in: parsed configuration dictionary
    :param kwargs: routine options (``use_reference_airmass``, ``subroutine_name``, ...)
    :return: None (results are saved to disk)
    """

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)

    for night in night_dict:

        instrument_name = night_dict[night]['instrument']
        template_dict = instrument_dict[instrument_name]['telluric_template']

        print()
        print("compute_telluric_template Night: ", night)
        print()

        # skip nights that have already been processed
        try:
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            continue
        except:
            # NOTE(review): bare except also hides real load errors — consider narrowing
            print(" No telluric correction file found, computing now ")
            print()

        print(' instrument :', instrument_name)
        print(' template :', template_dict['file'])
        print(' fit_range :', template_dict['fit_range'])
        print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': kwargs['subroutine_name'],
            'n_orders': 0,
            'n_pixels': 0
        }

        telluric = {
            'subroutine': kwargs['subroutine_name'],
            'reference_frame': 'observer'
        }

        """ Retrieving the template data"""
        telluric_template_data = np.genfromtxt(template_dict['file'])

        obs_reference = lists['observations'][0]

        telluric['template'] = {
            'input': {
                # NOTE(review): the upper bound applies np.amax to the single element
                # [-1, 0] — probably meant the whole column [:, 0]; equivalent only
                # if the template is sorted in wavelength. Confirm before changing.
                'range': [np.amin(telluric_template_data[:, 0]), np.amax(telluric_template_data[-1, 0])],
                'wave': telluric_template_data[:, 0],
                'flux': telluric_template_data[:, 1],
                'ferr': telluric_template_data[:, 2],
                'step': telluric_template_data[:, 3]
            },
            'rebinned': {
                'wave': input_data[obs_reference]['wave'],
                'step': input_data[obs_reference]['step']
            }
        }

        """ Reference airmass for iterative correction of airmass"""
        if kwargs['use_reference_airmass']:
            airmass_temp = np.zeros(lists['n_transit_in'])
            for n_obs, obs in enumerate(lists['transit_in']):
                # This is to ensure that airmass, berv and rvc are associated to the correct spectra
                airmass_temp[n_obs] = input_data[obs]['AIRMASS']
            processed['airmass_ref'] = np.average(airmass_temp)
        else:
            processed['airmass_ref'] = 0.000

        processed['telluric'] = {}

        airmass = np.zeros(lists['n_observations'], dtype=np.double)
        berv = np.zeros(lists['n_observations'], dtype=np.double)
        rvc = np.zeros(lists['n_observations'], dtype=np.double)

        # There must be a more elegant way to do this, but I'm, not aware of it
        for n_obs, obs in enumerate(lists['observations']):
            # pixels inside the wavelength range used to fit the rescaling factor
            tel_selection = (input_data[obs]['wave'] > template_dict['fit_range'][0]) & \
                            (input_data[obs]['wave'] < template_dict['fit_range'][1])

            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }

            """ for plotting purpose only"""
            processed[obs]['wave'] = input_data[obs]['wave']
            processed[obs]['e2ds'] = input_data[obs]['e2ds']
            processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']

            processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
                perform_rescaling(input_data[obs]['wave'],
                                  input_data[obs]['e2ds'],
                                  input_data[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            processed[obs]['e2ds_sel'] = processed[obs]['e2ds_rescaled'][tel_selection]

            # grid bookkeeping, filled once from the first observation
            if processed['n_orders'] == 0:
                processed['wave_sel'] = input_data[obs]['wave'][tel_selection]
                processed['step_sel'] = input_data[obs]['step'][tel_selection]
                processed['n_orders'] = input_data[obs]['orders']
                processed['n_pixels'] = input_data[obs]['wave_size']

            # This is to ensure that airmass, berv and rvc are associated to the correct spectra
            processed['telluric'][obs] = {'n_obs': n_obs}
            airmass[n_obs] = input_data[obs]['AIRMASS']
            berv[n_obs] = input_data[obs]['BERV']
            rvc[n_obs] = input_data[obs]['RVC']

        processed['template_fit'] = {}

        # grid of candidate rescaling factors for the template depth
        rescaling_array = np.arange(0.05, 2.0, 0.05)
        computed_std = np.zeros(len(rescaling_array))

        processed['template_fit']['array_data'] = {}
        processed['template_fit']['array_move'] = {}
        processed['template_fit']['array_wave'] = []

        """ saving the wavelength array for plotting purpose"""
        for obs in lists['telluric']:
            processed['template_fit']['array_wave'].extend(processed['wave_sel'])

        for n_rescaling_factor, rescaling_factor in enumerate(rescaling_array):

            """ Template spectrum is rebinned onto the observations wavelength scale, using only the wavelength range
                selected for the computation of the rescaling factor
            """
            template_rebinned_flux = \
                rebin_1d_to_1d(telluric['template']['input']['wave'],
                               telluric['template']['input']['step'],
                               telluric['template']['input']['flux'],
                               processed['wave_sel'],
                               processed['step_sel'],
                               preserve_flux=False)

            """ Saving the outcome to dictionary """
            # scale the template line depth by the candidate factor
            template_rebinned_flux -= 1.00
            template_rebinned_flux *= rescaling_factor
            template_rebinned_flux += 1.00

            e2ds_corrected = []
            ## DEBUG
            # wave_corrected = []
            # e2ds_original = []

            for obs in lists['telluric']:
                # divide by the template raised to the observation airmass
                e2ds_corrected.extend(processed[obs]['e2ds_sel'] /
                                      np.power(template_rebinned_flux, input_data[obs]['AIRMASS']))
                # wave_corrected.extend(processed['wave_sel'])
                # e2ds_original.extend(processed[obs]['e2ds_sel'])

            computed_std[n_rescaling_factor] = np.std(e2ds_corrected)
            label_dict = '{0}'.format(rescaling_factor)
            # print(label_dict, computed_std[n_rescaling_factor])
            # processed['template_fit']['array_data'][label_dict] = e2ds_corrected
            # processed['template_fit']['array_move'][label_dict] = rescaling_factor

            # plt.scatter(wave_corrected, e2ds_original, s=2, alpha=0.5)
            # plt.scatter(wave_corrected, e2ds_corrected, s=2, alpha=0.5)
            # plt.plot(processed['wave_sel'],template_rebinned_flux)
            # plt.show()

        """ selection of the rescaling factor with the lowest scatter """
        ind_factor = np.argmin(computed_std)
        ind_range = 3
        if ind_factor < ind_range:
            sel_factor = rescaling_array[0:ind_factor+ind_range]
            sel_stdev = computed_std[0:ind_factor+ind_range]
        elif ind_factor > len(rescaling_array) - ind_range:
            sel_factor = rescaling_array[ind_factor-ind_range:]
            sel_stdev = computed_std[ind_factor-ind_range:]
        else:
            sel_factor = rescaling_array[ind_factor-ind_range:ind_factor+ind_range]
            sel_stdev = computed_std[ind_factor-ind_range:ind_factor+ind_range]

        # parabola through the minimum of the std curve; its vertex is the best factor
        coeff = np.polyfit(sel_factor, sel_stdev, 2)
        telluric_factor = - coeff[1] / (2*coeff[0])

        print(' telluric factor: {0:7f}'.format(telluric_factor))
        print()

        processed['template_fit']['telluric_factor'] = telluric_factor
        # NOTE(review): 'rescaling_factor' is the leftover loop variable (the last
        # value of rescaling_array), not the fitted factor — confirm intent
        processed['template_fit']['rescaling_factor'] = rescaling_factor
        processed['template_fit']['computed_std'] = computed_std
        processed['template_fit']['polyfit'] = {
            'package': 'numpy',  # in case we forget what we used...
            'order': 2,
            'coeff': coeff,
            'sel_factor': sel_factor,
            'sel_stdev': sel_stdev
        }

        """ After being rescaled for the proper factor, the template telluric spectrum is rebinned onto the 2D
            scale of the observations """
        telluric['template']['rebinned']['flux'] = \
            rebin_1d_to_2d(telluric['template']['input']['wave'],
                           telluric['template']['input']['step'],
                           telluric['template']['input']['flux'],
                           telluric['template']['rebinned']['wave'],
                           telluric['template']['rebinned']['step'],
                           preserve_flux=False)
        telluric['template']['rebinned']['ferr'] = \
            rebin_1d_to_2d(telluric['template']['input']['wave'],
                           telluric['template']['input']['step'],
                           telluric['template']['input']['ferr'],
                           telluric['template']['rebinned']['wave'],
                           telluric['template']['rebinned']['step'],
                           preserve_flux=False,
                           is_error=True)

        # neutralize pixels outside the template coverage (1 AA safety margin)
        sel_out_of_range = ~((telluric['template']['rebinned']['wave'] > telluric['template']['input']['range'][0]+1.) \
                             & (telluric['template']['rebinned']['wave'] < telluric['template']['input']['range'][1]-1.))
        telluric['template']['rebinned']['flux'][sel_out_of_range] = 1.
        telluric['template']['rebinned']['ferr'][sel_out_of_range] = 0.1

        # template at unit airmass, scaled by the fitted factor
        processed['telluric']['spectrum_noairmass'] = \
            (telluric['template']['rebinned']['flux'] - 1.) * telluric_factor + 1.0

        telluric['airmass_ref'] = processed['airmass_ref']

        for obs in lists['observations']:
            """ Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
            processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / \
                np.power(processed['telluric']['spectrum_noairmass'],
                         input_data[obs]['AIRMASS'] - processed['airmass_ref'])
            processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / \
                np.power(processed['telluric']['spectrum_noairmass'],
                         input_data[obs]['AIRMASS'] - processed['airmass_ref'])

        for obs in lists['observations']:
            # Correction of telluric lines
            telluric[obs] = {}

            # NOTE(review): this is the SAME array object for every obs and for
            # processed['telluric'] — the in-place null fix below mutates all of
            # them at once; confirm the aliasing is intended
            telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']

            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
            telluric[obs]['airmass_ref'] = processed['airmass_ref']

            """ Set anomalosly low point to one (e.g. when the template is not computed)"""
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
                                                 input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            telluric[obs]['spline_noairmass'] = telluric[obs]['spectrum_noairmass'].copy()

            """ No need to compute the spline approximation since we are already dealing with a very high SNR template"""
            telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
                                               input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            """ copy the keyword for future use"""
            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']

            telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
            telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']

        save_to_cpickle('telluric', telluric, config_in['output'], night)
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)

        print()
        print("Night ", night, " completed")
def plot_telluric_template(config_in, night_input=''):
    """Plot the telluric correction derived from a telluric template.

    For every requested night, the uncorrected rescaled spectra and the
    telluric-corrected spectra are shown (top panel) together with the
    telluric absorption model itself (bottom panel), color-coded by BJD.

    Args:
        config_in (dict): parsed SLOPpy configuration.
        night_input (str): single night to plot; empty string means all nights.
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_telluric_template Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except (FileNotFoundError, IOError):
            # no telluric products saved for this night: nothing to plot
            print()
            print("No telluric correction, no plots")
            continue

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])

        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            color_array = cmap(i / len(lists['observations']))

            _, e2ds_rescaled, _ = \
                perform_rescaling(processed[obs]['wave'],
                                  processed[obs]['e2ds'],
                                  processed[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
            e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']

            for order in range(0, processed[obs]['n_orders']):

                # label only once to avoid one legend entry per order/observation
                if order == 0 and i == 0:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array))

                ax2.plot(processed[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)
                ax2.axhline(1.00, c='k')

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        # raw string: avoids invalid \l and \A escape sequences in the TeX label
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        try:
            instrument = night_dict[night]['instrument']
            comparison_file = config_in['instruments'][instrument]['telluric_comparison']
            comparison_data = np.genfromtxt(comparison_file, skip_header=1)

            # comparison files in nanometers are converted to Angstrom
            if comparison_data[0, 0] < 1000.0:
                nm2Ang = 10.
            else:
                nm2Ang = 1.
            ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
            ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
        except (KeyError, OSError, IOError, ValueError):
            # comparison spectrum is optional: skip silently when not configured/readable
            pass

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 19,545 | 43.221719 | 121 | py |
SLOPpy | SLOPpy-main/SLOPpy/spectra_lightcurve_bkp.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.clv_rm_subroutines import *
from astropy.convolution import convolve, Box1DKernel
__all__ = ['compute_spectra_lightcurve',
'compute_spectra_lightcurve_clv_rm_correction',
'plot_spectra_lightcurve',
'plot_spectra_lightcurve_clv_rm_correction']
def compute_spectra_lightcurve_clv_rm_correction(config_in, lines_label):
    """Alias entry point: delegate to :func:`compute_spectra_lightcurve`."""
    return compute_spectra_lightcurve(config_in, lines_label)
def plot_spectra_lightcurve_clv_rm_correction(config_in, night_input=''):
    """Alias entry point: delegate to :func:`plot_spectra_lightcurve`.

    NOTE(review): this wrapper does not forward ``clv_rm_correction=True``,
    so it plots the same products as the plain variant — confirm intent,
    since the compute step of this module only saves under the plain name.
    """
    return plot_spectra_lightcurve(config_in, night_input)
def compute_spectra_lightcurve(config_in, lines_label):
    """Compute the in-/out-of-transit spectro-photometric lightcurve.

    For every night, each observation is rebinned onto the shared wavelength
    grid restricted to the line fit range, optionally corrected for the
    CLV+RM stellar effect, and integrated in the central (C) passbands and
    reference-continuum (S) bands. The C/S flux ratio per observation gives
    the lightcurve, which is then normalized to the out-of-transit level,
    phase-sorted, binned, and saved to pickle files.

    Args:
        config_in (dict): parsed SLOPpy configuration.
        lines_label (str): key of the spectral-line set in the configuration.

    Side effects:
        Saves ``spectra_lightcurve_processed`` and ``spectra_lightcurve``
        pickles for each night.
    """
    subroutine_name = 'spectra_lightcurve'

    do_average_instead_of_sum = True

    night_dict = from_config_get_nights(config_in)
    planet_dict = from_config_get_planet(config_in)
    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]

    clv_rm_correction = lines_dict.get('clv_rm_correction', True)

    shared_data = load_from_cpickle('shared', config_in['output'])

    """ Using the MCMC fit range to define the transmission spectrum region """
    shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
        & (shared_data['coadd']['wave'] < lines_dict['range'][1])

    processed_template = {
        'subroutine': subroutine_name,
        'range': lines_dict['range'],
        'wave': shared_data['coadd']['wave'][shared_selection],
        'step': shared_data['coadd']['step'][shared_selection],
        # np.int was removed in NumPy >= 1.24; plain int is equivalent here
        'size': int(np.sum(shared_selection)),
    }

    """ C stands for central: boolean masks selecting each line passband.
        Masks are built on the restricted grid so they can index the
        rebinned spectra directly. """
    C_bands = {}
    for passband_key, passband_val in spectral_lines['passbands'].items():
        C_bands[passband_key] = {}
        for line_key, line_val in spectral_lines['lines'].items():
            C_bands[passband_key][line_key] = \
                (np.abs(processed_template['wave'] - line_val) < passband_val / 2.)

    """ S stands for side: reference continuum bands on the same grid. """
    S_bands = {}
    for band_key, band_val in spectral_lines['continuum'].items():
        S_bands[band_key] = (processed_template['wave'] >= band_val[0]) \
            & (processed_template['wave'] <= band_val[1])

    """
    The transit phase [0-1] is divided in N (=5) bins. Two arrays are computed:
    - transit_in_bins: array with the boundaries of the bins, size=N+1
    - transit_in_step: average size of the bin, size=1
    """
    transit_in_bins = np.linspace(
        -planet_dict['transit_duration'][0]/2./planet_dict['period'][0],
        planet_dict['transit_duration'][0]/2./planet_dict['period'][0],
        6
    )
    transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])

    for night in night_dict:

        try:
            lightcurve = load_from_cpickle(subroutine_name, config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except (FileNotFoundError, IOError):
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = processed_template.copy()

        lightcurve = {
            'subroutine': subroutine_name,
            'arrays': {
                'observations': {
                    # dtype=object: dtype=str would give '<U1' and truncate
                    # the observation names to a single character
                    'obs_name': np.empty(len(lists['observations']), dtype=object),
                    'phase': np.zeros(len(lists['observations'])),
                },
                'transit_in': {},
                'transit_out': {},
            },
            'C_bands': C_bands,
            'S_bands': S_bands,
            'average': {},
            'bins': {
                'transit_in_bins': transit_in_bins,
                'transit_in_step': transit_in_step
            }
        }

        """ Adding the C-bands arrays to the dictionary"""
        for band_key in C_bands:
            lightcurve['arrays']['observations']['ratio_' + band_key] = \
                np.zeros([len(lists['observations']), 2])

        transit_out_flag = np.zeros(len(lists['observations']), dtype=bool)
        transit_in_flag = np.zeros(len(lists['observations']), dtype=bool)

        if clv_rm_correction:
            try:
                clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
            except (FileNotFoundError, IOError):
                clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)

        for n_obs, obs in enumerate(lists['observations']):

            processed[obs] = {}
            lightcurve[obs] = {}

            processed[obs]['rescaling'], \
                processed[obs]['rescaled'], \
                processed[obs]['rescaled_err'] = perform_rescaling(
                    input_data[obs]['wave'], input_data[obs]['e2ds'], input_data[obs]['e2ds_err'],
                    observational_pams['wavelength_rescaling'])

            preserve_flux = input_data[obs].get('absolute_flux', True)

            processed[obs]['uncorrected'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds'],
                               calib_data['blaze'],
                               processed['wave'],
                               processed['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])

            processed[obs]['uncorrected_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds_err'],
                               calib_data['blaze'],
                               processed['wave'],
                               processed['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])

            if clv_rm_correction:
                rv_shift = 0.0  # we always stay in SRF
                # fixed NameError: the loaded variable is clv_rm_models, and
                # the correction is evaluated on the restricted grid so it can
                # divide the rebinned arrays element-wise
                correction, _ = clv_rm_correction_factor_computation(
                    clv_rm_models, processed['wave'], processed['step'], rv_shift, obs)

                processed[obs]['clv_rm_correction'] = correction
                # 'rebinned' is the spectrum actually integrated below; the
                # original backup file divided a never-assigned 'corrected'
                processed[obs]['rebinned'] = processed[obs]['uncorrected'] / correction
                processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err'] / correction
            else:
                processed[obs]['rebinned'] = processed[obs]['uncorrected']
                processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err']

            try:
                phase_internal = (observational_pams[obs]['BJD']
                                  - night_dict[night]['time_of_transit'][0]) / planet_dict['period'][0]
            except (TypeError, IndexError):
                # time_of_transit may be stored as a scalar instead of a list
                phase_internal = (observational_pams[obs]['BJD']
                                  - night_dict[night]['time_of_transit']) / planet_dict['period'][0]

            processed[obs]['bands'] = {
                'phase': phase_internal
            }

            s_integrated = 0.000
            s_sigmaq_sum = 0.00
            n_bands = 0.00
            for band_key, band_val in S_bands.items():
                if do_average_instead_of_sum:
                    processed[obs]['bands'][band_key] = \
                        [np.average(processed[obs]['rebinned'][band_val]),
                         np.sum((processed[obs]['rebinned_err'][band_val])**2)
                         / len(processed[obs]['rebinned_err'][band_val])**2]
                else:
                    processed[obs]['bands'][band_key] = \
                        [np.sum(processed[obs]['rebinned'][band_val]),
                         np.sum((processed[obs]['rebinned_err'][band_val])**2)]

                s_integrated += processed[obs]['bands'][band_key][0]
                s_sigmaq_sum += processed[obs]['bands'][band_key][1]
                n_bands += 1

            s_integrated *= (2. / n_bands)
            s_sigmaq_sum *= (2. / n_bands)**2

            for band_key, band_dict in C_bands.items():
                processed[obs]['bands'][band_key] = {}

                c_integrated = 0.000
                c_sigmaq_sum = 0.000
                n_bands = 0.00
                for line_key, line_val in band_dict.items():
                    if do_average_instead_of_sum:
                        processed[obs]['bands'][band_key][line_key] = \
                            [np.average(processed[obs]['rebinned'][line_val]),
                             np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)
                             / len(processed[obs]['rebinned_err'][line_val]) ** 2]
                    else:
                        processed[obs]['bands'][band_key][line_key] = \
                            [np.sum(processed[obs]['rebinned'][line_val]),
                             np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)]

                    c_integrated += processed[obs]['bands'][band_key][line_key][0]
                    c_sigmaq_sum += processed[obs]['bands'][band_key][line_key][1]
                    n_bands += 1

                c_integrated *= (2. / n_bands)
                c_sigmaq_sum *= (2. / n_bands) ** 2

                ratio = c_integrated / s_integrated
                # error propagation of the C/S flux ratio
                lightcurve[obs]['ratio_' + band_key] = \
                    [ratio,
                     ratio * np.sqrt(c_sigmaq_sum / c_integrated ** 2
                                     + s_sigmaq_sum / s_integrated ** 2)]

                lightcurve['arrays']['observations']['ratio_' + band_key][n_obs, :] = \
                    lightcurve[obs]['ratio_' + band_key][:]

            lightcurve[obs]['phase'] = processed[obs]['bands']['phase']
            lightcurve['arrays']['observations']['obs_name'][n_obs] = obs
            lightcurve['arrays']['observations']['phase'][n_obs] = lightcurve[obs]['phase']

            if obs in lists['transit_out']:
                transit_out_flag[n_obs] = True
            else:
                transit_in_flag[n_obs] = True

        """ Rescaling factor: out-of-transit average of each band ratio """
        for band_key in C_bands:
            lightcurve['arrays']['rescaling_' + band_key] = \
                np.average(lightcurve['arrays']['observations']['ratio_' + band_key][transit_out_flag, 0], axis=0)

        sorting_index = np.argsort(lightcurve['arrays']['observations']['phase'])

        transit_out_flag = transit_out_flag[sorting_index]
        transit_in_flag = transit_in_flag[sorting_index]

        lightcurve['arrays']['observations']['obs_name'] = \
            lightcurve['arrays']['observations']['obs_name'][sorting_index]
        lightcurve['arrays']['observations']['phase'] = \
            lightcurve['arrays']['observations']['phase'][sorting_index]

        lightcurve['arrays']['transit_in']['obs_name'] = \
            lightcurve['arrays']['observations']['obs_name'][transit_in_flag]
        lightcurve['arrays']['transit_in']['phase'] = \
            lightcurve['arrays']['observations']['phase'][transit_in_flag]
        lightcurve['arrays']['transit_out']['obs_name'] = \
            lightcurve['arrays']['observations']['obs_name'][transit_out_flag]
        lightcurve['arrays']['transit_out']['phase'] = \
            lightcurve['arrays']['observations']['phase'][transit_out_flag]

        for band_key in C_bands:
            lightcurve['arrays']['observations']['ratio_' + band_key] = \
                lightcurve['arrays']['observations']['ratio_' + band_key][sorting_index] \
                / lightcurve['arrays']['rescaling_' + band_key]
            lightcurve['arrays']['transit_in']['ratio_' + band_key] = \
                lightcurve['arrays']['observations']['ratio_' + band_key][transit_in_flag]
            lightcurve['arrays']['transit_out']['ratio_' + band_key] = \
                lightcurve['arrays']['observations']['ratio_' + band_key][transit_out_flag]

            # weighted averages; 'returned=True' also yields the sum of weights,
            # whose inverse square root is the error on the weighted mean
            avg_out, avg_out_sq = \
                np.average(lightcurve['arrays']['transit_out']['ratio_' + band_key][:, 0],
                           weights=1./(lightcurve['arrays']['transit_out']['ratio_' + band_key][:, 1])**2,
                           returned=True)
            avg_in, avg_in_sq = \
                np.average(lightcurve['arrays']['transit_in']['ratio_' + band_key][:, 0],
                           weights=1. / (lightcurve['arrays']['transit_in']['ratio_' + band_key][:, 1]) ** 2,
                           returned=True)

            lightcurve['average'][band_key] = {
                'average_out': np.asarray([avg_out, 1./np.power(avg_out_sq, 0.5)]),
                'average_in': np.asarray([avg_in, 1. / np.power(avg_in_sq, 0.5)]),
            }

            delta_fac = \
                lightcurve['average'][band_key]['average_in'][0] \
                / lightcurve['average'][band_key]['average_out'][0]
            delta_err = delta_fac * np.sqrt(
                (lightcurve['average'][band_key]['average_out'][1]
                 / lightcurve['average'][band_key]['average_out'][0]) ** 2
                + (lightcurve['average'][band_key]['average_in'][1]
                   / lightcurve['average'][band_key]['average_in'][0]) ** 2)

            lightcurve['average'][band_key]['delta'] = \
                np.asarray([(1.-delta_fac)*100., delta_err*100.])

        lightcurve['arrays']['observations']['transit_out_flag'] = transit_out_flag
        lightcurve['arrays']['observations']['transit_in_flag'] = transit_in_flag

        """ Compute the duration of the pre-transit observations, using as scale
            the number of bins, with the same size as those used inside the transit.
            The value is given by the difference of the phase of the beginning of the transit minus
            the phase of the first observation, keeping in mind that the centre of the transit has phase = 0
            An additional bin is added if there are observations left out from the actual number of bins
        """
        pre_duration = transit_in_bins[0] - lightcurve['arrays']['transit_out']['phase'][0]
        if pre_duration > 0:
            nsteps_pre = int(pre_duration/transit_in_step)
            if pre_duration % transit_in_step > 0.0:
                nsteps_pre += 1
        else:
            nsteps_pre = 0

        """ same as pre-transit, but using the post-transit instead"""
        post_duration = lightcurve['arrays']['transit_out']['phase'][-1] - transit_in_bins[-1]
        if post_duration > 0:
            nsteps_post = int(post_duration / transit_in_step)
            if post_duration % transit_in_step > 0.0:
                nsteps_post += 1
        else:
            nsteps_post = 0

        """ The full array with both in-transit and out-transit phase, built in such a way that
            - the lower boundary of the first in-transit bin corresponds to the beginning of the transit
            - the upper boundary of the last in-transit bin corresponds to the end of the transit
        """
        transit_bins = np.arange(transit_in_bins[0]-nsteps_pre*transit_in_step,
                                 transit_in_bins[-1] + (nsteps_post+1.1) * transit_in_step,
                                 transit_in_step)

        lightcurve['binned'] = {
            'observations': {
                'phase': np.zeros(len(transit_bins)),
            },
            'transit_in': {},
            'transit_out': {},
        }

        for band_key in C_bands:
            lightcurve['binned']['observations']['ratio_' + band_key] = np.zeros([len(transit_bins), 2])

        transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
        transit_in_flag = np.zeros(len(transit_bins), dtype=bool)

        n_a = 0
        for nb in range(0, len(transit_bins)-1):
            sel = (lightcurve['arrays']['observations']['phase'] >= transit_bins[nb]) \
                & (lightcurve['arrays']['observations']['phase'] < transit_bins[nb+1])

            if np.sum(sel) <= 0:
                continue

            lightcurve['binned']['observations']['phase'][n_a] = \
                np.average(lightcurve['arrays']['observations']['phase'][sel])

            for band_key in C_bands:
                lightcurve['binned']['observations']['ratio_' + band_key][n_a, 0], sum_weights = np.average(
                    lightcurve['arrays']['observations']['ratio_' + band_key][sel, 0],
                    weights=1. / lightcurve['arrays']['observations']['ratio_' + band_key][sel, 1]**2,
                    returned=True)
                lightcurve['binned']['observations']['ratio_' + band_key][n_a, 1] = np.sqrt(1. / sum_weights)

            if np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
                    planet_dict['transit_duration'][0]/2./planet_dict['period'][0]:
                transit_out_flag[n_a] = True
            else:
                transit_in_flag[n_a] = True

            n_a += 1  # bins actually computed

        lightcurve['binned']['transit_in']['phase'] = \
            lightcurve['binned']['observations']['phase'][transit_in_flag]
        lightcurve['binned']['transit_out']['phase'] = \
            lightcurve['binned']['observations']['phase'][transit_out_flag]
        lightcurve['binned']['observations']['phase'] = \
            lightcurve['binned']['observations']['phase'][:n_a]

        for band_key in C_bands:
            lightcurve['binned']['transit_in']['ratio_' + band_key] = \
                lightcurve['binned']['observations']['ratio_' + band_key][transit_in_flag, :]
            lightcurve['binned']['transit_out']['ratio_' + band_key] = \
                lightcurve['binned']['observations']['ratio_' + band_key][transit_out_flag, :]
            lightcurve['binned']['observations']['ratio_' + band_key] = \
                lightcurve['binned']['observations']['ratio_' + band_key][:n_a, :]

        save_to_cpickle(subroutine_name+'_processed', processed, config_in['output'], night)
        save_to_cpickle(subroutine_name, lightcurve, config_in['output'], night)
def plot_spectra_lightcurve(config_in, night_input='', clv_rm_correction=False):
    """Plot the spectro-photometric lightcurve for each band and night.

    Args:
        config_in (dict): parsed SLOPpy configuration.
        night_input (str): single night to plot; empty string means all nights.
        clv_rm_correction (bool): if True, load the CLV+RM-corrected products.
    """
    import matplotlib.pyplot as plt

    if clv_rm_correction:
        subroutine_name = 'spectra_lightcurve_clv_rm_correction'
    else:
        subroutine_name = 'spectra_lightcurve'

    night_dict = from_config_get_nights(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        """ Retrieving the analysis"""
        try:
            lightcurve = load_from_cpickle(subroutine_name, config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Plotting'))
        except (FileNotFoundError, IOError):
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Skipped'))
            continue

        C_bands = lightcurve['C_bands']

        print()
        for band_key in C_bands:
            print("Night: {0:s} Band: {1:s} Delta:{2:8.4f} +- {3:8.4f} [%]".format(
                night, band_key,
                lightcurve['average'][band_key]['delta'][0],
                lightcurve['average'][band_key]['delta'][1]))

        for band_key in C_bands:
            plt.figure(figsize=(12, 6))
            plt.title('Spectra lightcurve - night {0:s} \n {1:s}'.format(night, band_key))
            # individual observations (faint) and phase-binned points (solid)
            plt.errorbar(lightcurve['arrays']['observations']['phase'],
                         lightcurve['arrays']['observations']['ratio_' + band_key][:, 0]*100 - 100.,
                         yerr=lightcurve['arrays']['observations']['ratio_' + band_key][:, 1]*100,
                         fmt='.', c='k', alpha=0.25, label='observations')
            plt.errorbar(lightcurve['binned']['observations']['phase'],
                         lightcurve['binned']['observations']['ratio_' + band_key][:, 0]*100 - 100.,
                         yerr=lightcurve['binned']['observations']['ratio_' + band_key][:, 1]*100,
                         fmt='.', c='k', alpha=1.0, label='observations')

            # shade the out-of-transit regions
            plt.axvspan(-1, lightcurve['bins']['transit_in_bins'][0], alpha=0.25, color='green')
            plt.axvspan(lightcurve['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')

            plt.axhline(0, c='C1')
            plt.xlim(lightcurve['arrays']['observations']['phase'][0]-0.01,
                     lightcurve['arrays']['observations']['phase'][-1]+0.01)
            plt.xlabel('orbital phase')
            # raw string: avoids the invalid \m escape sequence in the TeX label
            plt.ylabel(r'$\mathcal{R}$ - 1. [%]')
            plt.legend()
            plt.show()

        print()
| 21,465 | 44.478814 | 132 | py |
SLOPpy | SLOPpy-main/SLOPpy/interstellar_lines.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_interstellar_lines", "plot_interstellar_lines"]
subroutine_name = 'interstellar_lines'
# def plot_identify_stellar_lines(config_in)
def compute_interstellar_lines(config_in):
    """Model and remove interstellar absorption lines from the spectra.

    For each configured interstellar line, a chunk of every observation is
    rebinned to the Barycentric Reference Frame, the line position is
    located (optionally refined automatically from the flux minimum), the
    surrounding continuum is normalized with a quadratic polynomial, and a
    spline model of the line profile is fitted on the telluric-standard
    observations. The spline is then evaluated on each observation's BRF
    wavelength grid to build a multiplicative correction map.

    Args:
        config_in (dict): parsed SLOPpy configuration.

    Side effects:
        Saves ``interstellar_lines_processed`` and ``interstellar_lines``
        pickles for each night. Returns early if no interstellar lines are
        configured.
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    interstellar_lines = from_config_get_interstellar_lines(config_in)

    # nothing to do when no interstellar lines are configured
    if not interstellar_lines:
        return

    shared_data = load_from_cpickle('shared', config_in['output'])

    for night in night_dict:

        print()
        print("compute_interstellar_lines Night: ", night)

        try:
            interstellar = load_from_cpickle('interstellar_lines_processed', config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except (FileNotFoundError, IOError):
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        instrument = night_dict[night]['instrument']

        processed = {
            'subroutine': subroutine_name,
            'line_rebin': {},
            'line_shift': {}
        }

        interstellar = {
            'subroutine': subroutine_name,
        }

        for obs in lists['observations']:

            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }

            interstellar[obs] = {
                'wave_BRF': shift_wavelength_array(input_data[obs]['wave'],
                                                   observational_pams[obs]['rv_shift_ORF2BRF']),
                'correction': np.ones(np.shape(input_data[obs]['wave']))
            }

            """ for plotting purpose only"""
            processed[obs]['flux'] = \
                input_data[obs]['e2ds'] / calib_data['blaze'] / input_data[obs]['step']
            processed[obs]['flux_err'] = \
                np.sqrt(input_data[obs]['e2ds']) / calib_data['blaze'] / input_data[obs]['step']

        for line_name, line in interstellar_lines.items():

            processed[line_name] = {
                'line_rebin': {},
                'poly_coeff': {},
                'normalized': {},
                'line_shift': {
                    'selected_points': []
                }
            }

            # chunk boundaries: twice the continuum half-range around the line,
            # clipped to the shared wavelength coverage
            processed[line_name]['min_wave'] = \
                max(shared_data['coadd']['wavelength_range'][0], line[0] - line[2]*2)
            processed[line_name]['max_wave'] = \
                min(shared_data['coadd']['wavelength_range'][1], line[0] + line[2]*2)

            processed[line_name]['wave'] = np.arange(processed[line_name]['min_wave'],
                                                     processed[line_name]['max_wave'],
                                                     instrument_dict[instrument]['wavelength_step'])
            processed[line_name]['size'] = np.size(processed[line_name]['wave'], axis=0)
            processed[line_name]['step'] = np.ones(processed[line_name]['size']) \
                * instrument_dict[instrument]['wavelength_step']
            processed[line_name]['correction'] = np.ones(processed[line_name]['size'])

            for obs in lists['observations']:

                preserve_flux = input_data[obs].get('absolute_flux', True)

                """ shifting a chunk of the spectra to the Solar System Barycenter reference """
                processed[line_name]['line_rebin'][obs] = \
                    rebin_2d_to_1d(input_data[obs]['wave'],
                                   input_data[obs]['step'],
                                   input_data[obs]['e2ds'],
                                   calib_data['blaze'],
                                   processed[line_name]['wave'],
                                   processed[line_name]['step'],
                                   preserve_flux=preserve_flux,
                                   rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])

                # flux minimum (edges excluded) marks the line position candidate
                argmin_sel = np.argmin(processed[line_name]['line_rebin'][obs][5:-5]) + 5
                wave_sel = processed[line_name]['wave'][argmin_sel]
                processed[line_name]['line_shift']['selected_points'].append(wave_sel)

            processed[line_name]['line_shift']['position'] = np.median(
                processed[line_name]['line_shift']['selected_points'])
            processed[line_name]['line_shift']['delta_lambda'] = \
                processed[line_name]['line_shift']['position'] - line[0]

            # 'automatic' flag: use the measured position instead of the
            # catalogue wavelength (missing/False key falls back to line[0])
            if interstellar_lines.get('automatic', False) == True:
                interstellar[line_name] = processed[line_name]['line_shift']['position']
            else:
                interstellar[line_name] = line[0]

            """ selection of spectral range for continuum normalization and interstellar line modelling"""
            processed[line_name]['wavelength_selection'] = \
                (np.abs(processed[line_name]['wave'] - interstellar[line_name]) < line[1])
            processed[line_name]['continuum_selection'] = \
                (~processed[line_name]['wavelength_selection']) \
                & (np.abs(processed[line_name]['wave'] - interstellar[line_name]) < line[2])
            processed[line_name]['interstellar_selection'] = \
                (processed[line_name]['wavelength_selection'] | processed[line_name]['continuum_selection'])

            spline_wave_points = []
            spline_norm_points = []

            # TODO
            #! 1) Rescale by median each observation and collect all the value
            #! 2) Perform a continuum normalization on the collect values, with
            #!    iterative sigma-clipping
            #! 3) Perform a spline / gaussian fit of the spectral line
            for obs in lists['telluric']:

                """ normalization around the interstellar line """
                processed[line_name]['poly_coeff'][obs] = \
                    np.polyfit(processed[line_name]['wave'][processed[line_name]['continuum_selection']],
                               processed[line_name]['line_rebin'][obs][processed[line_name]['continuum_selection']],
                               2)

                processed[line_name]['normalized'][obs] = \
                    processed[line_name]['line_rebin'][obs][processed[line_name]['interstellar_selection']] \
                    / np.polyval(processed[line_name]['poly_coeff'][obs],
                                 processed[line_name]['wave'][processed[line_name]['interstellar_selection']])

                spline_wave_points.extend(
                    processed[line_name]['wave'][processed[line_name]['interstellar_selection']])
                spline_norm_points.extend(processed[line_name]['normalized'][obs])

            """ sorting the array to avoid problems with the spline function"""
            spline_sorting_index = np.argsort(spline_wave_points)
            spline_wave_points = np.asarray(spline_wave_points)[spline_sorting_index]
            spline_norm_points = np.asarray(spline_norm_points)[spline_sorting_index]

            processed[line_name]['spline_eval'], \
                processed[line_name]['spline_coeff'], \
                processed[line_name]['spline_knots'] = \
                compute_spline(spline_wave_points, spline_norm_points, 0.08, knot_order=3)

            processed[line_name]['correction'][processed[line_name]['wavelength_selection']] = \
                sci_int.splev(processed[line_name]['wave'][processed[line_name]['wavelength_selection']],
                              processed[line_name]['spline_coeff'])

            for obs in lists['observations']:

                interstellar[obs]['wavelength_selection'] = \
                    (np.abs(interstellar[obs]['wave_BRF']-interstellar[line_name]) < line[1])
                interstellar[obs]['continuum_selection'] = \
                    (~interstellar[obs]['wavelength_selection']) \
                    & (np.abs(interstellar[obs]['wave_BRF']-interstellar[line_name]) < line[2])
                interstellar[obs]['interstellar_selection'] = \
                    (interstellar[obs]['wavelength_selection'] | interstellar[obs]['continuum_selection'])

                # evaluate the line model on this observation's BRF grid
                interstellar[obs]['correction'][interstellar[obs]['wavelength_selection']] = \
                    sci_int.splev(interstellar[obs]['wave_BRF'][interstellar[obs]['wavelength_selection']],
                                  processed[line_name]['spline_coeff'])

        save_to_cpickle('interstellar_lines_processed', processed, config_in['output'], night)
        save_to_cpickle('interstellar_lines', interstellar, config_in['output'], night)
def plot_interstellar_lines(config_in, night_input=''):
    """Plot spectra before and after the interstellar-line correction.

    Args:
        config_in (dict): parsed SLOPpy configuration.
        night_input (str): single night to plot; empty string means all nights.
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)
    interstellar_lines = from_config_get_interstellar_lines(config_in)

    if not interstellar_lines:
        return

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_interstellar_lines Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('interstellar_lines_processed', config_in['output'], night)
            interstellar = load_from_cpickle('interstellar_lines', config_in['output'], night)
        except (FileNotFoundError, IOError):
            print()
            print('No interstellar correction, no plots')
            continue

        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)

        fig, gs, cbax1, ax1, ax2 = grid_2plot()

        for i, obs in enumerate(lists['observations']):

            """rescaling"""
            processed[obs]['flux_rescaling'], processed[obs]['flux_rescaled'], processed[obs]['flux_rescaled_err'] = \
                perform_rescaling(interstellar[obs]['wave_BRF'],
                                  processed[obs]['flux'],
                                  processed[obs]['flux_err'],
                                  observational_pams['wavelength_rescaling'])

            # label only the first observation to get a single legend entry
            if i == 0:
                ax1.scatter(interstellar[obs]['wave_BRF'], processed[obs]['flux_rescaled'],
                            c=colors_scatter['mBJD'][obs], s=1, alpha=0.5, label='observations (BRF)')
            else:
                ax1.scatter(interstellar[obs]['wave_BRF'], processed[obs]['flux_rescaled'],
                            c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)

            ax2.scatter(interstellar[obs]['wave_BRF'],
                        processed[obs]['flux_rescaled']/interstellar[obs]['correction'],
                        c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)

        # explicit initialization instead of the original try/except NameError
        # trick, which also leaked wave_min/wave_max across nights
        wave_min = None
        wave_max = None
        range_max = None

        for line_name, line in interstellar_lines.items():
            # mark line core (blue) and continuum (green) boundaries
            ax1.axvline(interstellar[line_name]-line[1], c='b')
            ax1.axvline(interstellar[line_name]+line[1], c='b')
            ax1.axvline(interstellar[line_name]-line[2], c='g')
            ax1.axvline(interstellar[line_name]+line[2], c='g')

            ax2.axvline(interstellar[line_name]-line[1], c='b')
            ax2.axvline(interstellar[line_name]+line[1], c='b')
            ax2.axvline(interstellar[line_name]-line[2], c='g')
            ax2.axvline(interstellar[line_name]+line[2], c='g')

            if wave_min is None:
                wave_min = interstellar[line_name]
                wave_max = interstellar[line_name]
                range_max = line[2]
            else:
                wave_min = min(wave_min, interstellar[line_name])
                wave_max = max(wave_max, interstellar[line_name])
                range_max = max(range_max, line[2])

        ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
        ax1.set_xlim(wave_min-2*range_max, wave_max+2*range_max)
        ax1.legend(loc=1)
        ax2.set_title('After interstellar line correction')
        # raw string: avoids invalid \l and \A escape sequences in the TeX label
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')

        plt.show()
| 14,029 | 44.700326 | 121 | py |
SLOPpy | SLOPpy-main/SLOPpy/compare_clv_rm_effects.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
# Public API of this module: one plotting entry point per reference frame,
# plus the generic routine they all delegate to.
__all__ = ['plot_compare_clv_rm_effects_planetRF',
           'plot_compare_clv_rm_effects_observerRF',
           'plot_compare_clv_rm_effects_stellarRF',
           'plot_compare_clv_rm_effects']
def plot_compare_clv_rm_effects_planetRF(config_in, night_input=''):
    """Compare the CLV+RM correction effects in the planetary rest frame."""
    plot_compare_clv_rm_effects(config_in, night_input=night_input, reference='planetRF')
def plot_compare_clv_rm_effects_observerRF(config_in, night_input=''):
    """Compare the CLV+RM correction effects in the observer rest frame."""
    plot_compare_clv_rm_effects(config_in, night_input=night_input, reference='observerRF')
def plot_compare_clv_rm_effects_stellarRF(config_in, night_input=''):
    """Compare the CLV+RM correction effects in the stellar rest frame."""
    plot_compare_clv_rm_effects(config_in, night_input=night_input, reference='stellarRF')
def plot_compare_clv_rm_effects(config_in, night_input='', reference='planetRF'):
    """Plot the average transmission spectrum with (top panel) and without
    (bottom panel) the CLV+RM correction, in the requested reference frame.

    Both panels show the full-resolution average and the rebinned points;
    the rebinned spectrum of each individual night is overplotted, slightly
    shifted in wavelength so the nights do not overlap.

    Args:
        config_in (dict): parsed configuration file.
        night_input (str): not referenced in this routine; presumably kept
            for interface consistency with the other plotting routines.
        reference (str): reference-frame tag used to select the input pickle
            files ('planetRF', 'observerRF' or 'stellarRF').
    """
    # Averages computed by previous pipeline steps, with and without the
    # CLV+RM correction applied.
    transmission_average = load_from_cpickle('transmission_average_'+reference, config_in['output'])
    transmission_clv_rm_average = load_from_cpickle('transmission_clv_rm_average_'+reference, config_in['output'])
    night_dict = from_config_get_nights(config_in)
    # Two vertically stacked panels sharing both axes, so the eye can compare
    # corrected vs uncorrected spectra directly.
    fig = plt.figure(figsize=(12, 9))
    gs = GridSpec(2, 1)
    ax1 = plt.subplot(gs[0, 0])
    ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
    # Top panel: CLV+RM-corrected average (full resolution + rebinned).
    ax1.errorbar(transmission_clv_rm_average['wave'],
                 transmission_clv_rm_average['average'],
                 yerr=transmission_clv_rm_average['average_err'],
                 fmt='ko', ms=1, zorder=10, alpha=0.10, label='average, with CLV RM')
    ax1.errorbar(transmission_clv_rm_average['binned_wave'],
                 transmission_clv_rm_average['binned'],
                 yerr=transmission_clv_rm_average['binned_err'],
                 fmt='ko', ms=3, zorder=20, label='binned, with CLV RM')
    #ax1.errorbar(transmission_average['binned_wave'],
    #             transmission_average['binned'],
    #             yerr=transmission_average['binned_err'],
    #             fmt='mo', ms=3, zorder=15, label='binned, no CLV RM')
    # Bottom panel: uncorrected average (full resolution + rebinned).
    ax2.errorbar(transmission_average['wave'],
                 transmission_average['average'],
                 yerr=transmission_average['average_err'],
                 fmt='ko', ms=1, zorder=10, alpha=0.10, label='average, no CLV RM')
    ax2.errorbar(transmission_average['binned_wave'],
                 transmission_average['binned'],
                 yerr=transmission_average['binned_err'],
                 fmt='ko', ms=3, zorder=20, label='binned, no CLV RM')
    #ax2.errorbar(transmission_clv_rm_average['binned_wave'],
    #             transmission_clv_rm_average['binned'],
    #             yerr=transmission_clv_rm_average['binned_err'],
    #             fmt='mo', ms=3, zorder=15, alpha=0.5, label='binned, with CLV RM')
    # Horizontal offset applied to each night so individual points are visible;
    # scaled on the master-out binning so the shift stays below one bin.
    total_night = len(night_dict)
    side_step = config_in['master-out']['wavelength_step'] * config_in['master-out']['binning_factor'] / 10
    for n_night, night in enumerate(night_dict):
        ax1.errorbar(transmission_clv_rm_average['binned_wave'] + (n_night-total_night/2) * side_step,
                     transmission_clv_rm_average[night]['binned'],
                     yerr=transmission_clv_rm_average[night]['binned_err'],
                     color='C'+repr(n_night), label='{0:s} with CLV RM'.format(night),
                     fmt='o', ms=1, zorder=17, alpha=0.75)
        ax2.errorbar(transmission_average['binned_wave'] + (n_night-total_night/2) * side_step,
                     transmission_average[night]['binned'],
                     yerr=transmission_average[night]['binned_err'],
                     color='C' + repr(n_night), label='{0:s} no CLV RM'.format(night),
                     fmt='o', ms=1, zorder=17, alpha=0.75)
    #ax1.set_ylim(0.95-spec_offset*(1.+n_night), 1.05)
    # Axis limits: wavelength range from the master-out configuration, fixed
    # transmission range around unity.
    ax1.set_xlim(config_in['master-out']['wavelength_range'][0], config_in['master-out']['wavelength_range'][1])
    ax1.set_ylim(0.985, 1.01)
    ax2.set_xlabel('$\lambda$ [$\AA$]')
    ax1.legend(loc=3)
    ax1.set_title('Average transmission spectrum with CLV and RM correction, in {0:s}'.format(reference))
    ax2.set_title('Average transmission spectrum without CLV and RM correction, in {0:s}'.format(reference))
    plt.show()
| 4,395 | 45.273684 | 114 | py |
SLOPpy | SLOPpy-main/SLOPpy/spectra_lightcurve_average.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from astropy.convolution import convolve, Box1DKernel
# Public API: compute/plot pairs for the plain analysis and for the
# CLV+RM-corrected variant.
__all__ = ['compute_spectra_lightcurve_average', 'plot_spectra_lightcurve_average',
           'compute_spectra_lightcurve_average_clv_rm_correction',
           'plot_spectra_lightcurve_average_clv_rm_correction']
def compute_spectra_lightcurve_average_clv_rm_correction(config_in, lines_label):
    """Alias kept so configuration files can request the CLV+RM variant."""
    compute_spectra_lightcurve_average(config_in, lines_label=lines_label)
def plot_spectra_lightcurve_average_clv_rm_correction(config_in, night_input=''):
    """Plot the averaged spectral lightcurves of the CLV+RM-corrected analysis.

    BUGFIX: the ``clv_rm_correction`` flag was not forwarded, so this wrapper
    fell back to the default (``False``) and plotted the results of the
    uncorrected analysis instead of the CLV+RM-corrected one.
    """
    plot_spectra_lightcurve_average(config_in, night_input, clv_rm_correction=True)
# Tag used to name the output pickle files produced by this module.
subroutine_name = 'spectra_lightcurve_average'
# Tag of the per-night input pickle files produced by the upstream
# spectra_lightcurve step.
pick_files = 'spectra_lightcurve'
# NOTE(review): `sampler_name` is not referenced in this chunk of the module;
# presumably consumed by MCMC-related routines -- confirm before removing.
sampler_name = 'emcee'
def compute_spectra_lightcurve_average(config_in, lines_label):
    """Average the per-night spectral lightcurves of a given line set.

    For every analysis case listed in ``results_list``, the per-night
    lightcurves produced by the ``spectra_lightcurve`` step are collected,
    sorted in orbital phase, averaged band by band over the in/full/out of
    transit samples, rebinned on a regular phase grid, and saved to a pickle
    file named ``<subroutine_name>_<case>``.

    Args:
        config_in (dict): parsed configuration file.
        lines_label (str): key of the spectral-line set to process.
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)
    planet_dict = from_config_get_planet(config_in)
    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]  # from_config_get_transmission_lightcurve(config_in)
    shared_data = load_from_cpickle('shared', config_in['output'])
    # BUGFIX: a comma was missing after 'mcmc_global_MAP', so the last two
    # entries were implicitly concatenated into the single string
    # 'mcmc_global_MAPuser_uncorrected' and neither case was ever processed.
    results_list = ['user',
                    'mcmc_night_MED',
                    'mcmc_night_MAP',
                    'mcmc_global_MED',
                    'mcmc_global_MAP',
                    'user_uncorrected']
    append_list = ['', '_uncorrected', '_clv_model']
    for results_selection in results_list:
        skip_iteration = False
        try:
            lightcurve_average = load_from_cpickle(subroutine_name + '_' + results_selection, config_in['output'], lines=lines_label)
            print("{0:45s} {1:s}".format(subroutine_name, 'Retrieved'))
            # NOTE(review): this returns from the whole subroutine as soon as
            # the first case is found on disk, so the remaining cases are never
            # checked or computed -- possibly `continue` was intended. Behavior
            # preserved here.
            return
        except (FileNotFoundError, IOError):
            print("  No average transmission lightcurve found for case:{0:s}, computing now ".format(results_selection))
            print("{0:45s} {1:s}".format(subroutine_name, 'Computing'))
            print()
        """
        C stands for central
        """
        # Boolean masks selecting, for each passband, the wavelengths within
        # half a passband width of each line center.
        C_bands = {}
        for passband_key, passband_val in lines_dict['passbands'].items():
            C_bands[passband_key] = {}
            for line_key, line_val in lines_dict['lines'].items():
                C_bands[passband_key][line_key] = (np.abs(shared_data['coadd']['wave'] - line_val)*2. < passband_val)
        """
        S stand for side
        """
        # Boolean masks selecting the continuum (side) bands.
        S_bands = {}
        for band_key, band_val in lines_dict['continuum'].items():
            S_bands[band_key] = (shared_data['coadd']['wave'] >= band_val[0]) & (shared_data['coadd']['wave'] <= band_val[1])
        # BUGFIX: the key tested and the key read must match; previously
        # 'total_transit_duration' was read even when the test was on
        # 'full_transit_duration'.
        if 'full_transit_duration' in planet_dict:
            full_transit_duration = planet_dict['full_transit_duration'][0]
        else:
            full_transit_duration = planet_dict['transit_duration'][0]
        if 'total_transit_duration' in planet_dict:
            total_transit_duration = planet_dict['total_transit_duration'][0]
        else:
            total_transit_duration = planet_dict['transit_duration'][0]
        # Phase grids spanning the total (T1-T4) and full (T2-T3) transit.
        transit_in_bins = np.linspace(
            -total_transit_duration/2./planet_dict['period'][0],
            total_transit_duration/2./planet_dict['period'][0],
            6
        )
        transit_full_bins = np.linspace(
            -full_transit_duration/2./planet_dict['period'][0],
            full_transit_duration/2./planet_dict['period'][0],
            6
        )
        transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
        transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])
        lightcurve_average = {
            'subroutine': subroutine_name,
            'transit_in_flag': [],
            'transit_full_flag': [],
            'transit_out_flag': [],
            'transit_in': {},
            'transit_full': {},
            'transit_out': {},
            'observations': {
                'phase': []
            },
            'bands_list': [],
            'C_bands': C_bands,
            'S_bands': S_bands,
            'average': {},
            'bins': {
                'transit_in_bins': transit_in_bins,
                'transit_in_step': transit_in_step,
                'transit_full_bins': transit_full_bins,
                'transit_full_step': transit_full_step
            }
        }
        for band_key in C_bands:
            for name_append in append_list:
                lightcurve_average['observations']['ratio_' + band_key + name_append] = []
            lightcurve_average['bands_list'].extend([band_key])
        # Collect the per-night lightcurves of this analysis case.
        for night in night_dict:
            try:
                lightcurve = load_from_cpickle(pick_files + '_' + results_selection, config_in['output'], night, lines_label)
            except:
                print("   No night spectra lightcurve found for case:{0:s}, skipping ".format(results_selection))
                skip_iteration = True
                continue
            #lightcurve = load_from_cpickle(pick_files, config_in['output'], night)
            lightcurve_average['observations']['phase'].extend(lightcurve['arrays']['observations']['phase'].tolist())
            lightcurve_average['transit_in_flag'].extend(
                lightcurve['arrays']['observations']['transit_in_flag'].tolist())
            lightcurve_average['transit_full_flag'].extend(
                lightcurve['arrays']['observations']['transit_full_flag'].tolist())
            lightcurve_average['transit_out_flag'].extend(
                lightcurve['arrays']['observations']['transit_out_flag'].tolist())
            for band_key in lightcurve_average['bands_list']:
                for name_append in append_list:
                    lightcurve_average['observations']['ratio_' + band_key + name_append].extend(
                        lightcurve['arrays']['observations']['ratio_' + band_key + name_append].tolist())
        if skip_iteration: continue
        # Sort every collected array in orbital phase.
        sorting_index = np.argsort(lightcurve_average['observations']['phase'])
        lightcurve_average['observations']['phase'] = np.asarray(lightcurve_average['observations']['phase'])[sorting_index]
        # BUGFIX: the flags are boolean masks; with dtype=np.int16 the indexing
        # below would have been integer fancy indexing (repeatedly selecting
        # elements 0 and 1) instead of boolean masking.
        lightcurve_average['transit_in_flag'] = np.asarray(lightcurve_average['transit_in_flag'], dtype=bool)[sorting_index]
        lightcurve_average['transit_full_flag'] = np.asarray(lightcurve_average['transit_full_flag'], dtype=bool)[sorting_index]
        lightcurve_average['transit_out_flag'] = np.asarray(lightcurve_average['transit_out_flag'], dtype=bool)[sorting_index]
        lightcurve_average['transit_in']['phase'] = \
            lightcurve_average['observations']['phase'][lightcurve_average['transit_in_flag']]
        lightcurve_average['transit_full']['phase'] = \
            lightcurve_average['observations']['phase'][lightcurve_average['transit_full_flag']]
        lightcurve_average['transit_out']['phase'] = \
            lightcurve_average['observations']['phase'][lightcurve_average['transit_out_flag']]
        #for band_key in lightcurve_average['bands_list']:
        for band_key in C_bands:
            for name_append in append_list:
                # Split the band ratios into in/full/out-of-transit samples.
                lightcurve_average['observations']['ratio_' + band_key + name_append] = \
                    np.asarray(lightcurve_average['observations']['ratio_' + band_key + name_append])[sorting_index]
                lightcurve_average['transit_in']['ratio_' + band_key + name_append] = \
                    lightcurve_average['observations']['ratio_' + band_key + name_append][lightcurve_average['transit_in_flag']]
                lightcurve_average['transit_full']['ratio_' + band_key + name_append] = \
                    lightcurve_average['observations']['ratio_' + band_key + name_append][lightcurve_average['transit_full_flag']]
                lightcurve_average['transit_out']['ratio_' + band_key + name_append] = \
                    lightcurve_average['observations']['ratio_' + band_key + name_append][lightcurve_average['transit_out_flag']]
                # Weighted averages (the returned sum of weights gives the error).
                avg_out, avg_out_sq = \
                    np.average(lightcurve_average['transit_out']['ratio_' + band_key + name_append][:, 0],
                               weights=1. / (lightcurve_average['transit_out']['ratio_' + band_key + name_append][:, 1]) ** 2,
                               returned=True)
                avg_in, avg_in_sq = \
                    np.average(lightcurve_average['transit_in']['ratio_' + band_key + name_append][:, 0],
                               weights=1. / (lightcurve_average['transit_in']['ratio_' + band_key + name_append][:, 1]) ** 2,
                               returned=True)
                avg_full, avg_full_sq = \
                    np.average(lightcurve_average['transit_full']['ratio_' + band_key + name_append][:, 0],
                               weights=1. / (lightcurve_average['transit_full']['ratio_' + band_key + name_append][:, 1]) ** 2,
                               returned=True)
                # NOTE(review): the three unweighted averages below overwrite
                # the weighted values just computed (while the weighted errors
                # are still used). This looks like a debugging leftover --
                # behavior preserved, but confirm which average is intended.
                avg_out = \
                    np.average(lightcurve_average['transit_out']['ratio_' + band_key + name_append][:, 0])
                avg_in = \
                    np.average(lightcurve_average['transit_in']['ratio_' + band_key + name_append][:, 0])
                avg_full = \
                    np.average(lightcurve_average['transit_full']['ratio_' + band_key + name_append][:, 0])
                lightcurve_average['average'][band_key + name_append] = {
                    'average_out': np.asarray([avg_out, 1. / np.power(avg_out_sq, 0.5)]),
                    'average_in': np.asarray([avg_in, 1. / np.power(avg_in_sq, 0.5)]),
                    'average_full': np.asarray([avg_full, 1. / np.power(avg_full_sq, 0.5)]),
                }
                # Transit depth in this band: 1 - <full>/<out>, in percent,
                # with errors propagated in quadrature.
                delta_fac = \
                    lightcurve_average['average'][band_key + name_append]['average_full'][0] / lightcurve_average['average'][band_key + name_append]['average_out'][0]
                delta_err = delta_fac * np.sqrt(
                    (lightcurve_average['average'][band_key + name_append]['average_out'][1]
                     / lightcurve_average['average'][band_key + name_append]['average_out'][0]) ** 2
                    + (lightcurve_average['average'][band_key + name_append]['average_full'][1]
                       / lightcurve_average['average'][band_key + name_append]['average_full'][0]) ** 2)
                lightcurve_average['average'][band_key + name_append]['delta'] = np.asarray([(1. - delta_fac) * 100., delta_err * 100.])
        # Extend the phase grid backwards/forwards to cover all the
        # out-of-transit observations.
        pre_duration = transit_full_bins[0] - lightcurve_average['transit_out']['phase'][0]
        if pre_duration > 0:
            nsteps_pre = int(pre_duration / transit_full_step)
            if pre_duration % transit_full_step > 0.0:
                nsteps_pre += 1
        else:
            nsteps_pre = 0
        post_duration = lightcurve_average['transit_out']['phase'][-1] - transit_full_bins[-1]
        if post_duration > 0:
            nsteps_post = int(post_duration / transit_full_step)
            if post_duration % transit_full_step > 0.0:
                nsteps_post += 1
        else:
            nsteps_post = 0
        transit_bins = np.arange(transit_full_bins[0] - nsteps_pre * transit_full_step,
                                 transit_full_bins[-1] + (nsteps_post + 1.1) * transit_full_step,
                                 transit_full_step)
        lightcurve_average['binned'] = {
            'observations': {
                'phase': np.zeros(len(transit_bins)),
            },
            'transit_in': {},
            'transit_full': {},
            'transit_out': {},
        }
        for band_key in C_bands:
            for name_append in append_list:
                lightcurve_average['binned']['observations']['ratio_' + band_key + name_append] = np.zeros([len(transit_bins), 2])
        transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
        transit_full_flag = np.zeros(len(transit_bins), dtype=bool)
        transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
        # Rebin the observations on the regular phase grid; empty bins are
        # skipped, so only the first n_a entries of each array are valid.
        n_a = 0
        for nb in range(0, len(transit_bins) - 1):
            sel = (lightcurve_average['observations']['phase'] >= transit_bins[nb]) \
                & (lightcurve_average['observations']['phase'] < transit_bins[nb + 1])
            if np.sum(sel) <= 0: continue
            lightcurve_average['binned']['observations']['phase'][n_a] = np.average(
                lightcurve_average['observations']['phase'][sel])
            for band_key in C_bands:
                for name_append in append_list:
                    lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][n_a, 0], sum_weights = np.average(
                        lightcurve_average['observations']['ratio_' + band_key + name_append][sel, 0],
                        weights=1. / lightcurve_average['observations']['ratio_' + band_key + name_append][sel, 1] ** 2,
                        returned=True)
                    lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)
            if np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
                    total_transit_duration/2./planet_dict['period'][0]:
                transit_out_flag[n_a] = True
            elif np.abs(lightcurve_average['binned']['observations']['phase'][n_a]) >= \
                    full_transit_duration/2./planet_dict['period'][0]:
                transit_in_flag[n_a] = True
            else:
                transit_full_flag[n_a] = True
            n_a += 1  # bins actually computed
        lightcurve_average['binned']['transit_in']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_in_flag]
        lightcurve_average['binned']['transit_out']['phase'] = lightcurve_average['binned']['observations']['phase'][transit_out_flag]
        lightcurve_average['binned']['observations']['phase'] = lightcurve_average['binned']['observations']['phase'][:n_a]
        for band_key in C_bands:
            for name_append in append_list:
                lightcurve_average['binned']['transit_in']['ratio_' + band_key + name_append] = \
                    lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][transit_in_flag, :]
                lightcurve_average['binned']['transit_out']['ratio_' + band_key + name_append] = \
                    lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][transit_out_flag, :]
                lightcurve_average['binned']['observations']['ratio_' + band_key + name_append] = \
                    lightcurve_average['binned']['observations']['ratio_' + band_key + name_append][:n_a, :]
        save_to_cpickle(subroutine_name + '_' + results_selection, lightcurve_average, config_in['output'], lines=lines_label)
def plot_spectra_lightcurve_average(config_in, night_input='', clv_rm_correction=False):
    """Plot the averaged spectral lightcurve, one figure per central band.

    Prints the transit depth ('delta') of each band, then plots the
    individual and phase-binned ratios as a function of orbital phase.

    Args:
        config_in (dict): parsed configuration file.
        night_input (str): not referenced in this routine; presumably kept
            for interface consistency with the other plotting routines.
        clv_rm_correction (bool): select the CLV+RM-corrected pickle file
            instead of the plain one.
    """
    import matplotlib.pyplot as plt
    # Pick the pickle-name tag matching the requested analysis variant.
    if clv_rm_correction:
        subroutine_name = 'spectra_lightcurve_average_clv_rm_correction'
    else:
        subroutine_name = 'spectra_lightcurve_average'
    try:
        # NOTE(review): the compute step saves its results with a
        # '_<results_selection>' suffix (and a `lines=` argument); this load
        # uses the bare tag, so it may never find the file -- confirm.
        lightcurve_average = load_from_cpickle(subroutine_name, config_in['output'])
        print("{0:45s} {1:s}".format(subroutine_name, 'Plotting'))
    except:
        # Missing input is not fatal: just skip the plot.
        print("{0:45s} {1:s}".format(subroutine_name, 'Plot skipped'))
        return
    C_bands = lightcurve_average['C_bands']
    print()
    # Summary of the measured transit depth in each central band, in percent.
    for band_key in C_bands:
        print("Average Band: {1:s} Delta:{2:8.4f} +- {3:8.4f} [%]".format(' ', band_key,
                                                                          lightcurve_average['average'][band_key][
                                                                              'delta'][0],
                                                                          lightcurve_average['average'][band_key][
                                                                              'delta'][1]))
    # One figure per band: raw ratios (faint) and phase-binned ratios (solid),
    # with the out-of-transit regions shaded in green.
    for band_key in C_bands:
        plt.figure(figsize=(12, 6))
        plt.title('Average spectra lightcurve\n {0:s}'.format(band_key))
        plt.errorbar(lightcurve_average['observations']['phase'],
                     lightcurve_average['observations']['ratio_' + band_key][:,0]*100 -100.,
                     yerr= lightcurve_average['observations']['ratio_' + band_key][:,1]*100 ,
                     fmt='.', c='k', alpha=0.25, label='observations')
        plt.errorbar(lightcurve_average['binned']['observations']['phase'],
                     lightcurve_average['binned']['observations']['ratio_' + band_key][:, 0]*100 -100.,
                     yerr= lightcurve_average['binned']['observations']['ratio_' + band_key][:,1]*100 ,
                     fmt='.', c='k', alpha=1.0, label='observations')
        plt.axvspan(-1, lightcurve_average['bins']['transit_in_bins'][0], alpha=0.25, color='green')
        plt.axvspan(lightcurve_average['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')
        plt.axhline(0, c='C1')
        plt.xlim(lightcurve_average['observations']['phase'][0]-0.01,
                 lightcurve_average['observations']['phase'][-1]+0.01)
        plt.xlabel('orbital phase')
        plt.ylabel('$\mathcal{R}$ - 1.')
        plt.legend()
        plt.show()
| 17,778 | 49.652422 | 166 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_airmass_stellarRF.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_airmass_stellarRF",
"plot_telluric_airmass_stellarRF",
"compute_telluric_airmass_reference_stellarRF",
"plot_telluric_airmass_reference_stellarRF"]
subroutine_name = 'telluric_airmass_stellarRF'
def compute_telluric_airmass_stellarRF(config_in):
    """Telluric correction in the stellar rest frame, without a reference airmass."""
    options = {'use_reference_airmass': False,
               'subroutine_name': 'telluric_airmass_stellarRF'}
    compute_telluric_stellarRF(config_in, **options)
def compute_telluric_airmass_reference_stellarRF(config_in):
    """Telluric correction in the stellar rest frame, relative to a reference airmass."""
    options = {'use_reference_airmass': True,
               'subroutine_name': 'telluric_airmass_reference_stellarRF'}
    compute_telluric_stellarRF(config_in, **options)
def plot_telluric_airmass_reference_stellarRF(config_in, night_input):
    """Alias of plot_telluric_airmass_stellarRF, kept to simplify the
    configuration file (both variants share the same plotting code)."""
    return plot_telluric_airmass_stellarRF(config_in, night_input)
def compute_telluric_stellarRF(config_in, **kwargs):
    """Derive the telluric absorption spectrum in the stellar rest frame.

    For each night: the observations are rebinned onto the coadded stellar
    wavelength grid, a linear fit of log(flux) vs airmass over the telluric
    calibrators gives the absorption slope, and exp(slope) is taken as the
    telluric spectrum. The spectrum (and a spline fit of it) is then shifted
    back to the observer rest frame of each observation and scaled by its
    airmass. Results are saved as 'telluric_processed' and 'telluric'.

    Args:
        config_in (dict): parsed configuration file.
        **kwargs: must provide 'use_reference_airmass' (bool) and
            'subroutine_name' (str), supplied by the public wrappers.
    """
    night_dict = from_config_get_nights(config_in)
    for night in night_dict:
        print()
        print("compute_telluric_airmass_stellarRF Night: ", night)
        # Skip the night entirely if a telluric correction already exists.
        try:
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            continue
        except:
            print()
            print(" No telluric correction file found, computing now ")
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        processed = {
            # NOTE(review): module-level constant here, but kwargs value below
            # for `telluric` -- the two tags differ for the reference-airmass
            # variant; confirm this asymmetry is intended.
            'subroutine': subroutine_name
        }
        telluric = {
            'subroutine': kwargs['subroutine_name'],
            'reference_frame': 'stellar'
        }
        """ Reference airmass for iterative correction of airmass"""
        if kwargs['use_reference_airmass']:
            # Reference airmass = average airmass of the in-transit spectra.
            airmass_temp = np.zeros(lists['n_transit_in'])
            for n_obs, obs in enumerate(lists['transit_in']):
                # This is to ensure that airmass, berv and rvc are associated to the correct spectra
                airmass_temp[n_obs] = input_data[obs]['AIRMASS']
            processed['airmass_ref'] = np.average(airmass_temp)
        else:
            processed['airmass_ref'] = 0.000
        # There must be a more elegant way to do this, but I'm, not aware of it
        for obs in lists['observations']:
            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }
            telluric[obs] = {}
            """ for plotting purpose only"""
            processed[obs]['wave'] = input_data[obs]['wave']
            processed[obs]['e2ds'] = input_data[obs]['e2ds']
            processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
            # Blaze- and step-normalized flux (Poisson errors from the e2ds counts).
            processed[obs]['flux'] = input_data[obs]['e2ds']/calib_data['blaze']/input_data[obs]['step']
            processed[obs]['flux_err'] = np.sqrt(input_data[obs]['e2ds'])/calib_data['blaze']/input_data[obs]['step']
            preserve_flux = input_data[obs].get('absolute_flux', True)
            # Rebin each 2D echelle spectrum onto the 1D coadded grid, shifted
            # into the stellar rest frame.
            processed[obs]['flux_SRF'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds'],
                               calib_data['blaze'],
                               input_data['coadd']['wave'],
                               input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
            processed[obs]['flux_SRF_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds_err'],
                               calib_data['blaze'],
                               input_data['coadd']['wave'],
                               input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'],
                               is_error=True)
            """ Zero or negative values are identified, flagged and substituted with another value """
            processed[obs]['flux_SRF'], processed[obs]['flux_SRF_err'], processed[obs]['null'] = \
                replace_values_errors(processed[obs]['flux_SRF'], processed[obs]['flux_SRF_err'], 0.0001)
            """rescaling"""
            processed[obs]['flux_SRF_rescaling'], processed[obs]['flux_SRF_rescaled'], processed[obs]['flux_SRF_rescaled_err'] = \
                perform_rescaling(input_data['coadd']['wave'],
                                  processed[obs]['flux_SRF'],
                                  processed[obs]['flux_SRF_err'],
                                  observational_pams['wavelength_rescaling'])
            # Work in log-flux: telluric absorption is linear in airmass there.
            processed[obs]['logI'] = np.log(processed[obs]['flux_SRF_rescaled'])
            processed[obs]['logI_err'] = processed[obs]['flux_SRF_rescaled_err'] / processed[obs]['flux_SRF_rescaled']
        processed['telluric'] = {}
        n_coadd = np.size(input_data['coadd']['wave'])
        # NOTE(review): line_shift, pearson_r and pearson_p are allocated but
        # never written in this routine; abs_slope/zero_point are overwritten
        # by the fit below -- possibly leftovers from an older implementation.
        abs_slope = np.ones(n_coadd, dtype=np.double)
        line_shift = np.ones(n_coadd, dtype=np.double)
        zero_point = np.ones(n_coadd, dtype=np.double)
        pearson_r = np.zeros(n_coadd, dtype=np.double)
        pearson_p = np.zeros(n_coadd, dtype=np.double)
        airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
        berv = np.zeros(lists['n_tellurics'], dtype=np.double)
        rvc = np.zeros(lists['n_tellurics'], dtype=np.double)
        for n_obs, obs in enumerate(lists['telluric']):
            # This is to ensure that airmass, berv and rvc are associated to the correct spectra
            processed['telluric'][obs] = {'n_obs': n_obs}
            airmass[n_obs] = observational_pams[obs]['AIRMASS']
            berv[n_obs] = observational_pams[obs]['BERV']
            rvc[n_obs] = observational_pams[obs]['RVC']
        # Stack the log-spectra of the telluric calibrators: one row per
        # observation, one column per coadded wavelength.
        logi_array = np.empty([lists['n_tellurics'], n_coadd], dtype=np.double)
        sigi_array = np.empty([lists['n_tellurics'], n_coadd], dtype=np.double)
        for obs in lists['telluric']:
            n_obs = processed['telluric'][obs]['n_obs']
            logi_array[n_obs, :] = processed[obs]['logI'][:]
            sigi_array[n_obs, :] = processed[obs]['logI_err'][:]
        """ The user has the option to select between different approaches to
            extract the telluric absorption spectrum
            To-Do: move this section to a subroutine for cythonization"""
        if observational_pams['linear_fit_method'] == 'linear_curve_fit':
            abs_slope, zero_point = \
                airmass_linear_curve_fit(airmass, logi_array, sigi_array, n_coadd)
        else:
            abs_slope, zero_point = \
                airmass_linear_lstsq(airmass, logi_array)
        telluric['stellarRF'] = {
            'wave': input_data['coadd']['wave'],
            'step': input_data['coadd']['step']
        }
        # Telluric transmission at unit airmass; emission-like points (>1)
        # are clipped to 1 before the spline fit.
        telluric['stellarRF']['spectrum'] = np.exp(abs_slope)
        telluric['stellarRF']['emission'] = (telluric['stellarRF']['spectrum'] > 1.00000)
        telluric['stellarRF']['spectrum_fixed'] = telluric['stellarRF']['spectrum'][:]
        telluric['stellarRF']['spectrum_fixed'][telluric['stellarRF']['emission']]= 1.000
        telluric['stellarRF']['spline_eval'], \
            telluric['stellarRF']['spline_coeff'], \
            telluric['stellarRF']['spline_knots'] = \
            compute_spline(input_data['coadd']['wave'],
                           telluric['stellarRF']['spectrum_fixed'],
                           0.05)
        telluric['airmass_ref'] = processed['airmass_ref']
        """ Moving the spline to the observerRF in the e2ds"""
        for obs in lists['observations']:
            """ 1) shifting the telluric correction spline to the observer RV"""
            wave_ORF = shift_wavelength_array(np.asarray(telluric['stellarRF']['spline_coeff'][0]),
                                              -observational_pams[obs]['rv_shift_ORF2SRF_mod'])
            """ 2) new spline coefficients """
            tck1 = [shift_wavelength_array(np.asarray(telluric['stellarRF']['spline_coeff'][0]),
                                           - observational_pams[obs]['rv_shift_ORF2SRF_mod']),
                    telluric['stellarRF']['spline_coeff'][1],
                    telluric['stellarRF']['spline_coeff'][2]]
            """ 3) computation of the spline at the location of the spectra, taking care of the regions
                out of the coadded spectrum """
            inside_spline = (input_data[obs]['wave'] > wave_ORF[0]) & (input_data[obs]['wave'] < wave_ORF[-1])
            # Default of 1.0 (no correction) outside the spline domain.
            telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
                                                         input_data[obs]['n_pixels']],
                                                        dtype=np.double)
            for order in range(0, input_data[obs]['n_orders']):
                if np.sum(inside_spline[order, :])>0 :
                    telluric[obs]['spline_noairmass'][order, inside_spline[order, :]] = \
                        sci_int.splev(input_data[obs]['wave'][order, inside_spline[order, :]], tck1)
            # Scale the unit-airmass correction to this observation's airmass
            # (relative to the reference airmass, if any).
            telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
                                               observational_pams[obs]['AIRMASS'] - processed['airmass_ref'])
            """ Now a similar approach is followed for the telluric spectrum before spline fit
            """
            telluric[obs]['spectrum_noairmass'] = \
                rebin_1d_to_2d(input_data['coadd']['wave'],
                               input_data['coadd']['step'],
                               telluric['stellarRF']['spectrum'],
                               input_data[obs]['wave'],
                               input_data[obs]['step'],
                               rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
                               preserve_flux=False)
            # Guard against near-zero rebinned values before exponentiation.
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][ telluric[obs]['null']] = 1.0
            # NOTE(review): here the airmass is read from input_data, while the
            # spline branch above reads it from observational_pams -- confirm
            # the two sources always agree.
            telluric[obs]['spectrum'] = np.power(telluric[obs]['spectrum_noairmass'],
                                                 input_data[obs]['AIRMASS'] - processed['airmass_ref'])
            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
            telluric[obs]['airmass_ref'] = processed['airmass_ref']
            telluric[obs]['rv_shift_ORF2SRF_mod'] = observational_pams[obs]['rv_shift_ORF2SRF_mod']
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)
        save_to_cpickle('telluric', telluric, config_in['output'], night)
        print()
        print("Night ", night, " completed")
def plot_telluric_airmass_stellarRF(config_in, night_input=''):
    """Plot the telluric correction computed in the stellar rest frame.

    For each night: top panel shows the rescaled e2ds spectra before
    (lines) and after (dots) the telluric correction; bottom panel shows
    the telluric spectrum itself. Observations are color-coded by BJD.

    Args:
        config_in (dict): parsed configuration file.
        night_input (str): restrict the plot to one night; empty string
            means all nights.
    """
    import matplotlib.pyplot as plt
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)
    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)
    for night in night_list:
        print("plot_telluric_airmass_stellarRF Night: ", night)
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except:
            # Nothing to plot if the correction step was never run.
            print()
            print("No telluric correction, no plots")
            continue
        colors, cmap, line_colors = make_color_array(lists, observational_pams)
        # Two stacked panels plus a narrow colorbar column on the right.
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])
        # Vertical offset used by the (currently commented-out) spline plots.
        lift_spectrum = 0.25
        for i, obs in enumerate(lists['observations']):
            color_array = cmap(i / len(lists['observations']))
            _, e2ds_rescaled , _ = \
                perform_rescaling(processed[obs]['wave'],
                                  processed[obs]['e2ds'],
                                  processed[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])
            e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
            e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']
            for order in range(0, processed[obs]['n_orders']):
                # Add legend labels only once (first order of first observation).
                if order == 0 and i==0:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array))
                #ax1.plot(processed[obs]['wave'][order, :],
                #         e2ds_rescaled[order, :]+lift_spectrum,
                #         c=color_array, lw=1, alpha=0.5)
                #ax1.scatter(processed[obs]['wave'][order, :],
                #            e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
                #            s=1, c=np.atleast_2d(color_array))
                ax2.plot(processed[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)
                ax2.axhline(1.00, c='k')
                #ax2.plot(processed[obs]['wave'][order, :],
                #         telluric[obs]['spline'][order, :]+lift_spectrum,
                #         c=color_array)
                #ax2.axhline(1.00+lift_spectrum, c='k')
        #ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
        #ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)
        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        ax2.set_xlabel('$\lambda$ [$\AA$]')
        # Colorbar mapping observation color to BJD.
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 16,023 | 43.885154 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_airmass_observerRF_chunks.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_airmass_observerRF_chunks"]
def compute_telluric_airmass_observerRF_chunks(config_in):
    """Telluric correction in the observer rest frame, computed in chunks
    with a single iteration and no BERV or reference-airmass handling."""
    options = {
        'n_iterations': 1,
        'use_berv': False,
        'use_reference_airmass': False,
        'subroutine_name': 'compute_telluric_airmass_observerRF_chunks',
    }
    compute_telluric_observerRF_chunks(config_in, **options)
def compute_telluric_observerRF_chunks(config_in, **kwargs):
    """Derive and apply a telluric absorption correction in the observer RF.

    For each night, the telluric spectrum is extracted order by order from the
    linear dependence of log(flux) on airmass (and optionally on BERV) across
    the out-of-transit "telluric" observations. The corrected spectra and the
    telluric model are saved as the ``telluric`` and ``telluric_processed``
    pickle files; nights with an existing ``telluric`` file are skipped.

    :param config_in: parsed configuration dictionary

    Keyword Args:
        n_iterations (int): number of iterations of the extraction loop.
        use_berv (bool): if True, fit an additional BERV-dependent line-shift
            term together with the airmass slope.
        use_reference_airmass (bool): if True, use the average airmass of the
            in-transit observations as reference airmass; otherwise 0.
        subroutine_name (str): label stored in the output dictionaries.
    """
    night_dict = from_config_get_nights(config_in)

    for night in night_dict:

        print()
        print("compute_telluric_airmass_observerRF_chunks Night: ", night)

        try:
            # Skip the night if the telluric correction was already computed
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            continue
        except Exception:
            print("No telluric correction file found, computing now ")
            print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': kwargs['subroutine_name'],
            'n_orders': 0,
            'n_pixels': 0
        }

        telluric = {
            'subroutine': kwargs['subroutine_name'],
            'reference_frame': 'observer'
        }

        # There must be a more elegant way to do this, but I'm, not aware of it
        for obs in lists['observations']:
            processed[obs] = {}

            processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
                perform_rescaling(input_data[obs]['wave'],
                                  input_data[obs]['e2ds'],
                                  input_data[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            if processed['n_orders'] == 0:
                processed['n_orders'] = input_data[obs]['orders']
                processed['n_pixels'] = input_data[obs]['wave_size']

        """ Reference airmass for iterative correction of airmass"""
        if kwargs['use_reference_airmass']:
            airmass_temp = np.zeros(lists['n_transit_in'])
            for n_obs, obs in enumerate(lists['transit_in']):
                # This is to ensure that airmass, berv and rvc are associated to the correct spectra
                airmass_temp[n_obs] = input_data[obs]['AIRMASS']
            processed['airmass_ref'] = np.average(airmass_temp)
        else:
            processed['airmass_ref'] = 0.000

        for obs in lists['observations']:
            processed[obs]['e2ds_precorrected'] = processed[obs]['e2ds_rescaled'][:]
            processed[obs]['e2ds_precorrected_err'] = input_data[obs]['e2ds_err'] / processed[obs]['e2ds_rescaling']

            """ for plotting purpose only"""
            processed[obs]['wave'] = input_data[obs]['wave']
            processed[obs]['e2ds'] = input_data[obs]['e2ds']
            processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']

        # BUG FIX: the original code used the Python-2-only ``xrange`` here and
        # in the order loops below; ``range`` restores Python 3 compatibility.
        for niter in range(0, kwargs['n_iterations']):
            print("NITER: ", niter)

            for obs in lists['telluric']:
                processed[obs]['logI'] = np.log(processed[obs]['e2ds_precorrected'])
                processed[obs]['logI_err'] = processed[obs]['e2ds_precorrected_err']/processed[obs]['e2ds_precorrected']

            processed['telluric'] = {}

            abs_slope = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            line_shift = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            zero_point = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            pearson_r = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            pearson_p = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)

            airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
            berv = np.zeros(lists['n_tellurics'], dtype=np.double)
            rvc = np.zeros(lists['n_tellurics'], dtype=np.double)

            for n_obs, obs in enumerate(lists['telluric']):
                # This is to ensure that airmass, berv and rvc are associated to the correct spectra
                processed['telluric'][obs] = {'n_obs': n_obs}
                airmass[n_obs] = input_data[obs]['AIRMASS']
                berv[n_obs] = input_data[obs]['BERV']
                rvc[n_obs] = input_data[obs]['RVC']

            for order in range(0, processed['n_orders']):
                print(" - order ", repr(order))

                logi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
                sigi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)

                for obs in lists['telluric']:
                    n_obs = processed['telluric'][obs]['n_obs']
                    logi_array[n_obs, :] = processed[obs]['logI'][order, :]
                    sigi_array[n_obs, :] = processed[obs]['logI_err'][order, :]

                """ The user has the option to select between different approaches to
                    extract the telluric absorption spectrum
                    To-Do: move this section to a subroutine for cythonization"""
                if kwargs['use_berv']:
                    if observational_pams['linear_fit_method'] == 'linear_curve_fit':
                        abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
                            berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, processed['n_pixels'])
                    else:
                        abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
                            berv_linear_lstsq(airmass, berv, logi_array)
                else:
                    if observational_pams['linear_fit_method'] == 'linear_curve_fit':
                        abs_slope[order, :], zero_point[order, :] = \
                            airmass_linear_curve_fit(airmass, logi_array, sigi_array, processed['n_pixels'])
                    else:
                        abs_slope[order, :], zero_point[order, :] = \
                            airmass_linear_lstsq(airmass, logi_array)

                """ Saving the outcome to dictionary """
                processed['telluric']['order_'+repr(order)] = {'logi_array': logi_array, 'sigi_array': sigi_array}

            # The fitted slope of log(flux) vs airmass is the telluric optical
            # depth per unit airmass; exponentiating gives the transmission
            processed['telluric']['spectrum_noairmass'] = np.exp(abs_slope)

            for obs in lists['observations']:
                """ Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
                processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_precorrected'] / \
                    np.power(processed['telluric']['spectrum_noairmass'],
                             input_data[obs]['AIRMASS'] - processed['airmass_ref'])
                processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_precorrected_err'] / \
                    np.power(processed['telluric']['spectrum_noairmass'],
                             input_data[obs]['AIRMASS'] - processed['airmass_ref'])

        for obs in lists['observations']:
            # Correction of telluric lines
            telluric[obs] = {}

            telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']

            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
            telluric[obs]['airmass_ref'] = processed['airmass_ref']

            # Clip unphysically small transmission values to avoid huge ratios
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
                                                 input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
                                                         input_data[obs]['n_pixels']],
                                                        dtype=np.double)

            for order in range(0, processed['n_orders']):
                telluric[obs]['spline_noairmass'][order, :], _, _ = \
                    compute_spline(input_data[obs]['wave'][order, :],
                                   telluric[obs]['spectrum_noairmass'][order, :],
                                   0.05)

            telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
                                               input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            # NOTE(review): the four assignments below repeat the ones above;
            # after the first clipping the recomputed 'null' mask is all False.
            # Kept as-is to preserve the content of the saved dictionaries.
            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
            telluric[obs]['airmass_ref'] = processed['airmass_ref']
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
            telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']

        save_to_cpickle('telluric', telluric, config_in['output'], night)
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)

        print()
        print("Night ", night, " completed")
| 10,263 | 48.346154 | 120 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_coadd.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.telluric_molecfit_preparation import compute_telluric_molecfit_preparation
__all__ = ["compute_telluric_molecfit_coadd",
"plot_telluric_molecfit_coadd"]
subroutine_name = 'telluric_molecfit_coadd'
def compute_telluric_molecfit_coadd(config_in):
    """Telluric correction via ESO molecfit, run on coadded spectra.

    Observations of each night are rebinned onto a common observer-RF grid
    (the coadd grid) and grouped so that each group reaches the total exposure
    time ``exptime_coadd``; ``molecfit_model`` is run on each coadded spectrum
    and ``molecfit_calctrans`` propagates the fitted atmospheric model to every
    individual observation. The resulting telluric spectra are rebinned back to
    the e2ds wavelength grids and saved in the ``telluric`` /
    ``telluric_processed`` pickle files.

    :param config_in: parsed configuration dictionary
    :return: None (results are saved to disk)
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    molecfit_dict = from_config_get_molecfit(config_in)

    compute_telluric_molecfit_preparation(config_in)

    aer_version = molecfit_dict.get('aer_version', '3.8')

    for night in night_dict:

        instrument_name = night_dict[night]['instrument']
        template_dict = instrument_dict[instrument_name]['telluric_template']

        try:
            # Skip the night if the telluric correction was already computed
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except Exception:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()

        print(' instrument :', instrument_name)
        print()

        tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': 'telluric_molecfit',
            'n_orders': 0,
            'n_pixels': 0,
        }

        telluric = {
            'subroutine': 'telluric_molecfit',
            'reference_frame': 'observer'
        }

        processed['airmass_ref'] = 0.000
        processed['telluric'] = {}
        processed['rebin'] = {}
        processed['work_dir'] = tellprep['work_dir']

        """
        Molecfit works on pixel grid, so we must ensure that the spectra are rebinned always on the same wavelength
        scale and same wavelength step.
        """
        # NOTE(review): the original also built a grid with np.arange and
        # molecfit_dict['rebinning_step'] here, but that dictionary was
        # immediately overwritten by the assignment below; the dead code has
        # been removed.
        processed['rebin'] = {
            'wave': input_data['coadd']['wave'],
            'size': input_data['coadd']['size'],
            'step': input_data['coadd']['step'],
        }

        # TODO: fix the wave:include files
        # Build the comma-separated, double-quoted wavelength inclusion list
        # expected by the molecfit configuration files
        wave_include = '"'
        for wli_s, wli_e in zip(tellprep['include']['vacuum'][:, 0], tellprep['include']['vacuum'][:, 1]):
            wave_include = wave_include + str(wli_s) + ',' + str(wli_e) + ','
        wave_include = wave_include[:-1] + '"'

        n_coadd = 0
        n_reference = 0
        texp_cumulated = 0.00
        texp_total = 0.000
        coadd_list = []

        # Computing the total integration time
        for n_obs, obs in enumerate(lists['observations']):
            texp_total += input_data[obs]['EXPTIME']

        print(' Writing data and configuration files for molecfit+calctrans')
        print()

        # There must be a more elegant way to do this, but I'm, not aware of it
        for n_obs, obs in enumerate(lists['observations']):

            input_data[obs]['molecfit']['aer_version'] = aer_version

            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }

            """ e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
            processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
                perform_rescaling(input_data[obs]['wave'],
                                  input_data[obs]['e2ds'],
                                  input_data[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            preserve_flux = input_data[obs].get('absolute_flux', True)

            processed[obs]['rebin_ORF'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               processed[obs]['e2ds_rescaled'],
                               calib_data['blaze'],
                               processed['rebin']['wave'],
                               processed['rebin']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=0.00)

            """ This part is relative to the coadded spectrum, must be placed here because
                some variables such as direcotry names must be defined before the next step
                spectra are coadded to increase the SNR of the spectrum analyzed by molecfit
            """
            if n_coadd == 0:
                # Start a new coadd group: create its directory and initialize
                # the running sums of flux and observational parameters
                reference_name = 'coadded_{0:03d}'.format(n_reference)
                reference_dirname = './' + processed['work_dir'] + '/' + reference_name + '/'
                os.system('mkdir -p ' + reference_dirname)

                rebin_coadd = processed[obs]['rebin_ORF'].copy()

                molecfit_pams = {
                    'MJD': input_data[obs]['MJD'],
                    'UTC': input_data[obs]['UTC'],
                    'ELEVATION': input_data[obs]['ELEVATION'],
                    'HUMIDITY': input_data[obs]['HUMIDITY'],
                    'PRESSURE': input_data[obs]['PRESSURE'],
                    'TEMPERATURE_EN': input_data[obs]['TEMPERATURE_EN'],
                    'TEMPERATURE_M1': input_data[obs]['TEMPERATURE_M1']}

                coadded_files = open(reference_dirname + reference_name + '_files.list', 'w')
                coadd_list.append(reference_name)

                observations_dirlist = []
                observations_exelist = []
            else:
                rebin_coadd += processed[obs]['rebin_ORF']

                molecfit_pams['MJD'] += input_data[obs]['MJD']
                molecfit_pams['UTC'] += input_data[obs]['UTC']
                molecfit_pams['ELEVATION'] += input_data[obs]['ELEVATION']
                molecfit_pams['HUMIDITY'] += input_data[obs]['HUMIDITY']
                molecfit_pams['PRESSURE'] += input_data[obs]['PRESSURE']
                molecfit_pams['TEMPERATURE_EN'] += input_data[obs]['TEMPERATURE_EN']
                molecfit_pams['TEMPERATURE_M1'] += input_data[obs]['TEMPERATURE_M1']

            n_coadd += 1

            coadded_files.write(obs + '\n')
            texp_cumulated += input_data[obs]['EXPTIME']

            # """ Molecfit analysis is skipped if the telluric correction has been computed already"""
            # if os.path.isfile('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat'):
            #    print('   molecfit+calctrans results for ' + obs + '  already available')
            #    continue

            """
            This is the directory for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT,
            which is different from the one where the coadded spectrum is saved
            """
            observation_dirname = './' + processed['work_dir'] + '/' + 'obs_{0:03d}'.format(n_obs) + '/'
            os.system('mkdir -p ' + observation_dirname)

            """ the spectrum is saved as a BinTable Fits file in a format suitable for molecfit
                this is the spectrum for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT, so it is saved inside
                the folder with the observation name
            """
            observation_name = obs
            observation_tabname = obs + '_ORF_s1d.fits'
            write_molecfit_input_spectrum(processed['rebin']['wave'],
                                          processed[obs]['rebin_ORF'],
                                          observation_dirname + observation_tabname)

            observation_calctrans_parname = observation_name + '_calctrans.rc'
            write_calctrans_par(observation_dirname + observation_calctrans_parname)

            """ Writing the SOF files for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT
                For the observed spectrum
            """
            observation_calctrans_sofname = obs + '_calctrans.sof'
            observation_calctrans_soffile = open(observation_dirname + observation_calctrans_sofname, 'w')
            observation_calctrans_soffile.write(observation_tabname + ' SCIENCE\n')
            observation_calctrans_soffile.write('../' + reference_name + '/MODEL_MOLECULES.fits MODEL_MOLECULES\n')
            observation_calctrans_soffile.write('../' + reference_name + '/ATM_PARAMETERS.fits ATM_PARAMETERS\n')
            observation_calctrans_soffile.write(
                '../' + reference_name + '/BEST_FIT_PARAMETERS.fits BEST_FIT_PARAMETERS\n')
            observation_calctrans_soffile.close()

            """ Writing the bash script to execute MOLECFIT_CALCTRANS in the directory containing the science fits
            """
            bash_file = './' + processed['work_dir'] + '/calctrans_exec_' + obs + '.source'
            bash_script = open(bash_file, 'w')
            bash_script.write('#!/bin/bash \n')
            bash_script.write('export TMPDIR=$PWD\n')
            bash_script.write('echo " " executing calctrans on ' + obs + ' \n')
            bash_script.write('cd ' + observation_dirname + ' \n')
            bash_script.write(molecfit_dict['esorex_exec'] + ' --recipe-config=' + observation_calctrans_parname
                              + ' molecfit_calctrans ' + observation_calctrans_sofname + '> ' + obs + '_calctrans.log\n')
            bash_script.write('cd $TMPDIR \n')
            bash_script.close()

            observations_dirlist.append(observation_dirname)
            observations_exelist.append(bash_file)

            processed[obs]['dir_name'] = observation_dirname
            processed[obs]['tab_name'] = observation_tabname

            # Close the current coadd group when it has accumulated enough
            # exposure time (and enough remains for another group), or when
            # this is the last observation of the night
            if (texp_cumulated >= molecfit_dict['exptime_coadd'] and
                texp_total - texp_cumulated >= molecfit_dict['exptime_coadd']) \
                    or n_obs == len(lists['observations']) - 1:

                coadded_files.close()
                print(' Coadded spectrum: ', n_reference)

                if os.path.exists(reference_dirname + 'TELLURIC_CORR.fits'):
                    print(' molecfit for ' + reference_name + ' previously completed')
                    print()
                else:
                    rebin_coadd /= n_coadd

                    """ the spectra is saved as an ASCII file in a format suitable for molecfit """
                    reference_tabname = reference_name + '_ORF_s1d.fits'
                    write_molecfit_input_spectrum(processed['rebin']['wave'],
                                                  rebin_coadd,
                                                  reference_dirname + reference_tabname)

                    """ Average of the observational parameters """
                    for key in molecfit_pams:
                        molecfit_pams[key] /= n_coadd

                    # Site parameters are constants, not averages
                    molecfit_pams['GEOELEV'] = input_data[obs]['GEOELEV']
                    molecfit_pams['GEOLONG'] = input_data[obs]['GEOLONG']
                    molecfit_pams['GEOLAT'] = input_data[obs]['GEOLAT']

                    reference_molecfit_parname = reference_name + '_molecfit.rc'
                    write_molecfit_par(reference_dirname + reference_molecfit_parname,
                                       wave_include,
                                       input_data[obs]['molecfit'],
                                       molecfit_pams)

                    reference_calctrans_parname = reference_name + '_calctrans.rc'
                    write_calctrans_par(reference_dirname + reference_calctrans_parname)

                    reference_molecfit_sofname = reference_name + '_molecfit.sof'
                    reference_molecfit_soffile = open(reference_dirname + reference_molecfit_sofname, 'w')
                    reference_molecfit_soffile.write(reference_tabname + ' SCIENCE\n')
                    reference_molecfit_soffile.close()

                    """ Writing the SOF files for MOLECFIT_CALCTRANS and MOLECFIT_CORRECT
                        For the observed spectrum
                    """
                    reference_calctrans_sofname = obs + '_calctrans.sof'
                    reference_calctrans_soffile = open(reference_dirname + reference_calctrans_sofname, 'w')
                    reference_calctrans_soffile.write(reference_tabname + ' SCIENCE\n')
                    reference_calctrans_soffile.write('MODEL_MOLECULES.fits MODEL_MOLECULES\n')
                    reference_calctrans_soffile.write('ATM_PARAMETERS.fits ATM_PARAMETERS\n')
                    reference_calctrans_soffile.write('BEST_FIT_PARAMETERS.fits BEST_FIT_PARAMETERS\n')
                    reference_calctrans_soffile.close()

                    """ Writing the bash script to execute MOLECFIT_MODEL and MOLECFIT_CALCTRANS in the directory containing the coadded fits
                    """
                    bash_file = './' + processed['work_dir'] + '/molecfit_exec_' + reference_name + '.source'
                    bash_script = open(bash_file, 'w')
                    bash_script.write('#!/bin/bash \n')
                    bash_script.write('export TMPDIR=$PWD\n')
                    bash_script.write('echo " " executing molecfit on ' + reference_name + ' \n')
                    bash_script.write('cd ' + reference_dirname + ' \n')
                    bash_script.write(molecfit_dict['esorex_exec'] + ' --recipe-config=' + reference_molecfit_parname
                                      + ' molecfit_model ' + reference_molecfit_sofname + '> ' + obs + '_molecfit.log\n')
                    bash_script.write(molecfit_dict['esorex_exec'] + ' --recipe-config=' + reference_calctrans_parname
                                      + ' molecfit_calctrans ' + reference_calctrans_sofname + '> ' + obs + '_calctrans.log\n')
                    bash_script.write('cd $TMPDIR \n')
                    bash_script.close()

                    os.system('. ' + bash_file)

                # Run calctrans for every observation of the group, unless its
                # telluric spectrum is already on disk
                for dirname, exename in zip(observations_dirlist, observations_exelist):
                    if os.path.exists(dirname + 'TELLURIC_CORR.fits'):
                        print(' molecfit for ' + dirname + ' previously completed')
                        print()
                    else:
                        os.system('. ' + exename)

                n_coadd = 0
                n_reference += 1
                texp_total -= texp_cumulated
                texp_cumulated = 0.0

        print()

        for n_obs, obs in enumerate(lists['observations']):

            telluric[obs] = {}
            observation_dirname = processed[obs]['dir_name']

            print(' Telluric correction for ', obs, 'retrieved from ', observation_dirname + 'TELLURIC_CORR.fits')

            """ Loading the telluric spectrum from the output directory of molecfit """
            corr_fits = fits.open(observation_dirname + 'TELLURIC_CORR.fits')
            # orig_fits = fits.open(observation_dirname + observation_tabname)
            telluric_molecfit = corr_fits[1].data

            """ rebinning onto the e2ds wave scale"""

            if molecfit_dict.get('fix_telluric', True):
                print(' fix_telluric applied - temporary workaround for line at 5885.97 A [ORF]')
                line_boundaries = [5885.74, 5886.21]
                sel = (processed['rebin']['wave'] > line_boundaries[0]) \
                    & (processed['rebin']['wave'] < line_boundaries[1])
                tell_cont = np.amax(telluric_molecfit[sel])
                telluric_molecfit[sel] = (telluric_molecfit[sel] - tell_cont) / 2.0 + tell_cont

            telluric[obs]['spectrum'] = \
                rebin_1d_to_2d(processed['rebin']['wave'],
                               processed['rebin']['step'],
                               telluric_molecfit,
                               input_data[obs]['wave'],
                               input_data[obs]['step'],
                               preserve_flux=False)

            # BUG FIX: the original call omitted the input array
            # (np.nan_to_num(nan=1.0, ...)), which always raised a TypeError
            # and silently fell through to the except branch. The array is now
            # passed explicitly; the manual fallback is kept for numpy < 1.17,
            # where nan_to_num does not accept the nan/posinf/neginf keywords.
            try:
                telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'],
                                                          nan=1.0, posinf=1.0, neginf=1.0)
            except TypeError:
                temp = ~(np.isfinite(telluric[obs]['spectrum']))
                telluric[obs]['spectrum'][temp] = 1.0
            sel = telluric[obs]['spectrum'] < 0.0001
            telluric[obs]['spectrum'][sel] = 1.0

            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']

            " for compatibilty to some plots, even if it doesn't make any sense"
            telluric[obs]['airmass_ref'] = 0.000
            telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
                                                           telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])

            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            # we just copy the spectrum file, it's it's a model itself
            telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()

            processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
            processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']

        save_to_cpickle('telluric', telluric, config_in['output'], night)
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)

        print()
        print("Night ", night, " completed")
def plot_telluric_molecfit_coadd(config_in, night_input=''):
    """Plot the molecfit-coadd telluric correction for each night.

    Upper panel: rescaled e2ds spectra before (lines) and after (dots) the
    telluric correction; lower panel: the telluric transmission spectra.
    Observations are color-coded by BJD. When the instrument configuration
    provides a ``telluric_comparison`` file, it is overplotted on both panels.

    :param config_in: parsed configuration dictionary
    :param night_input: restrict the plot to a single night; all nights if ''
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_telluric_molecfit_coadd Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except Exception:
            print()
            print("No telluric correction, no plots")
            continue

        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])

        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        lift_spectrum = 0.25

        for i, obs in enumerate(lists['observations']):

            color_array = cmap(i / len(lists['observations']))

            for order in range(0, processed[obs]['n_orders']):

                # Label the curves only once to keep the legend clean
                if order == 0 and i == 0:
                    ax1.plot(input_data[obs]['wave'][order, :],
                             processed[obs]['e2ds_rescaled'][order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(input_data[obs]['wave'][order, :],
                                processed[obs]['e2ds_corrected'][order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(input_data[obs]['wave'][order, :],
                             processed[obs]['e2ds_rescaled'][order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(input_data[obs]['wave'][order, :],
                                processed[obs]['e2ds_corrected'][order, :],
                                s=1, c=np.atleast_2d(color_array))

                ax2.plot(input_data[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)
                ax2.axhline(1.00, c='k')

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        # Raw string avoids the invalid escape sequences \l and \A, which
        # trigger a SyntaxWarning on modern Python versions
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        try:
            instrument = night_dict[night]['instrument']
            comparison_file = config_in['instruments'][instrument]['telluric_comparison']
            comparison_data = np.genfromtxt(comparison_file, skip_header=1)
            # Heuristic unit detection: values below 1000 are assumed to be nm
            if comparison_data[0, 0] < 1000.0:
                nm2Ang = 10.
            else:
                nm2Ang = 1.
            ax1.plot(comparison_data[:, 0] * nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
            ax2.plot(comparison_data[:, 0] * nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
        except Exception:
            # comparison spectrum not configured or not readable: best-effort
            pass

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 23,722 | 45.976238 | 141 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_observerRF_skycalc.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.eso_skycalc_cli import get_eso_sckycalc_harps
__all__ = ["compute_telluric_observerRF_skycalc", "plot_telluric_observerRF_skycalc"]
def compute_telluric_observerRF_skycalc(config_in):
    """Telluric correction in the observer RF aided by the ESO SkyCalc model.

    The telluric spectrum is extracted from the airmass+BERV dependence of
    log(flux) across the telluric observations; for HARPS, a SkyCalc model
    transmission spectrum is additionally retrieved and stored as
    ``sky_template``. Results are saved to the ``telluric`` and
    ``telluric_processed`` pickle files; nights with an existing ``telluric``
    file are skipped.

    :param config_in: parsed configuration dictionary
    :return: None (results are saved to disk)
    """
    night_dict = from_config_get_nights(config_in)

    for night in night_dict:

        print()
        print("compute_telluric_airmass_observerRF Night: ", night)

        try:
            # Skip the night if the telluric correction was already computed
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            continue
        except Exception:
            print("No telluric correction file found, computing now ")
            print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': 'telluric_observerRF_skycalc',
            'n_orders': 0,
            'n_pixels': 0
        }

        # BUG FIX: 'airmass_ref' is used below (telluric correction and output
        # dictionaries) but was never assigned in this subroutine, causing a
        # KeyError at runtime. Following the sibling subroutines, the reference
        # airmass defaults to zero.
        processed['airmass_ref'] = 0.000

        telluric = {
            'subroutine': 'telluric_observerRF_skycalc',
            'reference_frame': 'observer'
        }

        # There must be a more elegant way to do this, but I'm, not aware of it
        for obs in lists['observations']:
            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }

            """ for plotting purpose only"""
            processed[obs]['wave'] = input_data[obs]['wave']
            processed[obs]['e2ds'] = input_data[obs]['e2ds']
            processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']

            processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
                perform_rescaling(input_data[obs]['wave'],
                                  input_data[obs]['e2ds'],
                                  input_data[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            if processed['n_orders'] == 0:
                processed['n_orders'] = input_data[obs]['orders']
                processed['n_pixels'] = input_data[obs]['wave_size']

        """ Reference airmass for iterative correction of airmass"""
        telluric['sky_template'] = {}

        instrument = night_dict[night]['instrument']

        # Pick the observation closest to the middle of the transit as the
        # reference for the SkyCalc model
        for n_obs, obs in enumerate(lists['transit_in']):
            if n_obs >= lists['n_transit_in'] / 2.:
                obs_ref = obs
                break
        telluric['sky_template']['ref_observation'] = obs_ref

        if instrument == 'HARPS':
            telluric['sky_template']['use_eso_skycalc'] = True
            telluric['sky_template']['ref_airmass'] = input_data[obs_ref]['AIRMASS']
            telluric['sky_template']['data'] = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
        else:
            telluric_model_file = config_in['instruments'][instrument]['telluric_model']
            telluric['sky_template']['use_eso_skycalc'] = False
            telluric['sky_template']['ref_airmass'] = 1.00000
            telluric['sky_template']['data'] = fits.open(telluric_model_file)

        for obs in lists['observations']:
            processed[obs]['e2ds_precorrected'] = processed[obs]['e2ds_rescaled'][:]
            processed[obs]['e2ds_precorrected_err'] = input_data[obs]['e2ds_err'] / processed[obs]['e2ds_rescaling']

        # BUG FIX: ``range`` replaces the Python-2-only ``xrange`` used in the
        # original implementation (the other loops already used ``range``).
        for niter in range(0, 1):
            print("NITER: ", niter)

            for obs in lists['telluric']:
                processed[obs]['logI'] = np.log(processed[obs]['e2ds_precorrected'])
                processed[obs]['logI_err'] = processed[obs]['e2ds_precorrected_err']/processed[obs]['e2ds_precorrected']

            processed['telluric'] = {}

            abs_slope = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            line_shift = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            zero_point = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            pearson_r = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            pearson_p = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)

            airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
            berv = np.zeros(lists['n_tellurics'], dtype=np.double)
            rvc = np.zeros(lists['n_tellurics'], dtype=np.double)

            for n_obs, obs in enumerate(lists['telluric']):
                # This is to ensure that airmass, berv and rvc are associated to the correct spectra
                processed['telluric'][obs] = {'n_obs': n_obs}
                airmass[n_obs] = input_data[obs]['AIRMASS']
                berv[n_obs] = input_data[obs]['BERV']
                rvc[n_obs] = input_data[obs]['RVC']

            for order in range(0, processed['n_orders']):
                logi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
                sigi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)

                for obs in lists['telluric']:
                    n_obs = processed['telluric'][obs]['n_obs']
                    logi_array[n_obs, :] = processed[obs]['logI'][order, :]
                    sigi_array[n_obs, :] = processed[obs]['logI_err'][order, :]

                """ The user has the option to select between different approaches to
                    extract the telluric absorption spectrum
                    To-Do: move this section to a subroutine for cythonization"""
                if observational_pams['linear_fit_method'] == 'linear_curve_fit':
                    abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
                        berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, processed['n_pixels'])
                else:
                    abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
                        berv_linear_lstsq(airmass, berv, logi_array)

                """ Saving the outcome to dictionary """
                processed['telluric']['order_'+repr(order)] = {'logi_array': logi_array, 'sigi_array': sigi_array}

                if telluric['sky_template']['use_eso_skycalc']:
                    # Retrieve the SkyCalc model for the reference observation
                    # and store it normalized to unit airmass
                    wave_model, step_model, tran_model, terr_model = \
                        get_eso_sckycalc_harps(obs_ref,
                                               [input_data[obs_ref]['wave'][0], input_data[obs_ref]['wave'][-1]],
                                               input_data[obs_ref]['RA'],
                                               input_data[obs_ref]['DEC'],
                                               night, config_in['output'])

                    tran_model_rebinned = \
                        rebin_1d_to_1d(wave_model,
                                       step_model,
                                       tran_model,
                                       input_data[obs_ref]['wave'],
                                       input_data[obs_ref]['step'],
                                       preserve_flux=False)

                    telluric['sky_template']['data'][order, :] = \
                        np.power(tran_model_rebinned,
                                 1./telluric['sky_template']['ref_airmass'])

            processed['telluric']['spectrum_noairmass'] = np.exp(abs_slope)

            for obs in lists['observations']:
                """ Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
                processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_precorrected'] / \
                    np.power(processed['telluric']['spectrum_noairmass'],
                             input_data[obs]['AIRMASS'] - processed['airmass_ref'])
                processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_precorrected_err'] / \
                    np.power(processed['telluric']['spectrum_noairmass'],
                             input_data[obs]['AIRMASS'] - processed['airmass_ref'])

        for obs in lists['observations']:
            # Correction of telluric lines
            telluric[obs] = {}

            telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']

            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
            telluric[obs]['airmass_ref'] = processed['airmass_ref']

            # Clip unphysically small transmission values to avoid huge ratios
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
                                                 input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
                                                         input_data[obs]['n_pixels']],
                                                        dtype=np.double)

            for order in range(0, processed['n_orders']):
                telluric[obs]['spline_noairmass'][order, :], _, _ = \
                    compute_spline(input_data[obs]['wave'][order, :],
                                   telluric[obs]['spectrum_noairmass'][order, :],
                                   0.05)

            telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
                                               input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            # NOTE(review): the four assignments below repeat the ones above;
            # after the first clipping the recomputed 'null' mask is all False.
            # Kept as-is to preserve the content of the saved dictionaries.
            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
            telluric[obs]['airmass_ref'] = processed['airmass_ref']
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
            telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']

        save_to_cpickle('telluric', telluric, config_in['output'], night)
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)

        print()
        print("Night ", night, " completed")
def plot_telluric_observerRF_skycalc(config_in, night_input=''):
    """ Plot the telluric correction in the observer reference frame.

    For each selected night, the rescaled e2ds spectra are shown before and
    after division by the telluric spectrum (top panel), together with the
    telluric transmission model itself (bottom panel) and the sky template of
    the reference observation.

    Args:
        config_in: configuration dictionary of the SLOPpy run
        night_input: single night label (or array of labels); an empty string
            selects every night in the configuration
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        print("plot_telluric_airmass_stellarRF                Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except (FileNotFoundError, IOError):
            # Narrowed from a bare except: a missing/unreadable pickle is the
            # only expected failure and means "no telluric correction computed"
            print()
            print("No telluric correction, no plots")
            continue

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])

        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            _, e2ds_rescaled, _ = \
                perform_rescaling(processed[obs]['wave'],
                                  processed[obs]['e2ds'],
                                  processed[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']

            for order in range(0, processed[obs]['n_orders']):
                # original (line) vs telluric-corrected (scatter) spectrum
                ax1.plot(processed[obs]['wave'][order, :],
                         e2ds_rescaled[order, :],
                         c=line_colors[i], lw=1, alpha=0.5)
                ax1.scatter(processed[obs]['wave'][order, :],
                            e2ds_rescaled_corrected_spectrum[order, :],
                            s=1, c=line_colors[i])

                ax2.plot(processed[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=line_colors[i])

        ax2.axhline(1.00, c='k')

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        # Overlay the sky template on top of the reference observation.
        # NOTE(review): `obs` here is the last observation of the previous
        # loop, used only for its n_orders -- presumably identical across
        # observations of the same night.
        obs_ref = telluric['sky_template']['ref_observation']
        for order in range(0, processed[obs]['n_orders']):
            ax2.scatter(processed[obs_ref]['wave'][order, :], telluric[obs_ref]['spectrum_noairmass'][order, :], s=2, c='C0', zorder=1000)
            ax2.plot(processed[obs_ref]['wave'][order, :], telluric['sky_template']['data'][order, :], c='C0', zorder=1000)
            ax1.plot(processed[obs_ref]['wave'][order, :], telluric['sky_template']['data'][order, :], c='C0', zorder=1000)

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
SLOPpy | SLOPpy-main/SLOPpy/clv_rm_models_lines.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.kepler_exo import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.math_functions import *
from astropy.convolution import Gaussian1DKernel, convolve
# Public names exported by this module.
__all__ = ['compute_clv_rm_models_lines', 'plot_clv_rm_models_lines']

# Label printed in progress/log messages by this subroutine.
subroutine_name = 'clv_rm_models_lines'
def compute_clv_rm_models_lines(config_in, lines_label):
    """ Compute CLV (center-to-limb variation) + RM (Rossiter-McLaughlin)
    models around a given set of spectral lines.

    The stellar disk is sampled on a cartesian grid; each cell inside the
    disk is assigned a synthetic spectrum interpolated in limb angle (mu)
    and Doppler-shifted by the local rotational velocity (Cegla+2015
    geometry). For every observation, the stellar flux hidden by the planet
    is integrated over an oversampled exposure and subtracted from the
    disk-integrated spectrum, for a grid of rescaled planetary radii.
    Per-night results are stored with save_to_cpickle.

    Args:
        config_in: configuration dictionary of the SLOPpy run
        lines_label: key of the line set inside the spectral_lines section
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    planet_dict = from_config_get_planet(config_in)
    star_dict = from_config_get_star(config_in)
    clv_rm_dict = from_config_get_clv_rm(config_in)

    spectral_lines = from_config_get_spectral_lines(config_in)
    line_iter_dict = spectral_lines[lines_label]

    # Nothing to do when the correction is disabled for this line set
    clv_rm_correction = line_iter_dict.get('clv_rm_correction', True)
    if not clv_rm_correction:
        return

    # wave_extension: additional range in wavelength added to avoid convolution
    # problems at the side of the spectrum
    wave_extension = 5.0
    # un-convolved portion of the spectrum given by range_boundaries +-
    # (wave_extension - wave_fix_convo)
    wave_fix_convo = 1.0

    # Added back-compatibility to old or "wrong" keys
    norm_dict = line_iter_dict.get('normalization', clv_rm_dict.get('normalization', {}))
    norm_pams = {}
    norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
    norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
    norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
    norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)

    try:
        # Reuse synthesis and stellar grid when already computed in a previous run
        synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
        star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
        print("{0:45s} {1:s}".format(subroutine_name, 'Retrieved'))
    except (FileNotFoundError, IOError):
        print("{0:45s} {1:s}".format(subroutine_name, 'Computing'))
        print()

        """
        Loading the spectral synthesis results, at the moment only SME output is supported.
        Properties of the synthesis data files
        - limb_angles: this is an input to SME, so it is specific on how the synthesis has been performed
        - spectra: stellar spectrum as a function of the limb angle, sampled near the spectral lines
        - model: integrated spectrum of the star
        """
        synthesis_data_limb_angles = np.genfromtxt(
            clv_rm_dict['synthesis_files'] + '_muvals.txt', dtype=np.double)
        synthesis_data_spectra = np.genfromtxt(
            clv_rm_dict['synthesis_files'] + '_spectra.txt', dtype=np.double)
        synthesis_data_model = np.genfromtxt(
            clv_rm_dict['synthesis_files'] + '_model.txt', dtype=np.double)

        synthesis = {
            'surface': {
                'wave': synthesis_data_spectra[:, 0],
                'flux': synthesis_data_spectra[:, 1:],
                'n_mu': np.size(synthesis_data_limb_angles),
                'mu': synthesis_data_limb_angles
            },
            'total': {
                'wave': synthesis_data_model[:, 0],
                'norm': synthesis_data_model[:, 1],
            }
        }

        """ Setting up the array for model computation """
        # wavelength step of each input sample (first step duplicated)
        synthesis['total']['step'] = synthesis['total']['wave'] * 0.0
        synthesis['total']['step'][1:] = synthesis['total']['wave'][1:] - \
            synthesis['total']['wave'][:-1]
        synthesis['total']['step'][0] = synthesis['total']['step'][1]

        synthesis['surface']['step'] = synthesis['surface']['wave'] * 0.0
        synthesis['surface']['step'][1:] = synthesis['surface']['wave'][1:] - \
            synthesis['surface']['wave'][:-1]
        synthesis['surface']['step'][0] = synthesis['surface']['step'][1]

        # Regularly sampled output wavelength grid
        synthesis['surface']['wave_out'] = np.arange(synthesis['surface']['wave'][0],
                                                     synthesis['surface']['wave'][-1],
                                                     clv_rm_dict['rebinning_step'])
        synthesis['surface']['size_out'] = np.size(
            synthesis['surface']['wave_out'], axis=0)
        synthesis['surface']['step_out'] = np.ones(
            synthesis['surface']['size_out']) * clv_rm_dict['rebinning_step']

        synthesis['total']['norm_out'] = rebin_1d_to_1d(synthesis['total']['wave'],
                                                        synthesis['total']['step'],
                                                        synthesis['total']['norm'],
                                                        synthesis['surface']['wave_out'],
                                                        synthesis['surface']['step_out'],
                                                        method='exact_flux',
                                                        preserve_flux=False)

        """ Check if the number of spectra corresponds to the number of limb angle values """
        if np.size(synthesis['surface']['flux'], axis=1) != synthesis['surface']['n_mu']:
            print('ERROR in loading the stellar spectra')

        """
        Setting up the grid of stellar spectra for the CLV and RM computation
        odd number of points to include the zero value
        """
        star_grid = {
            'n_grid': clv_rm_dict['n_gridpoints'],
            'half_grid': int((clv_rm_dict['n_gridpoints'] - 1) / 2)
        }

        """ Coordinates of the centers of each grid cell (add offset) """
        star_grid['xx'] = np.linspace(-1.0000000000000, 1.0000000000000,
                                      star_grid['n_grid'], dtype=np.double)
        star_grid['xc'], star_grid['yc'] = np.meshgrid(
            star_grid['xx'], star_grid['xx'], indexing='xy')
        # check the Note section of the wiki page of meshgrid
        # https://docs.scipy.org/doc/numpy/reference/generated/numpy.meshgrid.html

        """ Distance of each grid cell from the center of the stellar disk """
        star_grid['rc'] = np.sqrt(star_grid['xc'] ** 2 + star_grid['yc'] ** 2)
        # Must avoid negative numbers inside the square root
        star_grid['inside'] = star_grid['rc'] < 1.0000000000000
        # Must avoid negative numbers inside the square root
        star_grid['outside'] = star_grid['rc'] >= 1.00000000000000

        """ Determine the mu angle for each grid cell, as a function of radius. """
        star_grid['mu'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
                                   dtype=np.double)  # initialization of the matrix with the mu values
        star_grid['mu'][star_grid['inside']] = np.sqrt(
            1. - star_grid['rc'][star_grid['inside']] ** 2)

        """ 2.2 Determine the Doppler shift to apply to the spectrum of each grid cell, from Cegla+2015 """
        star_grid['x_ortho'] = star_grid['xc'] * np.cos(star_dict['lambda'][0] * deg2rad) \
            - star_grid['yc'] * np.sin(
                star_dict['lambda'][0] * deg2rad)  # orthogonal distances from the spin-axis
        star_grid['y_ortho'] = star_grid['xc'] * np.sin(star_dict['lambda'][0] * deg2rad) \
            + star_grid['yc'] * np.cos(star_dict['lambda'][0] * deg2rad)
        star_grid['r_ortho'] = np.sqrt(
            star_grid['x_ortho'] ** 2 + star_grid['y_ortho'] ** 2)
        star_grid['z_ortho'] = np.zeros([star_grid['n_grid'], star_grid['n_grid']],
                                        dtype=np.double)  # initialization of the matrix
        star_grid['z_ortho'][star_grid['inside']] = np.sqrt(
            1. - star_grid['r_ortho'][star_grid['inside']] ** 2)

        """ rotate the coordinate system around the x_ortho axis by an agle: """
        star_grid['beta'] = (np.pi / 2.) - \
            star_dict['inclination'][0] * deg2rad

        """ orthogonal distance from the stellar equator """
        star_grid['yp_ortho'] = star_grid['z_ortho'] * np.sin(star_grid['beta']) + star_grid['y_ortho'] * np.cos(
            star_grid['beta'])

        """ stellar rotational velocity for a given position """
        star_grid['v_star'] = star_grid['x_ortho'] * star_dict['vsini'][0] * (
            1. - star_dict['alpha'][0] * star_grid['yp_ortho'] ** 2)
        # Null velocity for points outside the stellar surface
        star_grid['v_star'][star_grid['outside']] = 0.0

    """ Associate a synthetic spectrum to each cell """

    """ recomputation of spectra_mu - most likely it has been deleted from the
        output file
    """
    # This block runs even when star_grid was retrieved from disk, because
    # 'spectra_mu' is deleted before saving (see end of this function).
    star_grid['spectra_mu'] = [[0] * star_grid['n_grid']
                               for i in range(star_grid['n_grid'])]

    for x in range(0, star_grid['n_grid']):
        for y in range(0, star_grid['n_grid']):
            if star_grid['outside'][y, x]:
                continue

            # Linear interpolation in mu between the two closest synthesized
            # limb angles (exact match short-circuits the interpolation)
            index_closer = np.abs(
                synthesis['surface']['mu'] - star_grid['mu'][y, x]).argmin()  # take the index of the closer value
            if star_grid['mu'][y, x] in synthesis['surface']['mu']:
                star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, index_closer]
                continue
            elif index_closer == synthesis['surface']['n_mu'] - 1 or \
                    synthesis['surface']['mu'][index_closer] > star_grid['mu'][y, x]:
                mu_ind0 = index_closer - 1
                mu_ind1 = index_closer
            else:
                mu_ind0 = index_closer
                mu_ind1 = index_closer + 1

            diff_mu = synthesis['surface']['mu'][mu_ind1] - \
                synthesis['surface']['mu'][mu_ind0]
            star_grid['spectra_mu'][x][y] = synthesis['surface']['flux'][:, mu_ind0] \
                + (star_grid['mu'][y, x] - synthesis['surface']['mu'][mu_ind0]) / diff_mu \
                * (synthesis['surface']['flux'][:, mu_ind1]
                   - synthesis['surface']['flux'][:, mu_ind0])

    """ Computation of the continuum level (total flux is already normalized)"""
    star_grid['continuum'] = [[0] * star_grid['n_grid']
                              for i in range(star_grid['n_grid'])]
    spectral_window = ((synthesis['surface']['wave'] > clv_rm_dict['continuum_range'][0]) &
                       (synthesis['surface']['wave'] < clv_rm_dict['continuum_range'][1]))

    for x in range(0, star_grid['n_grid']):
        for y in range(0, star_grid['n_grid']):
            if star_grid['outside'][y, x]:
                continue
            star_grid['continuum'][x][y] = np.median(
                star_grid['spectra_mu'][x][y][spectral_window])

    star_grid['continuum_level'] = np.sum(star_grid['continuum'])

    """
    Setting up the grid for the rescaling factor of the planetary radius
    """
    try:
        radius_grid = np.arange(clv_rm_dict['radius_factor'][0],
                                clv_rm_dict['radius_factor'][1] +
                                clv_rm_dict['radius_factor'][2],
                                clv_rm_dict['radius_factor'][2])
    except KeyError:
        # Default radius-rescaling grid when not provided in the configuration
        radius_grid = np.arange(0.5, 2.6, 0.1)

    """ CLV + RM model computation is performed only on the wavelength range of
    interest, with the addition of a few Angstroms """
    wave_selection = (synthesis['surface']['wave_out'] > line_iter_dict['range'][0] - wave_extension) \
        & (synthesis['surface']['wave_out'] < line_iter_dict['range'][1] + wave_extension)

    for night in night_dict:

        """ Retrieving the list of observations"""
        print()
        print("compute_CLV_RM_models for lines {0:s}, Night: {1:s}".format(lines_label, night))

        try:
            clv_rm_models = load_from_cpickle(
                'clv_rm_models', config_in['output'], night, lines_label)
            continue
        except (FileNotFoundError, IOError):
            # Narrowed from a bare except: a missing pickle is the only
            # expected failure here and means the night must be computed
            print()
            print("   No CLV & RM correction files found for lines {0:s}, Night: {1:s} , computing now".format(lines_label, night))

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle(
            'observational_pams', config_in['output'], night)
        instrument = night_dict[night]['instrument']

        clv_rm_models = {
            'common': {
                'wave': synthesis['surface']['wave_out'][wave_selection],
                'step': synthesis['surface']['step_out'][wave_selection],
                'norm': synthesis['total']['norm_out'][wave_selection],
                'size': int(np.sum(wave_selection)),
                'continuum_level': star_grid['continuum_level'],
                'radius_grid': radius_grid,
                'n_radius_grid': len(radius_grid)
            }
        }

        # Instrumental broadening expressed in wavelength and in pixels
        clv_rm_models['common']['convolution_dlambda'] = \
            np.median(clv_rm_models['common']['wave']) / \
            instrument_dict[instrument]['resolution']
        clv_rm_models['common']['convolution_sigma'] = \
            clv_rm_models['common']['convolution_dlambda'] / \
            np.median(clv_rm_models['common']['step'])

        gaussian = Gaussian1DKernel(
            stddev=clv_rm_models['common']['convolution_sigma'])
        clv_rm_models['common']['norm_convolved'] = convolve(
            clv_rm_models['common']['norm'], gaussian)

        """ Fixing border effect (we took already wave_extension angstrom outside of the
        actual range, so doing it this way is fine)"""
        wave_fix = wave_extension - wave_fix_convo
        wave_fix_convolution = (clv_rm_models['common']['wave'] < line_iter_dict['range'][0] - wave_fix) \
            | (clv_rm_models['common']['wave'] > line_iter_dict['range'][1] + wave_fix)
        clv_rm_models['common']['norm_convolved'][wave_fix_convolution] = \
            clv_rm_models['common']['norm'][wave_fix_convolution]

        """
        Computation of the first derivative, useful to identify
        continuum level. This method is prone to errors for
        observational data, but it's quite robust for synthetic spectra
        if jumps in wavelngth are small
        """
        clv_rm_models['common']['norm_convolved_derivative'] = \
            first_derivative(clv_rm_models['common']['wave'],
                             clv_rm_models['common']['norm_convolved'])

        wave_fix = wave_extension - wave_fix_convo * 2
        # NOTE(review): the signs in this mask (range[0]+wave_fix,
        # range[1]-wave_fix) disagree with the printed "Range for continuum
        # normalization" below (range[0]-wave_fix, range[1]+wave_fix);
        # behavior kept unchanged -- confirm which one is intended.
        exclude_borders = (clv_rm_models['common']['wave'] > line_iter_dict['range'][0] + wave_fix) \
            & (clv_rm_models['common']['wave'] < line_iter_dict['range'][1] - wave_fix)
        print('   Range for continuum normalization: ', line_iter_dict['range'][0] - wave_fix, line_iter_dict['range'][1] + wave_fix)

        # Using only the 10percentile of values of the derivative around zero
        cont_10perc = np.percentile(
            np.abs(clv_rm_models['common']['norm_convolved_derivative']), norm_pams['percentile_selection'])

        print('   Number of points within 10percentile: {0:10.0f}'.format(
            np.sum((np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc))))
        print('   Number of points included in restricted borders: {0:10.0f}'.format(np.sum(exclude_borders)))
        print('   Number of points above threshold: {0:10.0f}'.format(
            np.sum((clv_rm_models['common']['norm_convolved'] > norm_pams['lower_threshold']))))

        # Continuum cells: flat (small derivative), away from the borders,
        # and above the normalization threshold. Computed once here; the
        # original code computed the same boolean mask twice.
        norm_convolved_bool = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
            & (exclude_borders) \
            & (clv_rm_models['common']['norm_convolved'] > norm_pams['lower_threshold'])

        if np.sum(norm_convolved_bool) < np.sum(exclude_borders) / 50:
            # Too few continuum points: relax the threshold by 20%
            print('   Lower threshold decreased by 80% to allow point selection ', norm_pams['lower_threshold'] * 0.80)
            clv_rm_models['common']['norm_convolved_bool'] = (np.abs(clv_rm_models['common']['norm_convolved_derivative']) < cont_10perc) \
                & (exclude_borders) & (clv_rm_models['common']['norm_convolved'] > norm_pams['lower_threshold'] * 0.80)
        else:
            clv_rm_models['common']['norm_convolved_bool'] = norm_convolved_bool

        print('   Number of points for continuum normalization: {0:10.0f}'.format(
            np.sum(clv_rm_models['common']['norm_convolved_bool'])))

        # Orbit setup does not depend on the observation: hoisted out of
        # the per-observation loop (pure reads of planet_dict)
        if planet_dict['orbit'] == 'circular':
            # Time of pericenter concides with transit time, if we assume e=0 and omega=np.pi/2.
            eccentricity = 0.00
            omega_rad = np.pi / 2.
            # Tcent is assumed as reference time
            Tref = planet_dict['reference_time_of_transit'][0]
            Tcent_Tref = 0.000
        else:
            omega_rad = planet_dict['omega'][0] * deg2rad
            Tref = planet_dict['reference_time']
            Tcent_Tref = planet_dict['reference_time_of_transit'][0] - Tref
            eccentricity = planet_dict['eccentricity'][0]

        inclination_rad = planet_dict['inclination'][0] * deg2rad

        processed = {}
        print()

        for obs in lists['observations']:

            print('   Computing CLV+RM correction for ', obs)
            processed[obs] = {}
            clv_rm_models[obs] = {}

            # Exposure oversampling: odd number of sub-exposures so that the
            # mid-exposure time is included
            n_oversampling = int(
                observational_pams[obs]['EXPTIME'] / clv_rm_dict['time_step'])
            if n_oversampling % 2 == 0:
                n_oversampling += 1
            half_time = observational_pams[obs]['EXPTIME'] / 2 / 86400.
            processed[obs]['bjd_oversampling'] = np.linspace(observational_pams[obs]['BJD'] - half_time,
                                                             observational_pams[obs]['BJD'] + half_time,
                                                             n_oversampling, dtype=np.double)

            true_anomaly, orbital_distance_ratio = kepler_true_anomaly_orbital_distance(
                processed[obs]['bjd_oversampling'] - Tref,
                Tcent_Tref,
                planet_dict['period'][0],
                eccentricity,
                omega_rad,
                planet_dict['semimajor_axis_ratio'][0])

            """ planet position during its orbital motion, in unit of stellar radius"""
            # Following Murray+Correia 2011 , with the argument of the ascending node set to zero.
            # 1) the ascending node coincide with the X axis
            # 2) the reference plance coincide with the plane of the sky
            processed[obs]['planet_position'] = {
                'xp': -orbital_distance_ratio * (np.cos(omega_rad + true_anomaly)),
                'yp': orbital_distance_ratio * (np.sin(omega_rad + true_anomaly) * np.cos(inclination_rad)),
                'zp': orbital_distance_ratio * (np.sin(inclination_rad) * np.sin(omega_rad + true_anomaly))
            }

            # projected distance of the planet's center to the stellar center
            processed[obs]['planet_position']['rp'] = np.sqrt(processed[obs]['planet_position']['xp'] ** 2
                                                              + processed[obs]['planet_position']['yp'] ** 2)

            # obscured flux integrated over the full epoch
            # grid n_radius_grid X size_out (of spectral model)
            clv_rm_models[obs]['missing_flux'] = np.zeros(
                [len(radius_grid), clv_rm_models['common']['size']], dtype=np.double)

            # iterating on the sub-exposures
            for j, zeta in enumerate(processed[obs]['planet_position']['zp']):

                if zeta > 0 and processed[obs]['planet_position']['rp'][j] < 1. + planet_dict['radius_ratio'][0]:
                    # the planet is in the foreground or inside the stellar disk, continue
                    # adjustment: computation is performed even if only part of the planet is shadowing the star
                    rd = np.sqrt((processed[obs]['planet_position']['xp'][j] - star_grid['xc']) ** 2 +
                                 (processed[obs]['planet_position']['yp'][j] - star_grid['yc']) ** 2)

                    # iterating on the cell grid
                    for x in range(0, star_grid['n_grid']):
                        for y in range(0, star_grid['n_grid']):

                            # skip the step if the cell is outside the stellar disk
                            # or if the cell is not shadowed by the planet when the largest possible size is considered
                            if star_grid['outside'][y, x] or rd[y, x] > planet_dict['radius_ratio'][0] * radius_grid[-1]:
                                continue

                            # rescaled planetary radius selection
                            grid_sel = (
                                rd[y, x] <= planet_dict['radius_ratio'][0] * radius_grid)

                            # stellar flux in the masked region
                            flux_tmp = rebin_1d_to_1d(synthesis['surface']['wave'],
                                                      synthesis['surface']['step'],
                                                      star_grid['spectra_mu'][x][y],
                                                      clv_rm_models['common']['wave'],
                                                      clv_rm_models['common']['step'],
                                                      rv_shift=star_grid['v_star'][y, x],
                                                      method='exact_flux',
                                                      preserve_flux=False)

                            # fixing zero values that may have been introduced by
                            # the rebinning process from an extremely irregular sampling
                            ind_sel = np.where(flux_tmp < 0.)[0]
                            for ii in ind_sel:
                                if ii == 0:
                                    flux_tmp[ii] = flux_tmp[ii + 1]
                                elif ii == np.size(flux_tmp) - 1:
                                    flux_tmp[ii] = flux_tmp[ii - 1]
                                else:
                                    flux_tmp[ii] = (
                                        flux_tmp[ii - 1] + flux_tmp[ii + 1]) / 2.

                            """
                            Outer product of the radius selection array (size=M)
                            and the flux array (N) so that it can be summed
                            properly to the MxN missing_flux matrix.
                            """
                            clv_rm_models[obs]['missing_flux'] += \
                                np.outer(grid_sel, flux_tmp)

            clv_rm_models[obs]['missing_flux'] /= n_oversampling

            clv_rm_models[obs]['stellar_spectra'] = \
                np.outer(np.ones(len(radius_grid)), clv_rm_models['common']['norm']) \
                - (clv_rm_models[obs]['missing_flux'] /
                   clv_rm_models['common']['continuum_level'])

            clv_rm_models[obs]['stellar_spectra_convolved'] = \
                np.zeros([len(radius_grid), clv_rm_models['common']['size']],
                         dtype=np.double)
            clv_rm_models[obs]['clv_rm_model_convolved'] = \
                np.zeros([len(radius_grid), clv_rm_models['common']['size']],
                         dtype=np.double)
            clv_rm_models[obs]['clv_rm_model_convolved_derivative'] = \
                np.zeros([len(radius_grid), clv_rm_models['common']['size']],
                         dtype=np.double)
            clv_rm_models[obs]['clv_rm_model_convolved_continuum_bool'] = \
                np.zeros([len(radius_grid), clv_rm_models['common']['size']],
                         dtype=bool)
            clv_rm_models[obs]['clv_rm_model_convolved_normalized'] = \
                np.zeros([len(radius_grid), clv_rm_models['common']['size']],
                         dtype=np.double)

            for ii in range(0, len(radius_grid)):
                clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] = \
                    convolve(clv_rm_models[obs]['stellar_spectra'][ii, :],
                             gaussian)
                # restore un-convolved values at the borders
                clv_rm_models[obs]['stellar_spectra_convolved'][ii, wave_fix_convolution] = \
                    clv_rm_models[obs]['stellar_spectra'][ii, wave_fix_convolution]

                """
                This is the theoretical transmission spectrum in the stellar reference frame
                when only CLV and RM effects are present (no atmospheric
                transmission)
                """
                clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] = \
                    clv_rm_models[obs]['stellar_spectra_convolved'][ii, :] \
                    / clv_rm_models['common']['norm_convolved']

                """
                High-resolution transmission spectra are always rescaled for
                their continuum because in fiber-fed spectrographs the
                information on the absolute flux of the star is lost.
                If not using the normalized spectrum, normalization factor must
                be included somehow when correcting for the CLV+RM, before
                fitting the atomic absoprtion lines
                """
                normalization_function = np.polynomial.chebyshev.Chebyshev.fit(
                    clv_rm_models['common']['wave'][clv_rm_models['common']['norm_convolved_bool']],
                    clv_rm_models[obs]['clv_rm_model_convolved'][ii, :][clv_rm_models['common']['norm_convolved_bool']],
                    deg=norm_pams['model_poly_degree']
                )
                clv_rm_models[obs]['clv_rm_model_convolved_normalized'][ii, :] = \
                    clv_rm_models[obs]['clv_rm_model_convolved'][ii, :] \
                    / normalization_function(clv_rm_models['common']['wave'])

            """ In the planetary reference frame, the corrected transmission
            spectrum T_corr is given by
                T_corr = T_input * (synthetic_convolved /
                stellar_spectra_convolved),
            where: T_input: transmission spectrum before the correction
            synthetic_convolved: integrated synthetic stellar spectrum,
            convolved for the instrumental resolution.
            stellar_spectra_convolved: stellar spectrum after removing the
            contribute of the stellar surface covered by the planet, convolved
            for the instrumental resolution (synthetic_convolved and
            stellar_spectra_convolved are in the stellar rest frame must be
            rebinned in the planetary rest frame)
            Since clv_rm_model_convolved = stellar_spectra_convolved /
            synthetic_convolved the observed transmission spectrum must be
            DIVIDED by clv_rm_model_convolved
            """

        save_to_cpickle('clv_rm_models', clv_rm_models,
                        config_in['output'], night, lines_label)
        clv_rm_models = None  # Forcing memory de-allocation

    if not config_in['settings'].get('full_output', False):
        # spectra_mu is large and recomputed at every call: drop it before saving
        del star_grid['spectra_mu']

    try:
        # Save the synthesis and the stellar grid only when not already on disk
        synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
        star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])
    except (FileNotFoundError, IOError):
        save_to_cpickle('clv_rm_star_grid', star_grid, config_in['output'])
        save_to_cpickle('clv_rm_synthesis', synthesis, config_in['output'])
def plot_clv_rm_models_lines(config_in, lines_label, night_input=''):
    """ Plot the CLV+RM models computed by compute_clv_rm_models_lines.

    Shows the stellar limb-angle and radial-velocity maps (only when all
    nights are selected), then, for each night, four diagnostic figures:
    input spectra + missing flux, convolved spectra, the CLV+RM correction
    models, and their continuum-normalized version.

    Args:
        config_in: configuration dictionary of the SLOPpy run
        lines_label: key of the line set in the spectral_lines configuration
        night_input: single night label (or array of labels); an empty string
            selects every night
    """
    spectral_lines = from_config_get_spectral_lines(config_in)
    line_iter_dict = spectral_lines[lines_label]

    # Nothing to plot when the correction was disabled for this line set
    clv_rm_correction = line_iter_dict.get('clv_rm_correction', True)
    if not clv_rm_correction:
        return

    night_dict = from_config_get_nights(config_in)
    synthesis = load_from_cpickle('clv_rm_synthesis', config_in['output'])
    star_grid = load_from_cpickle('clv_rm_star_grid', config_in['output'])

    if night_input == '':
        # Visualize the mu of star
        fig = plt.figure(figsize=(8, 6.5))
        plt.title('Limb angle')
        plt.contourf(star_grid['xx'], star_grid['xx'],
                     star_grid['mu'], 60, cmap=plt.cm.viridis)
        plt.colorbar(label=r'$\mu$')  # draw colorbar
        # plot data points.
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.xlabel('x [R_s]')
        plt.ylabel('y [R_s]')
        plt.show()

        # Visualize the RV of star
        fig = plt.figure(figsize=(8, 6.5))
        # CS = plt.contour(xx,xx,v_star,50,linewidths=0.5,colors='k')
        plt.title('Radial velocity field')
        plt.contourf(star_grid['xx'], star_grid['xx'],
                     star_grid['v_star'], 100, cmap=plt.cm.seismic)
        plt.colorbar(label='v_star')  # draw colorbar
        plt.xlim(-1, 1)
        plt.ylim(-1, 1)
        plt.xlabel('x [R_s]')
        plt.ylabel('y [R_s]')
        plt.show()

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        lists = load_from_cpickle('lists', config_in['output'], night)
        clv_rm_models = load_from_cpickle(
            'clv_rm_models', config_in['output'], night, lines_label)
        observational_pams = load_from_cpickle(
            'observational_pams', config_in['output'], night)

        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(
            lists, observational_pams)

        # Index of the nominal planetary radius (rescaling factor closest to 1);
        # computed once, it is the same for all four figures of this night
        i0_radius = np.argmin(
            np.abs(clv_rm_models['common']['radius_grid'] - 1.00))

        # --- Figure 1: in-transit stellar spectra and missing flux, at the
        #     nominal radius (top) and at the largest rescaled radius (bottom)
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for obs in lists['transit_in']:
            ax1.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['stellar_spectra'][i0_radius, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
            ax1.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['missing_flux'][i0_radius, :] /
                     clv_rm_models['common']['continuum_level'],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

            ax2.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['stellar_spectra'][-1, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
            ax2.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['missing_flux'][-1, :] /
                     clv_rm_models['common']['continuum_level'],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

        ax1.set_title(
            'Night: {0:s} \n Input spectra, stellar radius'.format(night))
        ax2.set_title('Stellar radius x {0:2.2f}'.format(
            clv_rm_models['common']['radius_grid'][-1]))
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(
            cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        # --- Figure 2: out-of-transit spectra before (top) and after (bottom)
        #     instrumental convolution, with the disk-integrated model in red
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for obs in lists['transit_out']:
            ax1.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['stellar_spectra'][i0_radius, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
            ax2.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['stellar_spectra_convolved'][i0_radius, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

        ax1.plot(clv_rm_models['common']['wave'],
                 clv_rm_models['common']['norm'][:],
                 color='C3')
        ax2.plot(clv_rm_models['common']['wave'],
                 clv_rm_models['common']['norm_convolved'][:],
                 color='C3')

        ax1.set_title(
            'Night: {0:s} \n CLV+RM correction, convolved '.format(night))
        ax2.set_title('Out of transit')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(
            cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        # --- Figure 3: CLV+RM correction models, in-transit (top) vs
        #     out-of-transit (bottom)
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for obs in lists['transit_in']:
            ax1.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['clv_rm_model_convolved'][i0_radius, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
        for obs in lists['transit_out']:
            ax2.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['clv_rm_model_convolved'][i0_radius, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

        ax1.set_title(
            'Night: {0:s} \n CLV+RM correction, convolved '.format(night))
        ax2.set_title('Out of transit')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(
            cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        # --- Figure 4: normalized correction models; in-transit at the
        #     nominal radius (top), out-of-transit at the largest radius (bottom)
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for obs in lists['transit_in']:
            ax1.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['clv_rm_model_convolved_normalized'][i0_radius, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)
        for obs in lists['transit_out']:
            ax2.plot(clv_rm_models['common']['wave'],
                     clv_rm_models[obs]['clv_rm_model_convolved_normalized'][-1, :],
                     color=colors_plot['mBJD'][obs], alpha=0.2)

        ax1.set_title(
            'Night: {0:s} \n CLV+RM correction, convolved and normalized '.format(night))
        ax2.set_title('Out of transit')
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')

        sm = plt.cm.ScalarMappable(
            cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 39,165 | 44.808187 | 190 | py |
SLOPpy | SLOPpy-main/SLOPpy/write_output_transmission.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.transmission_spectrum_preparation import compute_transmission_spectrum_preparation
from scipy.signal import savgol_filter
# Public entry points exposed by this module: the generic writer/plotter
# plus one convenience wrapper per reference frame.
__all__ = [
    'write_output_transmission', 'plot_output_transmission',
    'write_output_transmission_planetRF', 'plot_output_transmission_planetRF',
    'write_output_transmission_stellarRF', 'plot_output_transmission_stellarRF',
    'write_output_transmission_observerRF', 'plot_output_transmission_observerRF',
]

# Tag used in log messages and in the names of the cached output files.
subroutine_name = 'write_output_transmission'
# Sampler identifier (not referenced in the code visible in this module).
sampler_name = 'emcee'
def write_output_transmission_planetRF(config_in):
    """Compute the transmission-spectrum output in the planetary rest frame."""
    return write_output_transmission(config_in, reference='planetRF')
def plot_output_transmission_planetRF(config_in, night_input, results_input=''):
    """Plot the transmission-spectrum output in the planetary rest frame."""
    return plot_output_transmission(config_in, night_input, results_input, reference='planetRF')
def write_output_transmission_stellarRF(config_in):
    """Compute the transmission-spectrum output in the stellar rest frame."""
    return write_output_transmission(config_in, reference='stellarRF')
def plot_output_transmission_stellarRF(config_in, night_input, results_input=''):
    """Plot the transmission-spectrum output in the stellar rest frame."""
    return plot_output_transmission(config_in, night_input, results_input, reference='stellarRF')
def write_output_transmission_observerRF(config_in):
    """Compute the transmission-spectrum output in the observer rest frame."""
    return write_output_transmission(config_in, reference='observerRF')
def plot_output_transmission_observerRF(config_in, night_input, results_input=''):
    """Plot the transmission-spectrum output in the observer rest frame."""
    return plot_output_transmission(config_in, night_input, results_input, reference='observerRF')
def write_output_transmission(config_in, reference='planetRF', night_input='', preparation_only=False, pca_iteration=-1):
    """Compute and cache the full transmission spectrum for every night.

    For each night and each result selection, the observed ratios produced by
    the transmission-preparation step are rebinned onto the shared 1D
    wavelength grid (shifted into the requested reference frame), optionally
    corrected for the CLV+RM model, continuum-normalized, averaged over the
    in-transit (``transit_full``) and out-of-transit (``transit_out``)
    observations, binned, and finally stored with ``save_to_cpickle``.

    Args:
        config_in (dict): parsed SLOPpy configuration.
        reference (str): target rest frame; accepted values are
            'planetRF'/'PRF' (default), 'stellarRF'/'SRF', and
            'observerRF'/'ORF'.
        night_input (str): unused in this routine; kept for interface
            compatibility with the sibling subroutines.
        preparation_only (bool): unused in this routine; kept for interface
            compatibility.
        pca_iteration (int): PCA iteration to use when the preparation was
            obtained through PCA; a negative value falls back to the
            ``ref_iteration`` stored in the PCA configuration (default 3).
    """

    results_list_default = ['user']

    pca_parameters = from_config_get_pca_parameters(config_in)
    night_dict = from_config_get_nights(config_in)

    shared_data = load_from_cpickle('shared', config_in['output'])

    fullspectrum_dict = from_config_get_fullspectrum_parameters(config_in)

    clv_rm_correction = fullspectrum_dict.get('clv_rm_correction', True)

    norm_dict = fullspectrum_dict.get('normalization', {})
    norm_pams = {}
    norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
    norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')

    # Normalization parameters for the polynomial model
    norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
    norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
    norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
    norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)

    # Normalization parameters for the Savitzky-Golay filter
    norm_pams['window_length'] = norm_dict.get('window_length', 101)
    norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
    norm_pams['mode'] = norm_dict.get('mode', 'nearest')
    norm_pams['cval'] = norm_dict.get('cval', 1.0)

    range_temp = fullspectrum_dict.get('range', None)

    if range_temp:
        # Use the user-provided range to define the transmission spectrum region
        shared_selection = (shared_data['coadd']['wave'] >= fullspectrum_dict['range'][0]) \
            & (shared_data['coadd']['wave'] < fullspectrum_dict['range'][1])
        binned_selection = (shared_data['binned']['wave'] >= fullspectrum_dict['range'][0]) \
            & (shared_data['binned']['wave'] < fullspectrum_dict['range'][1])

        transmission_template = {
            'subroutine': subroutine_name,
            'range': range_temp,
            'wave': shared_data['coadd']['wave'][shared_selection],
            'step': shared_data['coadd']['step'][shared_selection],
            # np.int was removed from NumPy (>=1.24): use the builtin int
            'size': int(np.sum(shared_selection)),
            'binned_wave': shared_data['binned']['wave'][binned_selection],
            'binned_step': shared_data['binned']['step'][binned_selection],
            'binned_size': int(np.sum(binned_selection))
        }
    else:
        transmission_template = {
            'subroutine': subroutine_name,
            'range': shared_data['coadd']['wavelength_range'],
            'wave': shared_data['coadd']['wave'],
            'step': shared_data['coadd']['step'],
            'size': shared_data['coadd']['size'],
            'binned_range': shared_data['binned']['wavelength_range'],
            'binned_wave': shared_data['binned']['wave'],
            'binned_step': shared_data['binned']['step'],
            'binned_size': shared_data['binned']['size']
        }

    for night in night_dict:

        print()
        print("Running {0:45s} Night:{1:15s} ".format(subroutine_name, night))

        preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)

        # When the preparation went through PCA, pick the requested iteration
        # (or the configured reference iteration) from the preparation file.
        if preparation_input.get('pca_output', False):
            if pca_iteration >= 0:
                it_string = str(pca_iteration).zfill(2)
            else:
                it_string = str(pca_parameters.get('ref_iteration', 3)).zfill(2)
            preparation = preparation_input[it_string]
        else:
            preparation = preparation_input
            it_string = ''

        # Retrieve the list of observations
        lists = load_from_cpickle('lists', config_in['output'], night)

        results_list = results_list_default.copy()

        print('   Observational parameters from configuration file')

        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        try:
            clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)
        except (FileNotFoundError, IOError):
            # No CLV+RM model available: silently fall back to no correction
            clv_rm_correction = False

        for results_selection in results_list:

            # Skip the computation when a cached result already exists
            try:
                transmission = load_from_cpickle(subroutine_name+'_'+reference + '_' +
                                                 results_selection, config_in['output'], night, it_string=it_string)
                print("{0:45s} Night:{1:15s} {2:s}   {3:s}".format(
                    subroutine_name, night, results_selection, 'Retrieved'))
                continue
            except (FileNotFoundError, IOError):
                print("{0:45s} Night:{1:15s} {2:s}   {3:s}".format(
                    subroutine_name, night, results_selection, 'Computing'))

            transmission = transmission_template.copy()

            if len(it_string) > 0:
                transmission['pca_output'] = True
            else:
                transmission['pca_output'] = False

            print_warning = True

            for obs in lists['observations']:
                # We start from the e2ds file, after correction for blaze and
                # division by the master-out (done in the preparation step).

                transmission[obs] = {
                    'BJD': input_data[obs]['BJD'],
                    'AIRMASS': input_data[obs]['AIRMASS']
                }

                # The shift into the planetary reference system is the default
                if results_selection == 'user':
                    planet_R_factor = observational_pams.get('Rp_factor', 1.00000)

                    if reference in ['observer', 'observerRF', 'ORF']:
                        rv_shift = 0.000
                        rv_shift_clv = -observational_pams[obs]['rv_shift_ORF2SRF']
                    elif reference in ['stellar', 'stellarRF', 'SRF']:
                        rv_shift = observational_pams[obs]['rv_shift_ORF2SRF']
                        rv_shift_clv = 0.0000
                    else:
                        rv_shift = observational_pams[obs]['rv_shift_ORF2PRF']
                        rv_shift_clv = observational_pams[obs]['rv_shift_SRF2PRF']

                # Step 2): rebin the 2D ratio spectra to 1D.
                # PCA-prepared ratios are already deblazed, so a unit blaze
                # and preserve_flux=False are used in that branch.
                if transmission['pca_output']:
                    transmission[obs]['rebinned'] = \
                        rebin_2d_to_1d(input_data[obs]['wave'],
                                       input_data[obs]['step'],
                                       preparation[obs]['ratio'],
                                       np.ones_like(calib_data['blaze']),
                                       transmission['wave'],
                                       transmission['step'],
                                       preserve_flux=False,
                                       rv_shift=rv_shift)
                    transmission[obs]['rebinned_err'] = \
                        rebin_2d_to_1d(input_data[obs]['wave'],
                                       input_data[obs]['step'],
                                       preparation[obs]['ratio_err'],
                                       np.ones_like(calib_data['blaze']),
                                       transmission['wave'],
                                       transmission['step'],
                                       rv_shift=rv_shift,
                                       preserve_flux=False,
                                       is_error=True)
                else:
                    preserve_flux = input_data[obs].get('absolute_flux', True)
                    transmission[obs]['rebinned'] = \
                        rebin_2d_to_1d(input_data[obs]['wave'],
                                       input_data[obs]['step'],
                                       preparation[obs]['ratio'],
                                       calib_data['blaze'],
                                       transmission['wave'],
                                       transmission['step'],
                                       preserve_flux=preserve_flux,
                                       rv_shift=rv_shift)
                    transmission[obs]['rebinned_err'] = \
                        rebin_2d_to_1d(input_data[obs]['wave'],
                                       input_data[obs]['step'],
                                       preparation[obs]['ratio_err'],
                                       calib_data['blaze'],
                                       transmission['wave'],
                                       transmission['step'],
                                       preserve_flux=preserve_flux,
                                       rv_shift=rv_shift,
                                       is_error=True)

                # Border fix: when rebinning leaves a zero-error edge pixel,
                # copy the neighbouring value to avoid divisions by zero later
                if transmission[obs]['rebinned_err'][0] == 0:
                    transmission[obs]['rebinned'][0] = transmission[obs]['rebinned'][1]
                    transmission[obs]['rebinned_err'][0] = transmission[obs]['rebinned_err'][1]
                if transmission[obs]['rebinned_err'][-1] == 0:
                    transmission[obs]['rebinned'][-1] = transmission[obs]['rebinned'][-2]
                    transmission[obs]['rebinned_err'][-1] = transmission[obs]['rebinned_err'][-2]

                if clv_rm_correction:
                    # CLV + RM model: interpolate the model grid at the
                    # requested planet radius, then rebin it (with
                    # reference_value=1. to avoid border effects and
                    # divisions by zero) into the target reference frame
                    transmission[obs]['clv_model_stellarRF'] = interpolate1d_grid_nocheck(planet_R_factor,
                                                                                          clv_rm_models['common']['radius_grid'],
                                                                                          clv_rm_models[obs]['clv_rm_model_convolved_normalized'])

                    transmission[obs]['clv_model_rebinned'] = \
                        rebin_1d_to_1d(clv_rm_models['common']['wave'],
                                       clv_rm_models['common']['step'],
                                       transmission[obs]['clv_model_stellarRF'],
                                       transmission['wave'],
                                       transmission['step'],
                                       preserve_flux=False,
                                       rv_shift=rv_shift_clv,
                                       reference_value=1.)

                    transmission[obs]['corrected'] = transmission[obs]['rebinned'] / \
                        transmission[obs]['clv_model_rebinned']
                    transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err'] / \
                        transmission[obs]['clv_model_rebinned']

                else:
                    transmission[obs]['clv_model_rebinned'] = np.ones(transmission['size'])
                    transmission[obs]['corrected'] = transmission[obs]['rebinned']
                    transmission[obs]['corrected_err'] = transmission[obs]['rebinned_err']

                    if print_warning:
                        print('   *** No CLV correction')

                if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
                    # Continuum normalization, polynomial model:
                    # 1) exclusion of regions with lines of interest
                    #    (skipped here, as we operate on the full spectrum)
                    # 2) exclusion of regions with stellar lines
                    # 3) polynomial fit of the selected regions

                    # Boolean mask initialized to all True values
                    transmission[obs]['line_exclusion'] = (transmission['wave'] > 0.)

                    if clv_rm_correction:
                        # 2) use the synthetic stellar spectrum to reject
                        # pixels on steep or deep stellar lines
                        stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
                                                                   clv_rm_models['common']['step'],
                                                                   clv_rm_models['common']['norm_convolved'],
                                                                   transmission['wave'],
                                                                   transmission['step'],
                                                                   rv_shift=rv_shift_clv,
                                                                   preserve_flux=False)

                        stellar_spectrum_derivative = first_derivative(transmission['wave'], stellar_spectrum_rebinned)

                        missing_model = (np.abs(stellar_spectrum_rebinned) < 0.0001)

                        cont_10perc = np.percentile(np.abs(stellar_spectrum_derivative[~missing_model]), norm_pams['percentile_selection'])

                        line_exclusion = transmission[obs]['line_exclusion'] \
                            & (np.abs(stellar_spectrum_derivative) < cont_10perc) \
                            & (stellar_spectrum_rebinned > norm_pams['lower_threshold'])

                        # If too few pixels survive the selection, also accept
                        # the regions where the synthetic model is missing
                        if np.sum(line_exclusion) < len(line_exclusion)/200:
                            transmission[obs]['line_exclusion'] = transmission[obs]['line_exclusion'] \
                                & ( missing_model | ((np.abs(stellar_spectrum_derivative) < cont_10perc) \
                                & (stellar_spectrum_rebinned > norm_pams['lower_threshold'])))
                        else:
                            transmission[obs]['line_exclusion'] = line_exclusion

                    elif print_warning:
                        print("   No stellar synthetic spectrum from CLV models")
                        print("   some stellar lines may be included in transmission normalization ")
                        print_warning = False

                    # 3) Chebyshev polynomial fit of the continuum.
                    # NOTE(review): the selection compares the flux with its
                    # standard deviation rather than with a deviation around
                    # the continuum level — confirm this is intentional.
                    selection = transmission[obs]['line_exclusion'] & (
                        transmission[obs]['corrected'] > np.std(transmission[obs]['corrected']))

                    transmission[obs]['continuum_coeff'] = \
                        np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
                                                        transmission[obs]['corrected'][selection],
                                                        norm_pams['spectra_poly_degree'])
                    transmission[obs]['continuum'] = np.polynomial.chebyshev.chebval(
                        transmission['wave'], transmission[obs]['continuum_coeff'])
                    transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
                    transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
                        transmission[obs]['continuum']

                    # Same fit, performed on the CLV-uncorrected spectrum
                    transmission[obs]['continuum_uncorrected_coeff'] = \
                        np.polynomial.chebyshev.chebfit(transmission['wave'][selection],
                                                        transmission[obs]['rebinned'][selection],
                                                        norm_pams['spectra_poly_degree'])
                    transmission[obs]['continuum_uncorrected'] = np.polynomial.chebyshev.chebval(
                        transmission['wave'], transmission[obs]['continuum_uncorrected_coeff'])
                    transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / \
                        transmission[obs]['continuum_uncorrected']
                    transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
                        transmission[obs]['continuum_uncorrected']

                elif norm_pams['normalize_transmission'] and (
                        norm_pams['normalization_model'] == 'savgol'
                        or norm_pams['normalization_model'] == 'savitzky-golay'):

                    # Continuum normalization through a Savitzky-Golay filter
                    print('   ', obs, ' normalization using Savitzky-Golay filter')

                    transmission[obs]['continuum_coeff'] = None
                    transmission[obs]['continuum_uncorrected_coeff'] = None

                    transmission[obs]['continuum'] = savgol_filter(transmission[obs]['corrected'],
                                                                   window_length=norm_pams['window_length'],
                                                                   polyorder=norm_pams['polyorder'],
                                                                   mode=norm_pams['mode'],
                                                                   cval=norm_pams['cval'])
                    transmission[obs]['normalized'] = transmission[obs]['corrected'] / transmission[obs]['continuum']
                    transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'] / \
                        transmission[obs]['continuum']

                    transmission[obs]['continuum_uncorrected'] = savgol_filter(transmission[obs]['rebinned'],
                                                                               window_length=norm_pams['window_length'],
                                                                               polyorder=norm_pams['polyorder'],
                                                                               mode=norm_pams['mode'],
                                                                               cval=norm_pams['cval'])
                    transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'] / transmission[obs]['continuum_uncorrected']
                    transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'] / \
                        transmission[obs]['continuum_uncorrected']

                else:
                    # No normalization requested: propagate the spectra unchanged
                    transmission[obs]['continuum_coeff'] = None
                    transmission[obs]['continuum'] = np.ones_like(transmission['wave'])
                    transmission[obs]['normalized'] = transmission[obs]['corrected'].copy()
                    transmission[obs]['normalized_err'] = transmission[obs]['corrected_err'].copy()

                    transmission[obs]['continuum_uncorrected_coeff'] = None
                    transmission[obs]['continuum_uncorrected'] = np.ones_like(transmission['wave'])
                    transmission[obs]['normalized_uncorrected'] = transmission[obs]['rebinned'].copy()
                    transmission[obs]['normalized_uncorrected_err'] = transmission[obs]['rebinned_err'].copy()

                print_warning = False

            # Inverse-variance weighted average of the in-transit observations
            transm_average = np.zeros([len(lists['transit_full']), transmission['size']])
            weights_average = np.zeros([len(lists['transit_full']), transmission['size']])
            clvrm_average = np.zeros([len(lists['transit_full']), transmission['size']])
            uncorr_average = np.zeros([len(lists['transit_full']), transmission['size']])

            for i, obs in enumerate(lists['transit_full']):
                transm_average[i, :] = transmission[obs]['normalized'][:]
                weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)
                clvrm_average[i, :] = transmission[obs]['clv_model_rebinned'][:]
                uncorr_average[i, :] = transmission[obs]['normalized_uncorrected'][:]

            transmission['average'], transmission['sum_weights'] = np.average(
                transm_average, axis=0, weights=weights_average, returned=True)
            transmission['average_err'] = 1. / np.sqrt(transmission['sum_weights'])

            transmission['average_clv_model'], _ = np.average(
                clvrm_average, axis=0, weights=weights_average, returned=True)
            transmission['average_uncorrected'], _ = np.average(
                uncorr_average, axis=0, weights=weights_average, returned=True)

            # Bin the averaged spectra onto the coarser shared grid
            transmission['binned'] = \
                rebin_1d_to_1d(transmission['wave'],
                               transmission['step'],
                               transmission['average'],
                               transmission['binned_wave'],
                               transmission['binned_step'],
                               preserve_flux=False)
            transmission['binned_err'] = \
                rebin_1d_to_1d(transmission['wave'],
                               transmission['step'],
                               transmission['average_err'],
                               transmission['binned_wave'],
                               transmission['binned_step'],
                               preserve_flux=False,
                               is_error=True)
            transmission['binned_clv_model'] = \
                rebin_1d_to_1d(transmission['wave'],
                               transmission['step'],
                               transmission['average_clv_model'],
                               transmission['binned_wave'],
                               transmission['binned_step'],
                               preserve_flux=False)
            transmission['binned_uncorrected'] = \
                rebin_1d_to_1d(transmission['wave'],
                               transmission['step'],
                               transmission['average_uncorrected'],
                               transmission['binned_wave'],
                               transmission['binned_step'],
                               preserve_flux=False)

            # Same weighted average for the out-of-transit observations
            transm_average = np.zeros([len(lists['transit_out']), transmission['size']])
            weights_average = np.zeros([len(lists['transit_out']), transmission['size']])

            for i, obs in enumerate(lists['transit_out']):
                transm_average[i, :] = transmission[obs]['normalized'][:]
                weights_average[i, :] = 1./(transmission[obs]['normalized_err']**2.)

            transmission['average_out'], transmission['sum_weights_out'] = np.average(
                transm_average, axis=0, weights=weights_average, returned=True)
            transmission['average_out_err'] = 1./np.sqrt(transmission['sum_weights_out'])

            transmission['binned_out'] = \
                rebin_1d_to_1d(transmission['wave'],
                               transmission['step'],
                               transmission['average_out'],
                               transmission['binned_wave'],
                               transmission['binned_step'],
                               preserve_flux=False)
            transmission['binned_out_err'] = \
                rebin_1d_to_1d(transmission['wave'],
                               transmission['step'],
                               transmission['average_out_err'],
                               transmission['binned_wave'],
                               transmission['binned_step'],
                               preserve_flux=False,
                               is_error=True)

            save_to_cpickle(subroutine_name + '_' + reference + '_' + results_selection,
                            transmission, config_in['output'], night, it_string=it_string)

            # Forcing memory deallocation
            transmission = None

        # Forcing memory deallocation
        clv_rm_models = None
def plot_output_transmission(config_in, night_input='', results_input='', reference='planetRF', pca_iteration=-1):
    """Plot the transmission spectra produced by ``write_output_transmission``.

    For every selected night and result selection, three PDF figures are
    written into ``plots/``: the individual normalized spectra (in- and
    out-of-transit), the averaged/binned transmission spectrum, and (when a
    CLV+RM correction is available) the CLV+RM models.

    Args:
        config_in (dict): parsed SLOPpy configuration.
        night_input (str): night to plot; empty string means all nights.
        results_input (str): result selection to plot; empty means 'user'.
        reference (str): rest frame tag used to locate the cached results.
        pca_iteration (int): PCA iteration to plot; a negative value falls
            back to the ``ref_iteration`` stored in the preparation file.
    """

    night_dict = from_config_get_nights(config_in)
    fullspectrum_dict = from_config_get_fullspectrum_parameters(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    if results_input == '':
        results_list = ['user']
    else:
        results_list = np.atleast_1d(results_input)

    clv_rm_correction = fullspectrum_dict.get('clv_rm_correction', True)

    os.system('mkdir -p plots')
    interactive_plots = from_config_get_interactive_plots(config_in)

    for night in night_list:

        # Workaround to check if the transmission spectrum has been obtained
        # through PCA iterations
        preparation_input = load_from_cpickle('transmission_preparation', config_in['output'], night)
        if preparation_input.get('pca_output', False):
            if pca_iteration >= 0:
                it_string = str(pca_iteration).zfill(2)
            else:
                # NOTE(review): the writing routine uses
                # pca_parameters.get('ref_iteration', 3) while this reads the
                # preparation file with default 0 — confirm both select the
                # same iteration.
                it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
        else:
            it_string = ''
        preparation_input = None

        if clv_rm_correction:
            clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)

        for results_selection in results_list:

            filename_rad = subroutine_name + '_'+reference+'_'+results_selection

            # Retrieving the list of observations
            lists = load_from_cpickle('lists', config_in['output'], night)

            # Retrieving the analysis; skip the night if it was never computed
            try:
                transmission = load_from_cpickle(filename_rad, config_in['output'], night, it_string)
            except (FileNotFoundError, IOError):
                print()
                print("No transmission spectrum in {0:s}, no plots".format(reference))
                continue

            # Color scale based on the BJD of the observations
            bjd = []
            for obs in lists['observations']:
                bjd.append(transmission[obs]['BJD'] - 2450000.0)

            color_cmap = plt.cm.viridis
            color_norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])

            # --- Figure 1: individual normalized spectra -------------------
            fig = plt.figure(figsize=(12, 6))
            gs = GridSpec(2, 2, width_ratios=[50, 1])
            ax1 = plt.subplot(gs[0, 0])
            ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
            cbax1 = plt.subplot(gs[:, 1])

            for obs in lists['transit_full']:
                color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
                ax1.scatter(transmission['wave'],
                            transmission[obs]['normalized'],
                            c=color, s=1, zorder=3, alpha=0.25)

            for obs in lists['transit_out']:
                color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
                ax2.scatter(transmission['wave'],
                            transmission[obs]['normalized'],
                            c=color, s=1, zorder=3, alpha=0.25)

            ax1.set_ylim(0.925, 1.075)
            # raw string: avoid invalid \l and \A escape sequences
            ax2.set_xlabel(r'$\lambda$ [$\AA$]')
            ax2.legend(loc=3)
            ax1.set_title('Night: {0:s} \n In-transit transmission spectrum in {1:s} \n Solution {2:s}'.format(
                night, reference, results_selection))
            ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))

            try:
                ax1.set_xlim(fullspectrum_dict['plot_range'][0], fullspectrum_dict['plot_range'][1])
            except (KeyError, IndexError, TypeError):
                # No (or malformed) user-provided plot range: use the full range
                ax1.set_xlim(transmission['range'][0], transmission['range'][1])

            sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
            sm.set_array([])  # ScalarMappable requires a dummy array
            cbar = plt.colorbar(sm, cax=cbax1)
            cbar.set_label('BJD - 2450000.0')
            fig.subplots_adjust(wspace=0.05, hspace=0.4)

            output_file = get_filename(filename_rad + '_observations',
                                       config_in['output'], night, it_string=it_string, extension='.pdf')
            plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
            if interactive_plots:
                plt.show()
            plt.close()

            # --- Figure 2: averaged and binned transmission spectrum -------
            fig = plt.figure(figsize=(12, 6))
            gs = GridSpec(2, 2, width_ratios=[50, 1])
            ax1 = plt.subplot(gs[0, 0])
            ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
            cbax1 = plt.subplot(gs[:, 1])

            try:
                master_out = load_from_cpickle('master_out', config_in['output'], night)
                ax2.plot(master_out['wave'],
                         master_out['rescaled']-0.06,
                         color='k', zorder=10, label='master-out')
            except (FileNotFoundError, IOError):
                pass

            try:
                telluric = load_from_cpickle('telluric', config_in['output'], night)
                ax2.plot(telluric['template']['input']['wave'],
                         telluric['template']['input']['flux'] - 0.06,
                         color='C1', zorder=10, label='telluric')
                ax2.plot(telluric['template']['input']['wave'],
                         (telluric['template']['input']['flux']-1.)*10. + 1. - 0.06,
                         color='C2', alpha=0.5, zorder=9, label='telluric (x10)')
            except (FileNotFoundError, IOError, KeyError):
                pass

            ax1.errorbar(transmission['wave'],
                         transmission['average'],
                         yerr=transmission['average_err'],
                         fmt='ko', ms=1, zorder=5, alpha=0.25)
            ax1.errorbar(transmission['binned_wave'],
                         transmission['binned'],
                         yerr=transmission['binned_err'],
                         fmt='ro', ms=4, lw=2, zorder=10)

            ax2.errorbar(transmission['wave'],
                         transmission['average_out'],
                         yerr=transmission['average_out_err'],
                         fmt='ko', ms=1, zorder=5, alpha=0.25, label='average')
            ax2.errorbar(transmission['binned_wave'],
                         transmission['binned_out'],
                         yerr=transmission['binned_out_err'],
                         fmt='ro', ms=4, lw=2, zorder=10, label='binned average')

            ax1.set_ylim(0.99, 1.01)
            ax2.set_xlabel(r'$\lambda$ [$\AA$]')
            ax2.legend(loc=3)
            ax1.set_title('Night: {0:s} \n In-transit transmission spectrum in {1:s} \n Solution {2:s}'.format(
                night, reference, results_selection))
            ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))

            sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
            sm.set_array([])  # ScalarMappable requires a dummy array
            cbar = plt.colorbar(sm, cax=cbax1)
            cbar.set_label('BJD - 2450000.0')
            fig.subplots_adjust(wspace=0.05, hspace=0.4)

            try:
                ax1.set_xlim(fullspectrum_dict['plot_range'][0], fullspectrum_dict['plot_range'][1])
            except (KeyError, IndexError, TypeError):
                ax1.set_xlim(transmission['range'][0], transmission['range'][1])

            output_file = get_filename(filename_rad + '_binned',
                                       config_in['output'], night, it_string=it_string, extension='.pdf')
            plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
            if interactive_plots:
                plt.show()
            plt.close()

            if not clv_rm_correction:
                continue

            # --- Figure 3: CLV + RM models (only when available) -----------
            fig = plt.figure(figsize=(12, 6))
            gs = GridSpec(2, 2, width_ratios=[50, 1])
            ax1 = plt.subplot(gs[0, 0])
            ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
            cbax1 = plt.subplot(gs[:, 1])

            for obs in lists['transit_full']:
                color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
                ax1.plot(clv_rm_models['common']['wave'],
                         transmission[obs]['clv_model_stellarRF'],
                         zorder=3, alpha=0.25)
                ax1.scatter(transmission['wave'],
                            transmission[obs]['clv_model_rebinned'],
                            c=color, s=1, zorder=10, alpha=0.5)

            for obs in lists['transit_out']:
                color = [color_cmap(color_norm(transmission[obs]['BJD'] - 2450000.0))[:-1]]
                ax2.plot(clv_rm_models['common']['wave'],
                         transmission[obs]['clv_model_stellarRF'],
                         zorder=3, alpha=0.25)
                ax2.scatter(transmission['wave'],
                            transmission[obs]['clv_model_rebinned'],
                            c=color, s=1, zorder=10, alpha=0.5)

            ax2.set_xlabel(r'$\lambda$ [$\AA$]')
            ax2.legend(loc=3)
            ax1.set_title('Night: {0:s} \n CLV-RM correction in {1:s} \n Solution {2:s}'.format(
                night, reference, results_selection))
            ax2.set_title('Out-transit transmission spectrum in {0:s}'.format(reference))

            try:
                ax1.set_xlim(fullspectrum_dict['plot_range'][0], fullspectrum_dict['plot_range'][1])
            except (KeyError, IndexError, TypeError):
                ax1.set_xlim(transmission['range'][0], transmission['range'][1])

            sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
            sm.set_array([])  # ScalarMappable requires a dummy array
            cbar = plt.colorbar(sm, cax=cbax1)
            cbar.set_label('BJD - 2450000.0')
            fig.subplots_adjust(wspace=0.05, hspace=0.4)

            output_file = get_filename(filename_rad + '_clv_rm_models',
                                       config_in['output'], night, it_string=it_string, extension='.pdf')
            plt.savefig('plots/'+output_file, bbox_inches='tight', dpi=300)
            if interactive_plots:
                plt.show()
            plt.close()
| 39,983 | 48.917603 | 146 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_spectrum_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.plot_subroutines import *
from scipy.interpolate import UnivariateSpline
from scipy.signal import savgol_filter
# Public entry points exposed by this module.
__all__ = [
    'compute_transmission_spectrum_preparation',
    'plot_transmission_spectrum_preparation',
]
def compute_transmission_spectrum_preparation(config_in):
subroutine_name = 'transmission_spectrum_preparation'
night_dict = from_config_get_nights(config_in)
for night in night_dict:
try:
preparation = load_from_cpickle('transmission_preparation',
config_in['output'],
night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'])
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
if config_in['master-out'].get('use_composite', False):
master_out = load_from_cpickle('master_out_composite', config_in['output'], night)
print(' Using composite master-out from all nights')
else:
master_out = load_from_cpickle('master_out', config_in['output'], night)
if config_in['master-out'].get('use_smoothed', False):
master_out['rescaled'] = master_out['smoothed']
master_out['rescaled_err'] = master_out['smoothed_err']
print(' Using smoothed master-out')
preparation = {
'subroutine': subroutine_name,
}
for obs in lists['observations']:
preparation[obs] = {}
preparation[obs]['master_out'] = {}
preparation[obs]['wave'] = input_data[obs]['wave'] #Added for plotting purpose only
""" Step 1+2): bring back the master-out to the ORF and rebin the 1D master-out to the 2D observation scale"""
preparation[obs]['master_out']['rebinned'] = \
rebin_1d_to_2d(master_out['wave'],
master_out['step'],
master_out['rescaled'],
input_data[obs]['wave'],
input_data[obs]['step'],
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
preserve_flux=False)
preparation[obs]['master_out']['rebinned_err'] = \
rebin_1d_to_2d(master_out['wave'],
master_out['step'],
master_out['rescaled_err'],
input_data[obs]['wave'],
input_data[obs]['step'],
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
preserve_flux=False,
is_error=True)
for order in range(0, observational_pams['n_orders']):
preparation[obs]['master_out']['rebinned'][order, :], \
preparation[obs]['master_out']['rebinned_err'][order, :], \
_ = \
replace_values_errors_with_interpolation_1d(preparation[obs]['master_out']['rebinned'][order, :],
preparation[obs]['master_out']['rebinned_err'][order, :],
less_than=0.001, greater_than=5.0000)
#replace_values_errors(preparation[obs]['master_out']['rebinned'],
# preparation[obs]['master_out']['rebinned_err'],
# threshold=0.0001, replacement=1.0000)
""" Step 3): obtain the unscaled transmission spectrum for this observation """
preparation[obs]['ratio'] = input_data[obs]['e2ds']/\
preparation[obs]['master_out']['rebinned']
preparation[obs]['ratio_err'] = preparation[obs]['ratio'] * \
np.sqrt((input_data[obs]['e2ds_err']/
input_data[obs]['e2ds'])**2 +
(preparation[obs]['master_out']['rebinned_err']/
preparation[obs]['master_out']['rebinned'])**2)
preparation[obs]['ratio_precleaning'] = preparation[obs]['ratio'].copy()
preparation[obs]['ratio_precleaning_err'] = preparation[obs]['ratio_err'].copy()
if night_dict[night].get('spline_residuals', True):
print()
print(' Cleaning for telluric residuals with Univariate Spline - threshold about 5%')
# cleaning using spline_univariate
for order in range(0, observational_pams['n_orders']):
obs_reference = lists['observations'][0]
len_y = len(lists['observations'])
len_x = len(preparation[obs_reference]['wave'][order, :])
time_from_transit = np.empty(len_y, dtype=np.double)
data_array = np.empty([len_y, len_x], dtype=np.double)
median_array = np.empty(len_y, dtype=np.double)
for i_obs, obs in enumerate(lists['observations']):
time_from_transit[i_obs] = input_data[obs]['BJD'] - observational_pams['time_of_transit']
median_array[i_obs] = np.median(preparation[obs]['ratio_precleaning'][order ,:])
data_array[i_obs, :] = preparation[obs]['ratio_precleaning'][order ,:]/median_array[i_obs]
#wave = preparation[obs]['wave'][order, :]
res = data_array * 1.
val = np.empty([len_y, len_x], dtype=np.double)
for ii in range(0, len_x):
spl = UnivariateSpline(time_from_transit, data_array[:, ii])
val[:,ii] = spl(time_from_transit)
res[:,ii] -= val[:,ii]
res[:,ii] /= val[:,ii]
sel = np.abs(res) > 0.05
for i_obs, obs in enumerate(lists['observations']):
if np.sum(sel[i_obs]) > 0:
preparation[obs]['ratio'][order, sel[i_obs]] = val[i_obs, sel[i_obs]] * median_array[i_obs]
preparation[obs]['ratio_err'][order, sel[i_obs]] *= 10.
else:
print()
print(' Cleaning for telluric residuals NOT performed')
for obs in lists['observations']:
preparation[obs]['deblazed'] = preparation[obs]['ratio'] / calib_data['blaze'] / (input_data[obs]['step'] / np.median(input_data[obs]['step']))
preparation[obs]['deblazed_err'] = preparation[obs]['ratio_err'] / calib_data['blaze'] / (input_data[obs]['step'] / np.median(input_data[obs]['step']))
if not config_in['settings'].get('full_output', False):
del preparation[obs]['master_out']
else:
# added for plotting purposes only
preparation[obs]['rescaling'], \
preparation[obs]['rescaled'], \
preparation[obs]['rescaled_err'] = perform_rescaling(
preparation[obs]['wave'],
preparation[obs]['deblazed'],
preparation[obs]['deblazed_err'],
observational_pams['wavelength_rescaling'])
save_to_cpickle('transmission_preparation', preparation, config_in['output'], night)
print()
""" Keep going from here after preparation, unless the subroutines has been called just
to preform the data preparation step
"""
def plot_transmission_spectrum_preparation(config_in, night_input=''):
    """Diagnostic plots for the transmission-spectrum preparation step.

    For each selected night this shows: (1) a 2D transmission map of one
    hard-coded order in the observer reference frame, (2) optionally the
    residuals after the UnivariateSpline cleaning in time, and (3) the
    rescaled transit-in spectra color-coded by BJD.

    :param config_in: parsed configuration dictionary
    :param night_input: single night to plot; all nights when empty string
    """
    subroutine_name = 'transmission_spectrum_preparation'

    night_dict = from_config_get_nights(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        # ! To be removed when testing is done
        # ! This plots do not make any sense anymore
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])

        """ Retrieving the analysis"""
        try:
            preparation = load_from_cpickle('transmission_preparation', config_in['output'], night)
        except:
            # no preparation pickle for this night: skip plotting entirely
            print("No transmission spectrum results, no plots")
            print()
            continue

        #from SLOPpy.subroutines.lines_fit_functions import logprob_case12
        from matplotlib.colors import BoundaryNorm
        from matplotlib.ticker import MaxNLocator

        len_y = len(lists['observations'])
        len_x = 4096  # assumes 4096 pixels per order — TODO confirm for this instrument
        order= 11     # hard-coded order used for the diagnostic map

        time_from_transit = np.empty(len_y, dtype=np.double)
        plot_data = np.empty([len_y, len_x], dtype=np.double)

        for i_obs, obs in enumerate(lists['observations']):
            time_from_transit[i_obs] = input_data[obs]['BJD'] - observational_pams['time_of_transit']
            # each observation is normalized by its own median for display
            plot_data[i_obs, :] = preparation[obs]['deblazed'][order ,:]/ np.median(preparation[obs]['deblazed'][order ,:])
            # NOTE(review): wave is overwritten at every iteration; the map is
            # drawn with the wavelength scale of the LAST observation
            wave = preparation[obs]['wave'][order, :]

        wave_meshgrid, time_meshgrid = np.meshgrid(wave, time_from_transit)

        cmap = plt.get_cmap('coolwarm')
        #levels = MaxNLocator(nbins=15).tick_values(
        #    plot_data.min(), plot_data.max())
        levels = MaxNLocator(nbins=21).tick_values(0.90, 1.10)
        norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)

        plt.figure(figsize=(15, 10))
        plt.title('Transmission map in observer reference frame\n {0:s}'.format(night))
        PCF = plt.contourf(wave_meshgrid, time_meshgrid,
                           plot_data, levels=levels, cmap=cmap)
        cbar = plt.colorbar(PCF)
        cbar.ax.set_ylabel('Intensity')
        plt.show()

        if night_dict[night].get('spline_residuals', True):
            # reproduce the cleaning step: fit each pixel column as a smooth
            # function of time and plot the relative residuals
            res = plot_data * 1.
            from scipy.interpolate import UnivariateSpline
            for ii in range(0,4096):
                spl = UnivariateSpline(time_from_transit, plot_data[:, ii])
                val = spl(time_from_transit)
                res[:,ii] -= val
                res[:,ii] /= val

            cmap = plt.get_cmap('coolwarm')
            levels = MaxNLocator(nbins=10).tick_values(-0.05, 0.05)
            norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
            plt.figure(figsize=(15, 10))
            plt.title('Residuals after dividing by UnivariateSpline Spline\n {0:s}'.format(night))
            PCF = plt.contourf(wave_meshgrid, time_meshgrid,
                               res, levels=levels, cmap=cmap)
            cbar = plt.colorbar(PCF)
            cbar.ax.set_ylabel('Intensity')
            plt.show()

        """ Creation of the color array, based on the BJD of the observations
        """
        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax1.set_ylim(0.90, 1.10)
        #ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for obs in lists['transit_in']:
            #preparation[obs]['rescaling'], \
            #preparation[obs]['rescaled'], \
            #preparation[obs]['rescaled_err'] = perform_rescaling(
            #    preparation[obs]['wave'],
            #    preparation[obs]['deblazed'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
            #    preparation[obs]['deblazed_err'] / (input_data[obs]['step'] / np.median(input_data[obs]['step'])),
            #    observational_pams['wavelength_rescaling'])

            # rescale the deblazed spectrum before plotting
            preparation[obs]['rescaling'], \
                preparation[obs]['rescaled'], \
                preparation[obs]['rescaled_err'] = perform_rescaling(
                    preparation[obs]['wave'],
                    preparation[obs]['deblazed'],
                    preparation[obs]['deblazed_err'],
                    observational_pams['wavelength_rescaling'])

            ax1.scatter(preparation[obs]['wave'],
                        preparation[obs]['rescaled'],
                        s=1, alpha=0.25,
                        color=colors_plot['mBJD'][obs])

        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 13,646 | 42.32381 | 163 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_v1_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
# Public API of this module.
__all__ = ["compute_telluric_molecfit_v1_preparation",
           "plot_telluric_molecfit_v1_preparation"]
def compute_telluric_molecfit_v1_preparation(config_in):
    """
    Prepare the molecfit (v1) run for each night: create the working
    directories and build the `include` file with the wavelength ranges
    (telluric ranges trimmed to avoid stellar lines, converted to vacuum).

    Lazy workaround

    :param config_in: parsed configuration dictionary
    :return: None; results saved in the 'telluric_molecfit_preparation' pickle
    """

    night_dict = from_config_get_nights(config_in)
    molecfit_dict = from_config_get_molecfit(config_in)

    for night in night_dict:

        try:
            # skip nights that have been prepared already
            tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)
            continue
        except:
            print()
            print("compute_telluric_molecfit_preparation Night: ", night)
            print()

        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        tellprep = {
            'work_dir': config_in['output'] + '_molecfit_' + night,
            'include': {}
        }

        """
        We store all the molecfit files in a subdirectory
        We save the path of the main directory to a temporary file
        """
        os.system('mkdir -p ' + tellprep['work_dir'])
        os.system('mkdir -p ' + tellprep['work_dir'] + '/output/')

        """
        Creation of the include files
        """

        """
        includes_spans_ORF: wavelength ranges _with_ telluric lines, in the ORF
        includes_spans_SRF: wavelength ranges _without_ stellar lines and broadly
        overlapping with telluric ranges, in the SRF
        the two lists must have the same number of columns, with precise correspondence
        """
        tellprep['include']['spans_telluric'] = np.genfromtxt(molecfit_dict['include_telluric'])
        tellprep['include']['spans_stellar_SRF'] = np.genfromtxt(molecfit_dict['include_stellar'])

        #print()
        #print(tellprep['include']['spans_telluric'])
        #print()
        #print(tellprep['include']['spans_stellar_SRF'])

        """ shift the stellar wavelength ranges into ORF """
        tellprep['include']['rv_shift_SRF2ORF'] = -observational_pams['BERV_avg'] + observational_pams['RV_star'][
            'RV_systemic']

        #print()
        #print(tellprep['include']['rv_shift_SRF2ORF'])
        #print(observational_pams['BERV_avg'])
        #print(observational_pams['RV_star']['RV_systemic'])
        #print()
        #print()

        # classical Doppler shift: lambda * (1 + rv/c), with rv in km/s
        tellprep['include']['spans_stellar'] = tellprep['include']['spans_stellar_SRF']\
            * (tellprep['include']['rv_shift_SRF2ORF']
               / (299792458. / 1000.000) + 1.00000)

        #print()
        #print(tellprep['include']['spans_stellar'])

        """ Selecting the overlapping regions between the two lists: we want telluric regions that are not contaminated
        by stellar lines,
        """
        sel_lower = (tellprep['include']['spans_stellar'][:, 0] > tellprep['include']['spans_telluric'][:, 0])
        sel_upper = (tellprep['include']['spans_stellar'][:, 1] < tellprep['include']['spans_telluric'][:, 1])

        """ Final list in the ORF is built"""
        tellprep['include']['selected'] = tellprep['include']['spans_telluric'].copy()
        # tighten each boundary to the stellar-safe range where it is more restrictive
        tellprep['include']['selected'][sel_lower, 0] = tellprep['include']['spans_stellar'][sel_lower, 0]
        tellprep['include']['selected'][sel_upper, 1] = tellprep['include']['spans_stellar'][sel_upper, 1]

        #print()
        #print(tellprep['include']['selected'])

        """ Molecfit line list must be given in vacuum wavelength, even if the stellar spectra is in air wavelength
        conversion from air to vacuum for include file preparation
        where s = 10000 / lambda air and the conversion is: lambda_vac = lambda_air * n.
        http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
        """
        s2 = (10000. / tellprep['include']['selected']) ** 2
        n = 1 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s2) + 0.0001599740894897 / (
            38.92568793293 - s2)
        # division by 10000. converts Angstrom to micron — presumably the unit
        # expected by molecfit include files; TODO confirm
        tellprep['include']['vacuum'] = tellprep['include']['selected'] * n / 10000.

        # write one "start end" pair per line in the molecfit include file
        fileout = open('./' + tellprep['work_dir'] + '/include_' + night + '.dat', 'w')
        for i_s, i_e in zip(tellprep['include']['vacuum'][:, 0], tellprep['include']['vacuum'][:, 1]):
            fileout.write('{0:12.8f} {1:12.8f}\n'.format(i_s, i_e))
        fileout.close()

        #quit()
        save_to_cpickle('telluric_molecfit_preparation', tellprep, config_in['output'], night)
def plot_telluric_molecfit_v1_preparation(config_in, night_input=''):
    """Placeholder plotting routine: only reports the nights being processed.

    :param config_in: parsed configuration dictionary
    :param night_input: single night to report; all nights when empty string
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    # either iterate over every configured night or over the requested one
    night_list = night_dict if night_input == '' else np.atleast_1d(night_input)

    for night in night_list:
        print("plot_telluric_template Night: ", night)
| 5,343 | 38.007299 | 119 | py |
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_v1_coadd.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.telluric_molecfit_v1_preparation import compute_telluric_molecfit_v1_preparation
# Public API of this module.
__all__ = ["compute_telluric_molecfit_v1_coadd",
           "plot_telluric_molecfit_v1_coadd"]

# Tag used in log messages for this subroutine.
subroutine_name = 'telluric_molecfit_v1_coadd'
def compute_telluric_molecfit_v1_coadd(config_in):
    """
    Compute the telluric correction with molecfit (v1) on coadded spectra.

    Lazy workaround

    For each night the observations are rescaled, rebinned onto a common
    wavelength grid in the observer reference frame (ORF), and coadded until
    the cumulated exposure time reaches ``exptime_coadd``; molecfit is run on
    each coadd and calctrans propagates the fitted transmission to every
    single observation. The telluric spectra are finally rebinned back onto
    the e2ds scale and saved in the 'telluric' / 'telluric_processed' pickles.

    :param config_in: parsed configuration dictionary
    :return: None (results are written to disk)
    """
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    molecfit_dict = from_config_get_molecfit(config_in)

    # working directories and include files must exist before running molecfit
    compute_telluric_molecfit_v1_preparation(config_in)

    for night in night_dict:

        instrument_name = night_dict[night]['instrument']
        template_dict = instrument_dict[instrument_name]['telluric_template']

        try:
            # skip nights for which the telluric correction is already on disk
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()

        print(' instrument :', instrument_name)
        print()

        tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': 'telluric_molecfit',
            'n_orders': 0,
            'n_pixels': 0,
        }

        telluric = {
            'subroutine': 'telluric_molecfit',
            'reference_frame': 'observer'
        }

        processed['airmass_ref'] = 0.000
        processed['telluric'] = {}
        processed['rebin'] = {}
        processed['work_dir'] = tellprep['work_dir']

        """
        Molecfit works on pixel grid, so we must ensure that the spectra are rebinned always on the same wavelength
        scale and same wavelength step. We use local arrays for this purpose
        """
        processed['rebin']['wave'] = np.arange(input_data['coadd']['wavelength_range'][0],
                                               input_data['coadd']['wavelength_range'][1],
                                               molecfit_dict['rebinning_step'],
                                               dtype=np.double)
        processed['rebin']['size'] = np.size(processed['rebin']['wave'])
        processed['rebin']['step'] = np.ones(processed['rebin']['size'],
                                             dtype=np.double) * molecfit_dict['rebinning_step']

        # NOTE(review): this immediately replaces the grid computed just above
        # with the precomputed coadd grid — the np.arange grid is never used
        processed['rebin'] = {
            'wave': input_data['coadd']['wave'],
            'size': input_data['coadd']['size'],
            'step': input_data['coadd']['step'],
        }

        n_coadd = 0
        n_reference = 0
        texp_cumulated = 0.00
        texp_total = 0.000
        coadd_list = []

        # Computing the total integration time
        for n_obs, obs in enumerate(lists['observations']):
            texp_total += input_data[obs]['EXPTIME']

        print(' Writing data and configuration files for molecfit+calctrans')
        print()

        # There must be a more elegant way to do this, but I'm, not aware of it
        for n_obs, obs in enumerate(lists['observations']):
            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }

            """ e2ds spectra are rescaled and then rebinned while keeping them in the Observer Reference Frame"""
            processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
                perform_rescaling(input_data[obs]['wave'],
                                  input_data[obs]['e2ds'],
                                  input_data[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            preserve_flux = input_data[obs].get('absolute_flux', True)

            processed[obs]['rebin_ORF'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               processed[obs]['e2ds_rescaled'],
                               calib_data['blaze'],
                               processed['rebin']['wave'],
                               processed['rebin']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=0.00)

            """ Molecfit analysis is skipped if the telluric correction has been computed already"""
            # if os.path.isfile('./molecfit_'+night +'/output/'+obs+'_ORF_s1d_TAC.dat'):
            #     print('   molecfit+calctrans results for ' + obs + ' already available')
            #     continue

            """ the spectra is saved as an ASCII file in a format suitable for molecfit """
            fileout = open('./' + processed['work_dir'] + '/' + obs + '_ORF_s1d.dat', 'w')
            for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_ORF']):
                fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
            fileout.close()

            """
            processed[obs]['rebin_SRF'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               processed[obs]['e2ds_rescaled'],
                               calib_data['blaze'],
                               processed['rebin']['wave'],
                               processed['rebin']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift = observational_pams[obs]['rv_shift_ORF2SRF'])

            fileout = open('./' + processed['work_dir'] + '/' + obs + '_SRF_s1d.dat','w')
            for w, f in zip(processed['rebin']['wave'], processed[obs]['rebin_SRF']):
                fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
            fileout.close()
            """

            """ spectra is coadded to increase the SNR of the spectrum analyzed by molecfit """
            if n_coadd == 0:
                # start a new coadd group: the first observation provides the
                # reference parameters, which will be averaged at closing time
                reference_name = 'coadded_{0:03d}'.format(n_reference)
                rebin_coadd = processed[obs]['rebin_ORF'].copy()

                molecfit_pams = {
                    'MJD': input_data[obs]['MJD'],
                    'UTC': input_data[obs]['UTC'],
                    'ELEVATION': input_data[obs]['ELEVATION'],
                    'HUMIDITY': input_data[obs]['HUMIDITY'],
                    'PRESSURE': input_data[obs]['PRESSURE'],
                    'TEMPERATURE_EN': input_data[obs]['TEMPERATURE_EN'],
                    'TEMPERATURE_M1': input_data[obs]['TEMPERATURE_M1']}

                coadded_files = open('./' + processed['work_dir'] + '/' + reference_name + '_files.list', 'w')
                coadd_list.append(reference_name)
            else:
                rebin_coadd += processed[obs]['rebin_ORF']

                molecfit_pams['MJD'] += input_data[obs]['MJD']
                molecfit_pams['UTC'] += input_data[obs]['UTC']
                molecfit_pams['ELEVATION'] += input_data[obs]['ELEVATION']
                molecfit_pams['HUMIDITY'] += input_data[obs]['HUMIDITY']
                molecfit_pams['PRESSURE'] += input_data[obs]['PRESSURE']
                molecfit_pams['TEMPERATURE_EN'] += input_data[obs]['TEMPERATURE_EN']
                molecfit_pams['TEMPERATURE_M1'] += input_data[obs]['TEMPERATURE_M1']

            n_coadd += 1
            coadded_files.write(obs + '\n')
            texp_cumulated += input_data[obs]['EXPTIME']

            # TODO: input from configuration file for molecfit installation path
            # per-observation script: calctrans only (the fit is done on the coadd)
            bash_script = open('./' + processed['work_dir'] + '/molecfit_exec_' + obs + '.source', 'w')
            bash_script.write('#!/bin/bash \n')
            bash_script.write('export TMPDIR=$PWD\n')
            bash_script.write('echo  " "  executing calctrans on ' + obs + ' \n')
            bash_script.write(molecfit_dict['installation_path'] + 'calctrans ' +
                              obs + '.par > ' + obs + '_calctrans.log\n')
            bash_script.close()

            write_molecfit_v1_par('./' + processed['work_dir'] + '/' + obs + '.par',
                                  obs + '_ORF_s1d.dat',
                                  reference_name,
                                  'include_' + night + '.dat',
                                  input_data[obs]['molecfit'],
                                  input_data[obs])

            # close the coadd when enough exposure time has been accumulated
            # (while still leaving enough for the next coadd), or at the last
            # observation of the night
            if (texp_cumulated >= molecfit_dict['exptime_coadd'] and
                    texp_total - texp_cumulated >= molecfit_dict['exptime_coadd']) \
                    or n_obs == len(lists['observations']) - 1:
                coadded_files.close()
                print(' Coadded spectrum: ', n_reference)

                rebin_coadd /= n_coadd

                """ the spectra is saved as an ASCII file in a format suitable for molecfit """
                fileout = open('./' + processed['work_dir'] + '/' + reference_name + '_ORF_s1d.dat', 'w')
                for w, f in zip(processed['rebin']['wave'], rebin_coadd):
                    fileout.write('{0:12.6f} {1:12.6f} \n'.format(w, f))
                fileout.close()

                """ Average of the observational parameters """
                for key in molecfit_pams:
                    molecfit_pams[key] /= n_coadd
                molecfit_pams['GEOELEV'] = input_data[obs]['GEOELEV']
                molecfit_pams['GEOLONG'] = input_data[obs]['GEOLONG']
                molecfit_pams['GEOLAT'] = input_data[obs]['GEOLAT']

                # TODO: input from configuration file for molecfit installation path
                bash_script = open('./' + processed['work_dir'] + '/molecfit_exec_' + reference_name + '.source', 'w')
                bash_script.write('#!/bin/bash \n')
                bash_script.write('export TMPDIR=$PWD\n')
                bash_script.write('echo  " "  executing molecfit+calctrans on ' + reference_name + ' \n')
                bash_script.write(molecfit_dict['installation_path'] + 'molecfit ' +
                                  reference_name + '.par > ' + reference_name + '_molecfit.log\n')
                bash_script.write(molecfit_dict['installation_path'] + 'calctrans ' +
                                  reference_name + '.par > ' + reference_name + '_calctrans.log\n')
                bash_script.close()

                # TODO: cycle with variation in UTC until molecfit exits succesfully
                #
                # while True:
                #
                #     # write parameter file
                #     # execute molecfit
                #     # check if file _tac.fits has been written (= successful run)
                #     if cond:
                #         break
                #
                utc_reference = molecfit_pams['UTC'] * 1.
                utc_incremental = True
                utc_increase = 500.

                # molecfit occasionally crashes for specific UTC values: retry
                # while nudging the UTC up (then down) by utc_increase seconds
                while True:
                    if os.path.exists('./' + processed['work_dir'] + '/output/' + reference_name + '_tac.asc'):
                        print(' molecfit for ' + reference_name + ' previously completed')
                        print()
                        break

                    write_molecfit_v1_par('./' + processed['work_dir'] + '/' + reference_name + '.par',
                                          reference_name + '_ORF_s1d.dat',
                                          reference_name,
                                          'include_' + night + '.dat',
                                          input_data[obs]['molecfit'],
                                          molecfit_pams)

                    os.system('cd ' + processed['work_dir'] + '/ && . ./molecfit_exec_' + reference_name + '.source')

                    if os.path.exists('./' + processed['work_dir'] + '/output/' + reference_name + '_tac.asc'):
                        print(' molecfit for ' + reference_name + ' successfully completed')
                        print()
                        break

                    # switch to decreasing UTC before crossing the one-day boundary
                    if molecfit_pams['UTC'] > 86400 - utc_increase:
                        utc_incremental = False
                        molecfit_pams['UTC'] = utc_reference

                    if utc_incremental:
                        molecfit_pams['UTC'] += utc_increase
                        print(' molecfit for {0:s} crashed, UTC increased from {1:6.0f} to {2:6.0f} '.format(
                            reference_name, utc_reference, molecfit_pams['UTC']))
                    else:
                        molecfit_pams['UTC'] -= utc_increase
                        print(' molecfit for {0:s} crashed, UTC decreased from {1:6.0f} to {2:6.0f} '.format(
                            reference_name, utc_reference, molecfit_pams['UTC']))

                # reset the accumulators for the next coadd group
                n_coadd = 0
                n_reference += 1
                texp_total -= texp_cumulated
                texp_cumulated = 0.0

        """
        Execute molecfit runs on all the coadded spectra
        """
        # for reference_name in coadd_list:
        #     os.system('cd molecfit_' + night + '/ && . ./molecfit_exec_' + reference_name + '.source')
        #
        print()
        print(' molecfit completed')

        for obs in lists['observations']:
            if os.path.exists('./' + processed['work_dir'] + '/output/' + obs + '_ORF_s1d_TAC.dat'):
                print(' skipping calctrans execution for observation ' + obs)
            else:
                print(' calctrans execution for observation ' + obs)
                os.system('cd ' + processed['work_dir'] + '/ && . ./molecfit_exec_' + obs + '.source')

        print()
        print(' calctrans completed')

        for n_obs, obs in enumerate(lists['observations']):

            telluric[obs] = {}

            """ Loading the telluric spectrum from the output directory of molecfit """
            telluric_molecfit = np.genfromtxt(
                './' + processed['work_dir'] + '/output/'+obs+'_ORF_s1d_TAC.dat', usecols=2)

            """ rebinning onto the e2ds wave scale"""

            if molecfit_dict.get('fix_telluric', True):
                print(' fix_telluric applied - temporary workaround for line at 5885.97 A [ORF]')
                line_boundaries = [5885.74, 5886.21]
                sel = (processed['rebin']['wave'] > line_boundaries[0]) \
                    & (processed['rebin']['wave'] < line_boundaries[1])
                # halve the depth of the problematic feature with respect to
                # its local continuum
                tell_cont = np.amax(telluric_molecfit[sel])
                telluric_molecfit[sel] = (telluric_molecfit[sel] - tell_cont) / 2.0 + tell_cont

            telluric[obs]['spectrum'] = \
                rebin_1d_to_2d(processed['rebin']['wave'],
                               processed['rebin']['step'],
                               telluric_molecfit,
                               input_data[obs]['wave'],
                               input_data[obs]['step'],
                               preserve_flux=False)

            try:
                # BUG FIX: the original call omitted the input array
                # (np.nan_to_num(nan=1.0, ...)), so it always raised TypeError
                # and silently fell through to the manual branch below
                telluric[obs]['spectrum'] = np.nan_to_num(telluric[obs]['spectrum'],
                                                          nan=1.0, posinf=1.0, neginf=1.0)
            except:
                # fallback for numpy versions without the nan/posinf/neginf keywords
                temp = ~(np.isfinite(telluric[obs]['spectrum']))
                telluric[obs]['spectrum'][temp] = 1.0

            # nearly-zero transmission values would blow up the correction
            sel = telluric[obs]['spectrum'] < 0.0001
            telluric[obs]['spectrum'][sel] = 1.0

            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']

            " for compatibilty to some plots, even if it doesn't make any sense"
            telluric[obs]['airmass_ref'] = 0.000
            telluric[obs]['spectrum_noairmass'] = np.power(telluric[obs]['spectrum'],
                                                           telluric[obs]['airmass_ref'] - input_data[obs]['AIRMASS'])

            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][telluric[obs]['null']] = 1.0

            # we just copy the spectrum file, it's it's a model itself
            telluric[obs]['spline'] = telluric[obs]['spectrum'].copy()

            processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_rescaled'] / telluric[obs]['spectrum']
            processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_rescaled_err'] / telluric[obs]['spectrum']

        save_to_cpickle('telluric', telluric, config_in['output'], night)
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)

        print()
        print("Night ", night, " completed")
def plot_telluric_molecfit_v1_coadd(config_in, night_input=''):
    """Plot the molecfit telluric correction for each night.

    Top panel: rescaled e2ds spectra before (lines) and after (points) the
    telluric correction, color-coded by BJD. Bottom panel: the telluric
    transmission spectrum itself. An optional comparison spectrum is
    overplotted when configured for the instrument.

    :param config_in: parsed configuration dictionary
    :param night_input: single night to plot; all nights when empty string
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        # plt.scatter(rescaling_array, computed_std, c='C0', zorder=1)
        # plt.scatter(sel_factor, sel_stdev, c='C1', zorder=2)
        # plt.plot(rescaling_array, np.polyval(coeff, rescaling_array))
        # plt.plot(rescaling_array, 2*rescaling_array*coeff[0] + coeff[1] )
        # plt.plot()

        print("plot_telluric_template Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except:
            # nothing to plot when the correction has not been computed yet
            print()
            print("No telluric correction, no plots")
            continue

        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])

        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        lift_spectrum = 0.25  # vertical offset used by the commented-out variants below

        for i, obs in enumerate(lists['observations']):
            # one color per observation, following the BJD-based colormap
            color_array = cmap(i / len(lists['observations']))

            for order in range(0, processed[obs]['n_orders']):
                # labels are attached only once (first order of first observation)
                if order == 0 and i == 0:
                    ax1.plot(input_data[obs]['wave'][order, :],
                             processed[obs]['e2ds_rescaled'][order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(input_data[obs]['wave'][order, :],
                                processed[obs]['e2ds_corrected'][order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(input_data[obs]['wave'][order, :],
                             processed[obs]['e2ds_rescaled'][order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(input_data[obs]['wave'][order, :],
                                processed[obs]['e2ds_corrected'][order, :],
                                s=1, c=np.atleast_2d(color_array))

                # ax1.plot(processed[obs]['wave'][order, :],
                #          e2ds_rescaled[order, :]+lift_spectrum,
                #          c=color_array, lw=1, alpha=0.5)
                # ax1.scatter(processed[obs]['wave'][order, :],
                #             e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
                #             s=1, c=np.atleast_2d(color_array))

                ax2.plot(input_data[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)
                ax2.axhline(1.00, c='k')

                # ax2.plot(processed[obs]['wave'][order, :],
                #          telluric[obs]['spline'][order, :]+lift_spectrum,
                #          c=color_array)
                # ax2.axhline(1.00+lift_spectrum, c='k')

        # ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
        # ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        ax2.set_xlabel('$\lambda$ [$\AA$]')

        try:
            # overplot the reference telluric spectrum, when configured;
            # values below 1000 are presumably wavelengths in nm — converted
            # to Angstrom for the comparison
            instrument = night_dict[night]['instrument']
            comparison_file = config_in['instruments'][instrument]['telluric_comparison']
            comparison_data = np.genfromtxt(comparison_file, skip_header=1)
            if comparison_data[0, 0] < 1000.0:
                nm2Ang = 10.
            else:
                nm2Ang = 1.
            ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
            ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
        except:
            # the comparison spectrum is optional
            pass

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
| 22,214 | 44.244399 | 118 | py |
SLOPpy | SLOPpy-main/SLOPpy/spectra_lightcurve.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.clv_rm_subroutines import *
from SLOPpy.subroutines.math_functions import *
from astropy.convolution import convolve, Box1DKernel
# Public API of this module.
__all__ = ['compute_spectra_lightcurve',
           'compute_spectra_lightcurve_clv_rm_correction',
           'plot_spectra_lightcurve',
           'plot_spectra_lightcurve_clv_rm_correction']
def compute_spectra_lightcurve_clv_rm_correction(config_in, lines_label):
    """Alias: the CLV/RM-corrected lightcurve is computed by the same routine
    as the plain spectra lightcurve."""
    compute_spectra_lightcurve(config_in, lines_label)
def plot_spectra_lightcurve_clv_rm_correction(config_in, night_input=''):
    """Alias: the CLV/RM-corrected lightcurve is plotted by the same routine
    as the plain spectra lightcurve."""
    plot_spectra_lightcurve(config_in, night_input)
# Tag used in log messages and stored in the output dictionaries.
subroutine_name = 'spectra_lightcurve'
# Sampler identifier; presumably used to name MCMC result files referenced by
# the routines below — TODO confirm against the rest of the module.
sampler_name = 'emcee'
def compute_spectra_lightcurve(config_in, lines_label):
    """ Compute the spectral lightcurve (C-band over S-band flux ratios) of a line set.

    For every night the observed e2ds spectra are rebinned onto the shared
    wavelength grid (preparation step, cached on disk with save_to_cpickle).
    For each observation the flux integrated in the line passbands (C bands)
    is divided by the flux integrated in the continuum reference bands
    (S bands), optionally after removal of the CLV+RM stellar model; the
    ratios are then rescaled to the out-of-transit average, phase-sorted,
    phase-binned, and saved per night and per source of the observational
    parameters (configuration file or MCMC posteriors).

    Args:
        config_in (dict): parsed SLOPpy configuration.
        lines_label (str): key of the line set inside the ``spectral_lines``
            section of the configuration.

    Fixes with respect to the previous revision:
    - ``results_list.append([...])`` nested a list inside the list, so the
      ``results_selection not in results_list`` test could never match the
      MCMC selections; ``extend`` adds the names as individual items.
    - ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is used.
    - the ``full_transit_duration`` branch read the ``total_transit_duration``
      key (copy-paste key mismatch).
    - ``np.zeros(n, dtype=str)`` creates a ``<U1`` array that truncates the
      observation names to one character; ``dtype=object`` keeps full names.
    """

    results_list_default = ['user',
                            'mcmc_night_MED',
                            'mcmc_night_MAP',
                            'mcmc_global_MED',
                            'mcmc_global_MAP']

    # suffixes selecting the corrected / uncorrected / model-only quantities
    append_list = ['', '_uncorrected', '_clv_model']

    do_average_instead_of_sum = True

    night_dict = from_config_get_nights(config_in)
    #instrument_dict = from_config_get_instrument(config_in)
    #system_dict = from_config_get_system(config_in)
    planet_dict = from_config_get_planet(config_in)

    spectral_lines = from_config_get_spectral_lines(config_in)
    lines_dict = spectral_lines[lines_label]
    clv_rm_correction = lines_dict.get('clv_rm_correction', True)

    # from_config_get_transmission_lightcurve(config_in)
    #lightcurve_dict = from_config_get_transmission_lightcurve(config_in)

    shared_data = load_from_cpickle('shared', config_in['output'])

    """ Using the MCMC fit range to define the transmission spectrum region """
    shared_selection = (shared_data['coadd']['wave'] >= lines_dict['range'][0]) \
        & (shared_data['coadd']['wave'] < lines_dict['range'][1])

    preparation_template = {
        'subroutine': subroutine_name,
        'range': lines_dict['range'],
        'wave': shared_data['coadd']['wave'][shared_selection],
        'step': shared_data['coadd']['step'][shared_selection],
        # BUGFIX: np.int was removed in NumPy 1.24, use the builtin instead
        'size': int(np.sum(shared_selection)),
    }

    # BUGFIX: the key tested and the key read must match
    if 'full_transit_duration' in planet_dict:
        full_transit_duration = planet_dict['full_transit_duration'][0]
    else:
        full_transit_duration = planet_dict['transit_duration'][0]

    if 'total_transit_duration' in planet_dict:
        total_transit_duration = planet_dict['total_transit_duration'][0]
    else:
        total_transit_duration = planet_dict['transit_duration'][0]

    """
    The transit phase [0-1] is divided in N (=5) bins. Two arrays are computed:
    - transit_in_bins: array with the boundaries of the bins, size=N+1
    - transit_in_step: average size of the bin, size=1
    """
    transit_in_bins = np.linspace(
        -total_transit_duration/2./planet_dict['period'][0],
        total_transit_duration/2./planet_dict['period'][0],
        6
    )
    transit_full_bins = np.linspace(
        -full_transit_duration/2./planet_dict['period'][0],
        full_transit_duration/2./planet_dict['period'][0],
        6
    )

    transit_in_step = np.average(transit_in_bins[1:]-transit_in_bins[:-1])
    transit_full_step = np.average(transit_full_bins[1:]-transit_full_bins[:-1])

    """ Preparation stage - rebinning of spectra """
    for night in night_dict:

        preparation = None  # Free up memory

        try:
            preparation = load_from_cpickle(subroutine_name + '_preparation', config_in['output'], night, lines_label)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name + '_preparation', night, 'Retrieved'))
            continue
        except:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name + '_preparation', night, 'Computing'))
            print()

        preparation = preparation_template.copy()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        for n_obs, obs in enumerate(lists['observations']):

            preparation[obs] = {}

            preparation[obs]['rescaling'], \
            preparation[obs]['rescaled'], \
            preparation[obs]['rescaled_err'] = perform_rescaling(
                input_data[obs]['wave'], input_data[obs]['e2ds'], input_data[obs]['e2ds_err'],
                observational_pams['wavelength_rescaling'])

            preserve_flux = input_data[obs].get('absolute_flux', True)

            # rebinning of the e2ds spectra (and errors) onto the shared grid,
            # shifted into the stellar reference frame
            preparation[obs]['rebinned'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds'],
                               calib_data['blaze'],
                               preparation['wave'],
                               preparation['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])

            preparation[obs]['rebinned_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds_err'],
                               calib_data['blaze'],
                               preparation['wave'],
                               preparation['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])

        save_to_cpickle(subroutine_name + '_preparation', preparation, config_in['output'], night, lines_label)

        # Free up memory
        calib_data = None
        input_data = None
        observational_pams = None

    """ Actual computation of spectral lightcurve """
    # doublet sodium in the lab reference frame

    """
    C stands for central
    """
    C_bands = {}
    for passband_key, passband_val in lines_dict['passbands'].items():
        C_bands[passband_key] = {}
        for line_key, line_val in lines_dict['lines'].items():
            C_bands[passband_key][line_key] = (np.abs(preparation['wave'] - line_val) < passband_val / 2.)

    """
    S stands for side
    """
    S_bands = {}
    for band_key, band_val in lines_dict['continuum'].items():
        S_bands[band_key] = (preparation['wave'] >= band_val[0]) & (preparation['wave'] <= band_val[1])

    results_list = results_list_default.copy()

    for results_selection in results_list_default:

        skip_iteration = False

        for night in night_dict:

            print_warning = True

            if skip_iteration: continue

            binned_mcmc_night = check_existence_cpickle(
                'transmission_binned_mcmc_' + sampler_name + '_results', config_in['output'], night, lines_label)
            binned_mcmc_global = check_existence_cpickle(
                'transmission_binned_mcmc_' + sampler_name + '_results', config_in['output'], lines_label)
            mcmc_night = check_existence_cpickle(
                'transmission_mcmc_' + sampler_name + '_results', config_in['output'], night, lines_label)
            mcmc_global = check_existence_cpickle(
                'transmission_mcmc_' + sampler_name + '_results', config_in['output'], lines_label)

            results_list = ['user']
            # BUGFIX: list.append() added the two names as one nested list, so
            # the membership test below could never match the MCMC selections;
            # extend() adds them as individual items.
            if (mcmc_night or binned_mcmc_night):
                results_list.extend(['mcmc_night_MED', 'mcmc_night_MAP'])
            if (mcmc_global or binned_mcmc_global):
                results_list.extend(['mcmc_global_MED', 'mcmc_global_MAP'])

            if results_selection not in results_list:
                print(' {0:s} results not found, skipping iteration'.format(results_selection))
                skip_iteration = True
                continue

            if mcmc_night and results_selection in ['mcmc_night_MED', 'mcmc_night_MAP']:
                mcmc_results_night = load_from_cpickle(
                    'transmission_mcmc_' + sampler_name + '_results', config_in['output'], night, lines_label)
                print(' Observational parameters from MCMC fit of unbinned data, individual night')
            elif mcmc_global and results_selection in ['mcmc_global_MED', 'mcmc_global_MAP']:
                mcmc_results_global = load_from_cpickle(
                    'transmission_mcmc_' + sampler_name + '_results', config_in['output'], lines=lines_label)
                print(' Observational parameters from MCMC fit of unbinned data, global fit')
            elif binned_mcmc_night and results_selection in ['mcmc_night_MED', 'mcmc_night_MAP']:
                mcmc_results_night = load_from_cpickle(
                    'transmission_binned_mcmc_' + sampler_name + '_results', config_in['output'], night, lines_label)
                print(' Observational parameters from MCMC fit of binned data, individual night')
            elif binned_mcmc_global and results_selection in ['mcmc_global_MED', 'mcmc_global_MAP']:
                mcmc_results_global = load_from_cpickle(
                    'transmission_binned_mcmc_' + sampler_name + '_results', config_in['output'], lines=lines_label)
                print(' Observational parameters from MCMC fit of binned data, global fit')
            else:
                print(' Observational parameters from configuration file')

            try:
                lightcurve = load_from_cpickle(subroutine_name + '_' + results_selection, config_in['output'], night, lines_label)
                print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
                continue
            except:
                print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
                print()

            preparation = load_from_cpickle(subroutine_name + '_preparation', config_in['output'], night, lines_label)

            """ Retrieving the list of observations"""
            lists = load_from_cpickle('lists', config_in['output'], night)
            observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

            processed = {
                'subroutine': 'compute_spectra_lightcurve',
                'range': preparation['range'],
                'wave': preparation['wave'],
                'step': preparation['step'],
                'size': preparation['size']
            }

            lightcurve = {
                'subroutine': subroutine_name,
                'arrays': {
                    'observations': {
                        # BUGFIX: dtype=str produces a '<U1' array that would
                        # truncate every observation name to a single character
                        'obs_name': np.zeros(len(lists['observations']), dtype=object),
                        'phase': np.zeros(len(lists['observations'])),
                    },
                    'transit_in': {},
                    'transit_full': {},
                    'transit_out': {},
                },
                'C_bands': C_bands,
                'S_bands': S_bands,
                'average': {},
                'bins': {
                    'transit_in_bins': transit_in_bins,
                    'transit_in_step': transit_in_step,
                    'transit_full_bins': transit_full_bins,
                    'transit_full_step': transit_full_step
                }
            }

            """ Adding the C-bands arrays to the dictionary"""
            for band_key in C_bands:
                for name_append in append_list:
                    lightcurve['arrays']['observations']['ratio_' + band_key + name_append] = np.zeros([len(lists['observations']), 2])

            transit_out_flag = np.zeros(len(lists['observations']), dtype=bool)
            transit_in_flag = np.zeros(len(lists['observations']), dtype=bool)
            transit_full_flag = np.zeros(len(lists['observations']), dtype=bool)

            if clv_rm_correction:
                try:
                    clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night, lines_label)
                except (FileNotFoundError, IOError):
                    clv_rm_models = load_from_cpickle('clv_rm_models', config_in['output'], night)

            """ Shift into planetary reference system is the default
                choice"""
            if results_selection == 'user':
                planet_R_factor = observational_pams.get('Rp_factor', 1.00000)
            elif results_selection == 'mcmc_night_MED':
                planet_R_factor = mcmc_results_night['results']['planet_R']
            elif results_selection == 'mcmc_night_MAP':
                planet_R_factor = mcmc_results_night['results_MAP']['planet_R']
            elif results_selection == 'mcmc_global_MED':
                planet_R_factor = mcmc_results_global['results']['planet_R']
            elif results_selection == 'mcmc_global_MAP':
                planet_R_factor = mcmc_results_global['results_MAP']['planet_R']

            for n_obs, obs in enumerate(lists['observations']):

                processed[obs] = {}
                lightcurve[obs] = {}

                processed[obs]['uncorrected'] = preparation[obs]['rebinned']
                processed[obs]['uncorrected_err'] = preparation[obs]['rebinned_err']

                if clv_rm_correction:
                    """" CLV + RM computation in the planetary reference frame """
                    processed[obs]['clv_model_stellarRF'] = interpolate1d_grid_nocheck(planet_R_factor,
                                                                                      clv_rm_models['common']['radius_grid'],
                                                                                      clv_rm_models[obs]['clv_rm_model_convolved_normalized'])

                    processed[obs]['clv_model_rebinned'] = \
                        rebin_1d_to_1d(clv_rm_models['common']['wave'],
                                       clv_rm_models['common']['step'],
                                       processed[obs]['clv_model_stellarRF'],
                                       processed['wave'],
                                       processed['step'],
                                       preserve_flux=False)

                    processed[obs]['rebinned'] = processed[obs]['uncorrected'] / processed[obs]['clv_model_rebinned']
                    processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err'] / processed[obs]['clv_model_rebinned']
                else:
                    processed[obs]['clv_model_rebinned'] = np.ones(processed['size'])
                    processed[obs]['rebinned'] = processed[obs]['uncorrected']
                    processed[obs]['rebinned_err'] = processed[obs]['uncorrected_err']

                    if print_warning:
                        print(' *** No CLV correction')
                        print_warning = False

                # time_of_transit may be stored as a scalar or as an array
                try:
                    phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'][0])/planet_dict['period'][0]
                except:
                    phase_internal = (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'])/planet_dict['period'][0]

                processed[obs]['bands'] = {
                    'phase': phase_internal
                }
                processed[obs]['bands_uncorrected'] = {
                    'phase': phase_internal
                }
                processed[obs]['bands_clv_model'] = {
                    'phase': phase_internal
                }

                # S (side/continuum) bands: integrated reference flux
                processed[obs]['s_integrated'] = 0.000
                processed[obs]['s_integrated_uncorrected'] = 0.000
                processed[obs]['s_integrated_clv_model'] = 0.000
                processed[obs]['s_sigmaq_sum'] = 0.000
                n_bands = 0.00
                for band_key, band_val in S_bands.items():

                    if do_average_instead_of_sum:
                        processed[obs]['bands'][band_key] = \
                            [np.average(processed[obs]['rebinned'][band_val]),
                             np.sum((processed[obs]['rebinned_err'][band_val])**2)
                             / len(processed[obs]['rebinned_err'][band_val])**2]

                        processed[obs]['bands_uncorrected'][band_key] = \
                            [np.average(processed[obs]['uncorrected'][band_val]),
                             np.sum((processed[obs]['uncorrected_err'][band_val])**2)
                             / len(processed[obs]['uncorrected_err'][band_val])**2]

                        processed[obs]['bands_clv_model'][band_key] = \
                            [np.average(processed[obs]['clv_model_rebinned'][band_val]),
                             np.sum((processed[obs]['rebinned_err'][band_val])**2)
                             / len(processed[obs]['rebinned_err'][band_val])**2]
                    else:
                        processed[obs]['bands'][band_key] = \
                            [np.sum(processed[obs]['rebinned'][band_val]),
                             np.sum((processed[obs]['rebinned_err'][band_val])**2)]

                        processed[obs]['bands_uncorrected'][band_key] = \
                            [np.sum(processed[obs]['uncorrected'][band_val]),
                             np.sum((processed[obs]['uncorrected_err'][band_val])**2)]

                        processed[obs]['bands_clv_model'][band_key] = \
                            [np.sum(processed[obs]['clv_model_rebinned'][band_val]),
                             np.sum((processed[obs]['rebinned_err'][band_val])**2)]

                    processed[obs]['s_integrated'] += processed[obs]['bands'][band_key][0]
                    processed[obs]['s_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][0]
                    processed[obs]['s_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][0]
                    processed[obs]['s_sigmaq_sum'] += processed[obs]['bands'][band_key][1]
                    n_bands += 1.

                # todo: why a 2 denominator???
                processed[obs]['s_integrated'] /= (n_bands / 2.)
                processed[obs]['s_integrated_uncorrected'] /= (n_bands / 2.)
                processed[obs]['s_integrated_clv_model'] /= (n_bands / 2.)
                processed[obs]['s_sigmaq_sum'] /= (n_bands / 2.)**2

                # C (central/line) bands: integrated in-line flux, per passband
                for band_key, band_dict in C_bands.items():
                    processed[obs]['bands'][band_key] = {}
                    processed[obs]['bands_uncorrected'][band_key] = {}
                    processed[obs]['bands_clv_model'][band_key] = {}

                    processed[obs]['c_integrated'] = 0.000
                    processed[obs]['c_integrated_uncorrected'] = 0.000
                    processed[obs]['c_integrated_clv_model'] = 0.000
                    processed[obs]['c_sigmaq_sum'] = 0.000

                    n_bands = 0.00
                    for line_key, line_val in band_dict.items():

                        if do_average_instead_of_sum:
                            processed[obs]['bands'][band_key][line_key] = \
                                [np.average(processed[obs]['rebinned'][line_val]),
                                 np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)
                                 / len(processed[obs]['rebinned_err'][line_val]) ** 2]
                            processed[obs]['bands_uncorrected'][band_key][line_key] = \
                                [np.average(processed[obs]['uncorrected'][line_val]),
                                 np.sum((processed[obs]['uncorrected_err'][line_val]) ** 2)
                                 / len(processed[obs]['rebinned_err'][line_val]) ** 2]
                            processed[obs]['bands_clv_model'][band_key][line_key] = \
                                [np.average(processed[obs]['clv_model_rebinned'][line_val]),
                                 np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)
                                 / len(processed[obs]['rebinned_err'][line_val]) ** 2]
                        else:
                            # NOTE(review): this branch only fills 'bands', so the
                            # '_uncorrected'/'_clv_model' accumulations below would
                            # fail; it is unreachable while
                            # do_average_instead_of_sum is hard-coded to True
                            processed[obs]['bands'][band_key][line_key] = \
                                [np.sum(processed[obs]['rebinned'][line_val]),
                                 np.sum((processed[obs]['rebinned_err'][line_val]) ** 2)]

                        processed[obs]['c_integrated'] += processed[obs]['bands'][band_key][line_key][0]
                        processed[obs]['c_integrated_uncorrected'] += processed[obs]['bands_uncorrected'][band_key][line_key][0]
                        processed[obs]['c_integrated_clv_model'] += processed[obs]['bands_clv_model'][band_key][line_key][0]
                        processed[obs]['c_sigmaq_sum'] += processed[obs]['bands'][band_key][line_key][1]
                        n_bands += 1.

                    processed[obs]['c_integrated'] /= (n_bands / 2.)
                    processed[obs]['c_integrated_uncorrected'] /= (n_bands / 2.)
                    processed[obs]['c_integrated_clv_model'] /= (n_bands / 2.)
                    processed[obs]['c_sigmaq_sum'] /= (n_bands / 2.) ** 2

                    # ratio of line flux over continuum flux, with propagated error
                    for name_append in append_list:
                        ratio = processed[obs]['c_integrated' + name_append] / processed[obs]['s_integrated' + name_append]
                        ratio_err = ratio * np.sqrt(
                            processed[obs]['c_sigmaq_sum'] / processed[obs]['c_integrated' + name_append] ** 2
                            + processed[obs]['s_sigmaq_sum'] / processed[obs]['s_integrated' + name_append] ** 2)

                        lightcurve[obs]['ratio_' + band_key + name_append] = [ratio, ratio_err]
                        lightcurve['arrays']['observations']['ratio_' + band_key + name_append][n_obs, :] = \
                            lightcurve[obs]['ratio_' + band_key + name_append][:]

                lightcurve[obs]['phase'] = processed[obs]['bands']['phase']
                lightcurve['arrays']['observations']['obs_name'][n_obs] = obs
                lightcurve['arrays']['observations']['phase'][n_obs] = lightcurve[obs]['phase']

                if obs in lists['transit_out']:
                    transit_out_flag[n_obs] = True
                if obs in lists['transit_in']:
                    transit_in_flag[n_obs] = True
                if obs in lists['transit_full']:
                    transit_full_flag[n_obs] = True

            # rescaling factor: out-of-transit weighted mean of each ratio
            for band_key in C_bands:
                for name_append in append_list:
                    lightcurve['arrays']['rescaling_' + band_key + name_append] = \
                        np.average(lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_out_flag, 0], axis=0)

            sorting_index = np.argsort(lightcurve['arrays']['observations']['phase'])
            transit_out_flag = transit_out_flag[sorting_index]
            transit_in_flag = transit_in_flag[sorting_index]
            transit_full_flag = transit_full_flag[sorting_index]

            lightcurve['arrays']['observations']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][sorting_index]
            lightcurve['arrays']['observations']['phase'] = lightcurve['arrays']['observations']['phase'][sorting_index]

            lightcurve['arrays']['transit_in']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_in_flag]
            lightcurve['arrays']['transit_in']['phase'] = lightcurve['arrays']['observations']['phase'][transit_in_flag]
            lightcurve['arrays']['transit_full']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_full_flag]
            lightcurve['arrays']['transit_full']['phase'] = lightcurve['arrays']['observations']['phase'][transit_full_flag]
            lightcurve['arrays']['transit_out']['obs_name'] = lightcurve['arrays']['observations']['obs_name'][transit_out_flag]
            lightcurve['arrays']['transit_out']['phase'] = lightcurve['arrays']['observations']['phase'][transit_out_flag]

            for band_key in C_bands:
                for name_append in append_list:
                    lightcurve['arrays']['observations']['ratio_' + band_key + name_append] = \
                        lightcurve['arrays']['observations']['ratio_' + band_key + name_append][sorting_index] \
                        / lightcurve['arrays']['rescaling_' + band_key + name_append]
                    lightcurve['arrays']['transit_in']['ratio_' + band_key + name_append] = \
                        lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_in_flag]
                    lightcurve['arrays']['transit_full']['ratio_' + band_key + name_append] = \
                        lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_full_flag]
                    lightcurve['arrays']['transit_out']['ratio_' + band_key + name_append] = \
                        lightcurve['arrays']['observations']['ratio_' + band_key + name_append][transit_out_flag]

                    avg_out, avg_out_sq = \
                        np.average(lightcurve['arrays']['transit_out']['ratio_' + band_key + name_append][:, 0],
                                   weights=1./(lightcurve['arrays']['transit_out']['ratio_' + band_key + name_append][:, 1])**2,
                                   returned=True)
                    avg_in, avg_in_sq = \
                        np.average(lightcurve['arrays']['transit_in']['ratio_' + band_key + name_append][:, 0],
                                   weights=1. / (lightcurve['arrays']['transit_in']['ratio_' + band_key + name_append][:, 1]) ** 2,
                                   returned=True)
                    avg_full, avg_full_sq = \
                        np.average(lightcurve['arrays']['transit_full']['ratio_' + band_key + name_append][:, 0],
                                   weights=1. / (lightcurve['arrays']['transit_full']['ratio_' + band_key + name_append][:, 1]) ** 2,
                                   returned=True)

                    lightcurve['average'][band_key + name_append] = {
                        'average_out': np.asarray([avg_out, 1./np.power(avg_out_sq, 0.5)]),
                        'average_in': np.asarray([avg_in, 1. / np.power(avg_in_sq, 0.5)]),
                        'average_full': np.asarray([avg_full, 1. / np.power(avg_full_sq, 0.5)]),
                    }

                    delta_fac = (lightcurve['average'][band_key + name_append]['average_full'][0]
                                 / lightcurve['average'][band_key + name_append]['average_out'][0])
                    delta_err = delta_fac * np.sqrt(
                        (lightcurve['average'][band_key + name_append]['average_out'][1]
                         / lightcurve['average'][band_key + name_append]['average_out'][0]) ** 2
                        + (lightcurve['average'][band_key + name_append]['average_full'][1]
                           / lightcurve['average'][band_key + name_append]['average_full'][0]) ** 2)

                    lightcurve['average'][band_key + name_append]['delta'] = np.asarray([(1.-delta_fac)*100., delta_err*100.])

            lightcurve['arrays']['observations']['transit_out_flag'] = transit_out_flag
            lightcurve['arrays']['observations']['transit_in_flag'] = transit_in_flag
            lightcurve['arrays']['observations']['transit_full_flag'] = transit_full_flag

            """ Compute the duration of the pre-transit observations, using as scale
                the number of bins, with the same size as those used inside the
                transit.
                The value is given by the difference of the phase of the beginning of the transit minus
                the phase of the first observation, keeping in mind that the centre of the transit has phase = 0
                An additional bin is added if there are observations left out from the actual number of bins
            """
            pre_duration = transit_full_bins[0] - lightcurve['arrays']['transit_out']['phase'][0]
            if pre_duration > 0:
                nsteps_pre = int(pre_duration/transit_full_step)
                if pre_duration % transit_full_step > 0.0:
                    nsteps_pre += 1
            else:
                nsteps_pre = 0

            """ same as pre-transit, but using the post-transit instead"""
            post_duration = lightcurve['arrays']['transit_out']['phase'][-1] - transit_full_bins[-1]
            if post_duration > 0:
                nsteps_post = int(post_duration / transit_full_step)
                if post_duration % transit_full_step > 0.0:
                    nsteps_post += 1
            else:
                nsteps_post = 0

            """ The full array with both in-transit and out-transit phase, built in such a way that
                - the lower boundary of the first in-transit bin corresponds to the beginning of the transit
                - the upper boundary of the last in-transit bin corresponds to the end of the transit
            """
            transit_bins = np.arange(transit_full_bins[0]-nsteps_pre*transit_full_step,
                                     transit_full_bins[-1] + (nsteps_post+1.1) * transit_full_step,
                                     transit_full_step)

            lightcurve['binned'] = {
                'observations': {
                    'phase': np.zeros(len(transit_bins)),
                },
                'transit_in': {},
                'transit_full': {},
                'transit_out': {},
            }

            for band_key in C_bands:
                for name_append in append_list:
                    lightcurve['binned']['observations']['ratio_' + band_key + name_append] = np.zeros([len(transit_bins), 2])

            transit_out_flag = np.zeros(len(transit_bins), dtype=bool)
            transit_in_flag = np.zeros(len(transit_bins), dtype=bool)
            transit_full_flag = np.zeros(len(transit_bins), dtype=bool)

            n_a = 0
            for nb in range(0, len(transit_bins)-1):
                sel = (lightcurve['arrays']['observations']['phase'] >= transit_bins[nb]) \
                    & (lightcurve['arrays']['observations']['phase'] < transit_bins[nb+1])

                if np.sum(sel) <= 0: continue

                lightcurve['binned']['observations']['phase'][n_a] = np.average(lightcurve['arrays']['observations']['phase'][sel])

                for band_key in C_bands:
                    for name_append in append_list:
                        lightcurve['binned']['observations']['ratio_' + band_key + name_append][n_a, 0], sum_weights = np.average(
                            lightcurve['arrays']['observations']['ratio_' + band_key + name_append][sel, 0],
                            weights=1. / lightcurve['arrays']['observations']['ratio_' + band_key + name_append][sel, 1]**2,
                            returned=True)
                        lightcurve['binned']['observations']['ratio_' + band_key + name_append][n_a, 1] = np.sqrt(1. / sum_weights)

                if np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
                        total_transit_duration/2./planet_dict['period'][0]:
                    transit_out_flag[n_a] = True
                elif np.abs(lightcurve['binned']['observations']['phase'][n_a]) >= \
                        full_transit_duration/2./planet_dict['period'][0]:
                    transit_in_flag[n_a] = True
                else:
                    transit_full_flag[n_a] = True

                n_a += 1  # bins actually computed

            lightcurve['binned']['transit_in']['phase'] = lightcurve['binned']['observations']['phase'][transit_in_flag]
            lightcurve['binned']['transit_full']['phase'] = lightcurve['binned']['observations']['phase'][transit_full_flag]
            lightcurve['binned']['transit_out']['phase'] = lightcurve['binned']['observations']['phase'][transit_out_flag]
            lightcurve['binned']['observations']['phase'] = lightcurve['binned']['observations']['phase'][:n_a]

            for band_key in C_bands:
                for name_append in append_list:
                    lightcurve['binned']['transit_in']['ratio_' + band_key + name_append] = \
                        lightcurve['binned']['observations']['ratio_' + band_key + name_append][transit_in_flag, :]
                    lightcurve['binned']['transit_full']['ratio_' + band_key + name_append] = \
                        lightcurve['binned']['observations']['ratio_' + band_key + name_append][transit_full_flag, :]
                    lightcurve['binned']['transit_out']['ratio_' + band_key + name_append] = \
                        lightcurve['binned']['observations']['ratio_' + band_key + name_append][transit_out_flag, :]
                    lightcurve['binned']['observations']['ratio_' + band_key + name_append] = \
                        lightcurve['binned']['observations']['ratio_' + band_key + name_append][:n_a, :]

            save_to_cpickle(subroutine_name + '_' + results_selection + '_processed', processed, config_in['output'], night, lines_label)
            save_to_cpickle(subroutine_name + '_' + results_selection, lightcurve, config_in['output'], night, lines_label)

    # Forcing memory deallocation
    lightcurve = None
    processed = None
    preparation = None
    clv_rm_models = None
def plot_spectra_lightcurve(config_in, night_input='', clv_rm_correction=False):
    """ Plot the spectral lightcurve of each C band, night by night.

    Args:
        config_in (dict): parsed SLOPpy configuration.
        night_input (str): restrict the plot to a single night; an empty
            string (default) selects every night in the configuration.
        clv_rm_correction (bool): if True, load the results saved under
            'spectra_lightcurve_clv_rm_correction' instead of the plain
            'spectra_lightcurve' pickle files.

    NOTE(review): compute_spectra_lightcurve now saves its pickles with a
    '_<results_selection>' suffix and the lines label, while this loader uses
    the legacy unsuffixed name - verify the two stay in sync.
    """
    import matplotlib.pyplot as plt

    if clv_rm_correction:
        subroutine_name = 'spectra_lightcurve_clv_rm_correction'
    else:
        subroutine_name = 'spectra_lightcurve'

    night_dict = from_config_get_nights(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        """ Retrieving the analysis"""
        # best-effort: a missing pickle means the analysis was not run for
        # this night (narrowed from a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit)
        try:
            lightcurve = load_from_cpickle(subroutine_name, config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Plotting'))
        except Exception:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Skipped'))
            continue

        #observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        C_bands = lightcurve['C_bands']

        print()
        for band_key in C_bands:
            print("Night: {0:s} Band: {1:s} Delta:{2:8.4f} +- {3:8.4f} [%]".format(night, band_key,
                lightcurve['average'][band_key]['delta'][0], lightcurve['average'][band_key]['delta'][1]))

        for band_key in C_bands:
            plt.figure(figsize=(12, 6))
            plt.title('Spectra lightcurve - night {0:s} \n {1:s}'.format(night, band_key))

            # individual observations (faint) and phase-binned points (solid)
            plt.errorbar(lightcurve['arrays']['observations']['phase'],
                         lightcurve['arrays']['observations']['ratio_' + band_key][:, 0]*100 - 100.,
                         yerr=lightcurve['arrays']['observations']['ratio_' + band_key][:, 1]*100,
                         fmt='.', c='k', alpha=0.25, label='observations')
            # NOTE(review): this reuses the 'observations' label, so the legend
            # shows two identical entries - possibly meant to be 'binned'
            plt.errorbar(lightcurve['binned']['observations']['phase'],
                         lightcurve['binned']['observations']['ratio_' + band_key][:, 0]*100 - 100.,
                         yerr=lightcurve['binned']['observations']['ratio_' + band_key][:, 1]*100,
                         fmt='.', c='k', alpha=1.0, label='observations')

            # shade the out-of-transit regions
            plt.axvspan(-1, lightcurve['bins']['transit_in_bins'][0], alpha=0.25, color='green')
            plt.axvspan(lightcurve['bins']['transit_in_bins'][-1], 1., alpha=0.25, color='green')

            plt.axhline(0, c='C1')
            plt.xlim(lightcurve['arrays']['observations']['phase'][0]-0.01,
                     lightcurve['arrays']['observations']['phase'][-1]+0.01)
            plt.xlabel('orbital phase')
            # raw string: '\m' is an invalid escape sequence in a plain literal
            # (SyntaxWarning on modern Python); the bytes are unchanged
            plt.ylabel(r'$\mathcal{R}$ - 1. [%]')
            plt.legend()
            plt.show()
        print()
| 36,599 | 50.260504 | 144 | py |
SLOPpy | SLOPpy-main/SLOPpy/differential_refraction.bkp.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_differential_refraction", "plot_differential_refraction"]
subroutine_name = 'differential_refraction'
def compute_differential_refraction(config_in):
night_dict = from_config_get_nights(config_in)
print()
for night in night_dict:
try:
refraction = load_from_cpickle('refraction', config_in['output'], night)
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
continue
except:
print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
print()
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(config_in['output'], night, lists['observations'],
use_refraction=False, use_telluric=False)
observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
try:
processed = load_from_cpickle('refraction_processed_halfway', config_in['output'], night)
refraction = load_from_cpickle('refraction_halfway', config_in['output'], night)
print(" Starting from intermediate step ")
except:
processed = {
'subroutine': subroutine_name,
'coadd': {
'wave': input_data['coadd']['wave'],
'size': input_data['coadd']['size'],
'step': input_data['coadd']['step'],
#'flux': np.zeros(input_data['coadd']['size'], dtype=np.double),
#'flux_err': np.zeros(input_data['coadd']['size'], dtype=np.double)
}
}
refraction = {
'subroutine': 'differential_refraction',
'wave': processed['coadd']['wave']
}
total_flux = np.empty([len(lists['observations']), input_data['coadd']['size']], dtype=np.double)
total_wght = np.zeros([len(lists['observations']), input_data['coadd']['size']], dtype=np.double)
total_mask = np.ones([len(lists['observations']), input_data['coadd']['size']], dtype=bool)
print(" Chebyshev polynomial order for differential refraction fit: ",
observational_pams['refraction_poly_order'])
print(" Number of iterations: ",
observational_pams['refraction_poly_iters'])
print()
""" Rebinning of all the spectra """
for n_obs, obs in enumerate(lists['observations']):
print(" Spectral rebinning - Processing: ", obs)
processed[obs] = {}
""" Rebinning of the spectra in the SRF, except for a fixed constant in order to minimize
the difference between """
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['flux_rebinned_stellarRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
processed[obs]['err_flux_rebinned_SRF'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
input_data[obs]['e2ds_err'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'],
is_error=True)
""" Zero or negative values are identified, flagged and substituted with another value """
processed[obs]['flux_rebinned_stellarRF'], \
processed[obs]['err_flux_rebinned_SRF'], \
processed[obs]['flux_rebinned_SRF_null'] = \
replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF'],
processed[obs]['err_flux_rebinned_SRF'],
force_positive=True)
processed[obs]['rescaling'], processed[obs]['rescaled'], processed[obs]['rescaled_err'] = \
perform_rescaling(processed['coadd']['wave'],
processed[obs]['flux_rebinned_stellarRF'],
processed[obs]['err_flux_rebinned_SRF'],
observational_pams['wavelength_rescaling'])
processed[obs]['rescaled_blazed'] = input_data[obs]['e2ds'] \
/ processed[obs]['rescaling'] \
/ calib_data['blaze']
if obs in lists['telluric']:
total_flux[n_obs, :] = processed[obs]['rescaled']
total_mask[n_obs, :] = processed[obs]['flux_rebinned_SRF_null']
total_wght[n_obs, :] = 1. / (processed[obs]['rescaled_err'] ** 2)
# processed['coadd']['flux'] += processed[obs]['flux_rebinned_stellarRF']
# """ SNR (assumed to be the square root of the flux) is added in quadrature """
# processed['coadd']['flux_err'] += processed[obs]['err_flux_rebinned_SRF'] ** 2
print(" Observation added to reference spectrum")
#masked_array = np.ma.array(total_flux, mask=total_mask)
#processed['coadd']['rescaled'], sum_weights = np.ma.average(masked_array,
# weights=total_wght,
# axis=0,
# returned=True)
# processed['coadd']['rescaled'][sum_weights <= 0.0001] = 1.000
# sum_weights[sum_weights <= 0.0001] = 0.0001
# processed['coadd']['rescaled_err'] = 1. / np.sqrt(sum_weights)
masked_array = np.ma.array(total_flux, mask=total_mask)
rescaled_mask, sum_weights = np.ma.average(masked_array,
weights=total_wght,
axis=0,
returned=True)
processed['coadd']['rescaled'] = rescaled_mask.filled(0.00)
sum_weights[sum_weights <= 0.0] = 1.0
processed['coadd']['rescaled_err'] = 1. / np.sqrt(sum_weights)
processed['coadd']['rescaled'], processed['coadd']['rescaled_err'], processed['coadd']['null'] = \
replace_values_errors_with_interpolation_1d(processed['coadd']['rescaled'],
processed['coadd']['rescaled_err'],
force_positive=True)
save_to_cpickle('refraction_processed_halfway', processed, config_in['output'], night)
save_to_cpickle('refraction_halfway', refraction, config_in['output'], night)
""" Now each observation is divided by the reference spectrum, after being redshifted in the observer RF
The result is then used to model the flux variation
"""
for obs in lists['observations']:
print(" Division by reference spectrum and fit of the flux variation: ", obs)
preserve_flux = input_data[obs].get('absolute_flux', True)
""" Going back to the observer RF and rebinning the spectrum into the observed orders """
processed[obs]['master_flux'] = \
rebin_1d_to_2d(processed['coadd']['wave'],
processed['coadd']['step'],
processed['coadd']['rescaled'],
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=preserve_flux,
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'])
processed[obs]['master_ferr'] = \
rebin_1d_to_2d(processed['coadd']['wave'],
processed['coadd']['step'],
processed['coadd']['rescaled_err'],
input_data[obs]['wave'],
input_data[obs]['step'],
preserve_flux=preserve_flux,
rv_shift=-observational_pams[obs]['rv_shift_ORF2SRF_mod'],
is_error=True)
""" Zero or negative values are identified, flagged and substituted with another value """
processed[obs]['master_flux'], processed[obs]['master_ferr'], processed[obs]['master_null'] = \
replace_values_errors_with_interpolation_2d(processed[obs]['master_flux'],
processed[obs]['master_ferr'],
less_than=0.001)
processed[obs]['ratio'] = processed[obs]['rescaled_blazed'] / processed[obs]['master_flux']
"""
processed[obs]['ratio'] = input_data[obs]['e2ds']\
/processed[obs]['rescaling']\
/ (processed[obs]['master_flux'] * calib_data['blaze'])
"""
refraction[obs] = {}
refraction[obs]['polyfit_e2ds'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
processed[obs]['residuals'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']])
refraction[obs]['poly_flag'] = np.zeros([input_data[obs]['n_orders'], input_data[obs]['n_pixels']],
dtype=bool)
for order in range(0, input_data[obs]['n_orders']):
order_coeff_name = 'order_' + repr(order)
refraction[obs]['poly_flag'][order, :] = (processed[obs]['ratio'][order, :] > 0.1)
refraction[obs]['poly_flag'][order, :50] = False
refraction[obs]['poly_flag'][order, -50:] = False
for n_iter in range(0, observational_pams['refraction_poly_iters']):
refraction[obs][order_coeff_name] = np.polynomial.chebyshev.chebfit(
input_data[obs]['wave'][order, refraction[obs]['poly_flag'][order, :]],
processed[obs]['ratio'][order, refraction[obs]['poly_flag'][order, :]],
observational_pams['refraction_poly_order'])
refraction[obs]['polyfit_e2ds'][order, :] = \
np.polynomial.chebyshev.chebval(input_data[obs]['wave'][order, :],
refraction[obs][order_coeff_name])
processed[obs]['residuals'][order, :] = refraction[obs]['polyfit_e2ds'][order, :]\
- processed[obs]['ratio'][order, :]
if n_iter < observational_pams['refraction_poly_iters'] - 1:
std = np.std(processed[obs]['residuals'][order, :])
refraction[obs]['poly_flag'][order, :] = (refraction[obs]['poly_flag'][order, :]) \
& (np.abs(processed[obs]['residuals'][order, :]) <
observational_pams['refraction_poly_sigma'] * std)
processed[obs]['e2ds_corrected'] = input_data[obs]['e2ds'] / refraction[obs]['polyfit_e2ds']
processed[obs]['e2ds_corrected_err'] = input_data[obs]['e2ds_err'] / refraction[obs]['polyfit_e2ds']
preserve_flux = input_data[obs].get('absolute_flux', True)
processed[obs]['flux_rebinned_stellarRF_corrected'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_corrected'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'])
processed[obs]['err_flux_rebinned_SRF_corrected'] = \
rebin_2d_to_1d(input_data[obs]['wave'],
input_data[obs]['step'],
processed[obs]['e2ds_corrected_err'],
calib_data['blaze'],
processed['coadd']['wave'],
processed['coadd']['step'],
preserve_flux=preserve_flux,
rv_shift=observational_pams[obs]['rv_shift_ORF2SRF_mod'],
is_error=True)
processed[obs]['flux_rebinned_stellarRF_corrected'], \
processed[obs]['err_flux_rebinned_SRF_corrected'], _ = \
replace_values_errors_with_interpolation_1d(processed[obs]['flux_rebinned_stellarRF_corrected'],
processed[obs]['err_flux_rebinned_SRF_corrected'],
less_than=0.001)
save_to_cpickle('refraction_processed', processed, config_in['output'], night)
save_to_cpickle('refraction', refraction, config_in['output'], night)
def plot_differential_refraction(config_in, night_input=''):
    """Diagnostic plots for the differential-refraction correction.

    For each selected night, four figures are shown:
      1. rebinned input spectra vs. the coadded reference spectrum, before and
         after the correction;
      2. the ratio observation/master together with its polynomial fit;
      3. the residuals of that fit;
      4. the corrected and rescaled e2ds spectra.

    :param config_in: parsed SLOPpy configuration dictionary
    :param night_input: restrict plotting to one night; all nights when empty
    """
    night_dict = from_config_get_nights(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        input_data = retrieve_observations(config_in['output'], night, lists['observations'],
                                           use_refraction=False, use_telluric=False)

        try:
            """ Retrieving the analysis"""
            processed = load_from_cpickle('refraction_processed', config_in['output'], night)
            refraction = load_from_cpickle('refraction', config_in['output'], night)
        except Exception:
            # was a bare `except:`; Exception keeps KeyboardInterrupt/SystemExit usable
            print(" Failed in retrieving processed data")
            return

        """ Creation of the color array, based on the BJD of the observations """
        bjd = []
        am = []
        for obs in lists['observations']:
            bjd.append(input_data[obs]['BJD'] - 2450000.0)
            am.append(input_data[obs]['AIRMASS'])

        color_cmap = plt.cm.viridis
        color_norm = plt.Normalize(vmin=bjd[0], vmax=bjd[-1])
        colors = color_cmap(color_norm(np.asarray(bjd)))

        offset = 0.10
        y_limits = [0.8, 1.2]

        # NOTE(review): a large superseded per-order plotting block (previously kept
        # as a triple-quoted no-op string) has been removed here as dead code.

        """
        PLOT: input vs. corrected spectra, against the reference spectrum
        """
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):
            # drop the alpha channel: scatter expects RGB rows here
            color = [colors[i][:-1]]
            if i == 0:
                ax1.scatter(processed['coadd']['wave'],
                            processed[obs]['flux_rebinned_stellarRF'] / processed[obs]['rescaling'],
                            c=color, s=2, alpha=0.2, label='observation')
            else:
                ax1.scatter(processed['coadd']['wave'],
                            processed[obs]['flux_rebinned_stellarRF'] / processed[obs]['rescaling'],
                            c=color, s=2, alpha=0.2)
            ax2.scatter(processed['coadd']['wave'],
                        processed[obs]['flux_rebinned_stellarRF_corrected'] / processed[obs]['rescaling'],
                        c=color, s=3, alpha=0.2)

        ax1.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1, label='reference spectrum')
        ax2.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
        ax1.set_xlim(processed['coadd']['wave'][0], processed['coadd']['wave'][-1])
        ax1.set_ylim(y_limits)
        ax2.set_ylim(y_limits)
        ax1.legend(loc=1)
        ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
        ax2.set_title('Corrected spectra')
        # raw string: '\l' and '\A' are invalid escape sequences in a plain literal
        ax2.set_xlabel(r'$\lambda$ [$\AA$]')
        sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        """
        PLOT: ratio obs/master and its polynomial fit
        """
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])
        ax = plt.subplot(gs[0, 0])
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            if i == 0:
                # vertical span and stacking offset derived from the first observation
                offset = np.std(processed[obs]['ratio'][refraction[obs]['poly_flag']].flatten()) * 6
                average = np.average(processed[obs]['ratio'][refraction[obs]['poly_flag']].flatten())
                y_limits = [average - offset, average + offset]

            color = [colors[i][:-1]]

            for order in range(0, input_data[obs]['n_orders']):
                ax.scatter(input_data[obs]['wave'][refraction[obs]['poly_flag']],
                           processed[obs]['ratio'][refraction[obs]['poly_flag']] + offset * i,
                           s=1, c=color, alpha=0.50, zorder=2)
                ax.scatter(input_data[obs]['wave'][~refraction[obs]['poly_flag']],
                           processed[obs]['ratio'][~refraction[obs]['poly_flag']] + offset * i,
                           s=2, c='k', alpha=0.05, zorder=1)
                ax.plot(input_data[obs]['wave'][order, :],
                        refraction[obs]['polyfit_e2ds'][order, :] + offset * i,
                        c='k', lw=1, zorder=5)

            y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
                               max(y_limits[1] + offset * i, y_limits[1])]

        ax.set_ylim(y_limits_offset)
        ax.set_xlabel(r'$\lambda$ [$\AA$]')
        ax.legend(loc=3)
        ax.set_title('Night: {0:s} \n Fit of the ratio obs/master'.format(night))
        sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        """
        PLOT: residuals of the fit
        """
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])
        ax = plt.subplot(gs[0, 0])
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):

            if i == 0:
                median = np.median(processed[obs]['residuals'][refraction[obs]['poly_flag']].flatten())
                offset = np.std(processed[obs]['residuals'][refraction[obs]['poly_flag']].flatten()) * 6
                y_limits = [median - offset, median + offset]

            color = colors[i][:-1]

            for order in range(0, input_data[obs]['n_orders']):
                # Workaround to damn stupid matplotlib error I didn't manage to solve
                ax.scatter(input_data[obs]['wave'][refraction[obs]['poly_flag']],
                           processed[obs]['residuals'][refraction[obs]['poly_flag']] + offset * i,
                           s=1, c=[color], alpha=0.50, zorder=2)
                ax.scatter(input_data[obs]['wave'][~refraction[obs]['poly_flag']],
                           processed[obs]['residuals'][~refraction[obs]['poly_flag']] + offset * i,
                           s=2, c='k', alpha=0.05, zorder=1)

            # baseline of this observation's (offset) residuals
            ax.axhline(offset * i, c='k', zorder=3)

            y_limits_offset = [min(y_limits[0] + offset * i, y_limits[0]),
                               max(y_limits[1] + offset * i, y_limits[1])]

        ax.set_ylim(y_limits_offset)
        ax.set_xlabel(r'$\lambda$ [$\AA$]')
        ax.legend(loc=3)
        ax.set_title('Night: {0:s} \n Residuals of the fit on ratio obs/master'.format(night))
        sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()

        """
        PLOT: corrected e2ds
        """
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(1, 2, width_ratios=[50, 1])
        ax = plt.subplot(gs[0, 0])
        cbax1 = plt.subplot(gs[:, 1])

        for i, obs in enumerate(lists['observations']):
            color = [colors[i][:-1]]
            ax.scatter(input_data[obs]['wave'][refraction[obs]['poly_flag']],
                       processed[obs]['e2ds_corrected'][refraction[obs]['poly_flag']] / processed[obs]['rescaling'],
                       s=2, c=color, alpha=0.10)
            ax.scatter(input_data[obs]['wave'][~refraction[obs]['poly_flag']],
                       processed[obs]['e2ds_corrected'][~refraction[obs]['poly_flag']] / processed[obs]['rescaling'],
                       s=2, c='k', alpha=0.05)

        ax.set_xlabel(r'$\lambda$ [$\AA$]')
        ax.set_title('Night: {0:s} \n Corrected and rescaled e2ds spectra'.format(night))
        sm = plt.cm.ScalarMappable(cmap=color_cmap, norm=color_norm)
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
SLOPpy | SLOPpy-main/SLOPpy/telluric_molecfit_preparation.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_molecfit_preparation",
"plot_telluric_molecfit_preparation"]
def compute_telluric_molecfit_preparation(config_in):
    """Prepare the per-night molecfit working directory and inclusion wavelength ranges.

    For each night the routine:
      1. creates the molecfit work directory and its ``output/`` subdirectory;
      2. reads the telluric and stellar inclusion ranges from the files listed
         in the ``molecfit`` configuration section;
      3. shifts the stellar ranges (given in the stellar rest frame, SRF) to the
         observer reference frame (ORF) and intersects them with the telluric
         ranges, so that the final ranges contain telluric lines but no strong
         stellar lines;
      4. converts the selected ranges from air to vacuum wavelengths, as
         required by molecfit;
      5. saves everything to the 'telluric_molecfit_preparation' pickle.

    Nights whose pickle already exists are skipped.

    :param config_in: parsed SLOPpy configuration dictionary
    """
    night_dict = from_config_get_nights(config_in)
    molecfit_dict = from_config_get_molecfit(config_in)

    for night in night_dict:

        try:
            # results already on disk for this night: nothing to do
            tellprep = load_from_cpickle('telluric_molecfit_preparation', config_in['output'], night)
            continue
        except Exception:
            # was a bare `except:`; Exception keeps KeyboardInterrupt/SystemExit usable
            print()
            print("compute_telluric_molecfit_preparation Night: ", night)
            print()

        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        tellprep = {
            'work_dir': config_in['output'] + '_molecfit_' + night,
            'include': {}
        }

        """
        We store all the molecfit files in a subdirectory
        We save the path of the main directory to a temporary file
        """
        # os.makedirs replaces the former `os.system('mkdir -p ...')` calls:
        # portable, no shell involved, idempotent thanks to exist_ok=True
        os.makedirs(tellprep['work_dir'], exist_ok=True)
        os.makedirs(tellprep['work_dir'] + '/output/', exist_ok=True)

        """
        Creation of the include files

        includes_spans_ORF: wavelength ranges _with_ telluric lines, in the ORF
        includes_spans_SRF: wavelength ranges _without_ stellar lines and broadly
                            overlapping with telluric ranges, in the SRF
        the two lists must have the same number of columns, with precise correspondence
        """
        tellprep['include']['spans_telluric'] = np.genfromtxt(molecfit_dict['include_telluric'])
        tellprep['include']['spans_stellar_SRF'] = np.genfromtxt(molecfit_dict['include_stellar'])

        """ shift the stellar wavelength ranges into ORF """
        tellprep['include']['rv_shift_SRF2ORF'] = -observational_pams['BERV_avg'] + observational_pams['RV_star'][
            'RV_systemic']

        # non-relativistic Doppler shift; 299792458./1000. is the speed of light in km/s
        tellprep['include']['spans_stellar'] = tellprep['include']['spans_stellar_SRF'] \
            * (tellprep['include']['rv_shift_SRF2ORF']
               / (299792458. / 1000.000) + 1.00000)

        """ Selecting the overlapping regions between the two lists: we want telluric regions
        that are not contaminated by stellar lines
        """
        sel_lower = (tellprep['include']['spans_stellar'][:, 0] > tellprep['include']['spans_telluric'][:, 0])
        sel_upper = (tellprep['include']['spans_stellar'][:, 1] < tellprep['include']['spans_telluric'][:, 1])

        """ Final list in the ORF is built"""
        tellprep['include']['selected'] = tellprep['include']['spans_telluric'].copy()
        tellprep['include']['selected'][sel_lower, 0] = tellprep['include']['spans_stellar'][sel_lower, 0]
        tellprep['include']['selected'][sel_upper, 1] = tellprep['include']['spans_stellar'][sel_upper, 1]

        """ Molecfit line list must be given in vacuum wavelength, even if the stellar spectra
        is in air wavelength; conversion from air to vacuum for include file preparation,
        where s = 10000 / lambda_air and lambda_vac = lambda_air * n.
        http://www.astro.uu.se/valdwiki/Air-to-vacuum%20conversion
        """
        s2 = (10000. / tellprep['include']['selected']) ** 2
        n = 1 + 0.00008336624212083 + 0.02408926869968 / (130.1065924522 - s2) + 0.0001599740894897 / (
            38.92568793293 - s2)
        # /10000.: presumably converts Angstrom to the micron units molecfit expects — TODO confirm
        tellprep['include']['vacuum'] = tellprep['include']['selected'] * n / 10000.

        save_to_cpickle('telluric_molecfit_preparation', tellprep, config_in['output'], night)
def plot_telluric_molecfit_preparation(config_in, night_input=''):
    """Placeholder plotting routine for the molecfit preparation step.

    Currently it only reports the nights it would process; no figure is drawn.

    :param config_in: parsed SLOPpy configuration dictionary
    :param night_input: restrict to one night; all nights when empty
    """
    night_dict = from_config_get_nights(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:
        # message now names this subroutine (it previously said "plot_telluric_template")
        print("plot_telluric_molecfit_preparation Night: ", night)
SLOPpy | SLOPpy-main/SLOPpy/telluric_airmass_observerRF.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["compute_telluric_airmass_berv_observerRF",
"plot_telluric_airmass_berv_observerRF",
"compute_telluric_airmass_observerRF",
"plot_telluric_airmass_observerRF",
"compute_telluric_airmass_reference_observerRF",
"plot_telluric_airmass_reference_observerRF",
"compute_telluric_airmass_berv_reference_observerRF",
"plot_telluric_airmass_berv_reference_observerRF"
]
def compute_telluric_airmass_berv_observerRF(config_in):
    """Telluric correction in the observer RF, regressing on airmass and BERV."""
    options = {
        'n_iterations': 1,
        'use_berv': True,
        'use_reference_airmass': False,
        'subroutine_name': 'telluric_airmass_berv_observerRF',
    }
    compute_telluric_observerRF(config_in, **options)
def compute_telluric_airmass_observerRF(config_in):
    """Telluric correction in the observer RF, regressing on airmass only."""
    options = {
        'n_iterations': 1,
        'use_berv': False,
        'use_reference_airmass': False,
        'subroutine_name': 'telluric_airmass_observerRF',
    }
    compute_telluric_observerRF(config_in, **options)
def compute_telluric_airmass_reference_observerRF(config_in):
    """Telluric correction in the observer RF, airmass regression rescaled to the reference airmass."""
    options = {
        'n_iterations': 1,
        'use_berv': False,
        'use_reference_airmass': True,
        'subroutine_name': 'telluric_airmass_reference_observerRF',
    }
    compute_telluric_observerRF(config_in, **options)
def compute_telluric_airmass_berv_reference_observerRF(config_in):
    """Telluric correction in the observer RF, airmass+BERV regression rescaled to the reference airmass."""
    options = {
        'n_iterations': 1,
        'use_berv': True,
        'use_reference_airmass': True,
        'subroutine_name': 'telluric_airmass_berv_reference_observerRF',
    }
    compute_telluric_observerRF(config_in, **options)
def plot_telluric_airmass_berv_observerRF(config_in, night_input):
    """Alias kept so configuration files can reference this plot by name."""
    return plot_telluric_airmass_observerRF(config_in, night_input)
def plot_telluric_airmass_reference_observerRF(config_in, night_input):
    """Alias kept so configuration files can reference this plot by name."""
    return plot_telluric_airmass_observerRF(config_in, night_input)
def plot_telluric_airmass_berv_reference_observerRF(config_in, night_input):
    """Alias kept so configuration files can reference this plot by name."""
    return plot_telluric_airmass_observerRF(config_in, night_input)
def compute_telluric_observerRF(config_in, **kwargs):
    """Build and apply a telluric absorption correction in the observer reference frame.

    Shared driver behind the ``compute_telluric_airmass*_observerRF`` wrappers.

    Expected keyword arguments (all supplied by the wrappers):
      n_iterations (int): number of fit iterations (wrappers always pass 1)
      use_berv (bool): add a BERV-dependent term to the pixel-by-pixel fit
      use_reference_airmass (bool): rescale the correction to the average airmass
          of the in-transit observations instead of airmass zero
      subroutine_name (str): label stored in the output dictionaries
      use_template (bool, optional): rescale an external telluric template to the
          fitted absorption instead of using the fit directly

    For each night, log(flux) of the 'telluric' observations is fitted pixel by
    pixel as a linear function of airmass (optionally with BERV); results are
    saved to the 'telluric' and 'telluric_processed' pickles.
    """
    night_dict = from_config_get_nights(config_in)

    for night in night_dict:
        print()
        print("compute_telluric_airmass_observerRF Night: ", night)

        try:
            # skip nights that already have a telluric correction on disk
            telluric = load_from_cpickle('telluric', config_in['output'], night)
            continue
        except:
            print("No telluric correction file found, computing now ")
            print()

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)

        """ Retrieving the observations"""
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'], use_telluric=False)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        processed = {
            'subroutine': kwargs['subroutine_name'],
            'n_orders': 0,
            'n_pixels': 0
        }

        telluric = {
            'subroutine': kwargs['subroutine_name'],
            'reference_frame': 'observer'
        }

        # There must be a more elegant way to do this, but I'm, not aware of it
        for obs in lists['observations']:

            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }

            """ for plotting purpose only"""
            processed[obs]['wave'] = input_data[obs]['wave']
            processed[obs]['e2ds'] = input_data[obs]['e2ds']
            processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']

            processed[obs]['e2ds_rescaling'], processed[obs]['e2ds_rescaled'], processed[obs]['e2ds_rescaled_err'] = \
                perform_rescaling(input_data[obs]['wave'],
                                  input_data[obs]['e2ds'],
                                  input_data[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            # grid size taken from the first observation
            if processed['n_orders'] == 0:
                processed['n_orders'] = input_data[obs]['orders']
                processed['n_pixels'] = input_data[obs]['wave_size']

        """ Reference airmass for iterative correction of airmass"""
        if kwargs['use_reference_airmass']:
            airmass_temp = np.zeros(lists['n_transit_in'])
            for n_obs, obs in enumerate(lists['transit_in']):
                # This is to ensure that airmass, berv and rvc are associated to the correct spectra
                airmass_temp[n_obs] = input_data[obs]['AIRMASS']
            processed['airmass_ref'] = np.average(airmass_temp)
        else:
            processed['airmass_ref'] = 0.000

        for obs in lists['observations']:
            processed[obs]['e2ds_precorrected'] = processed[obs]['e2ds_rescaled'][:]
            processed[obs]['e2ds_precorrected_err'] = input_data[obs]['e2ds_err'] / processed[obs]['e2ds_rescaling']

        for niter in range(0, kwargs['n_iterations']):
            if kwargs['n_iterations'] > 1:
                print("NITER: ", niter)

            # the fit is performed on log(flux): absorption becomes linear in airmass
            for obs in lists['telluric']:
                processed[obs]['logI'] = np.log(processed[obs]['e2ds_precorrected'])
                processed[obs]['logI_err'] = processed[obs]['e2ds_precorrected_err']/processed[obs]['e2ds_precorrected']

            processed['telluric'] = {}

            abs_slope = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            line_shift = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            zero_point = np.ones([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            # pearson_r / pearson_p are allocated but not filled anywhere below
            pearson_r = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)
            pearson_p = np.zeros([processed['n_orders'], processed['n_pixels']], dtype=np.double)

            airmass = np.zeros(lists['n_tellurics'], dtype=np.double)
            berv = np.zeros(lists['n_tellurics'], dtype=np.double)
            rvc = np.zeros(lists['n_tellurics'], dtype=np.double)

            for n_obs, obs in enumerate(lists['telluric']):
                # This is to ensure that airmass, berv and rvc are associated to the correct spectra
                processed['telluric'][obs] = {'n_obs': n_obs}
                airmass[n_obs] = input_data[obs]['AIRMASS']
                berv[n_obs] = input_data[obs]['BERV']
                rvc[n_obs] = input_data[obs]['RVC']

            for order in range(0, processed['n_orders']):
                logi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)
                sigi_array = np.empty([lists['n_tellurics'], processed['n_pixels']], dtype=np.double)

                for obs in lists['telluric']:
                    n_obs = processed['telluric'][obs]['n_obs']
                    logi_array[n_obs, :] = processed[obs]['logI'][order, :]
                    sigi_array[n_obs, :] = processed[obs]['logI_err'][order, :]

                """ The user has the option to select between different approaches to
                    extract the telluric absorption spectrum
                    To-Do: move this section to a subroutine for cythonization"""
                if kwargs['use_berv']:
                    if observational_pams['linear_fit_method'] == 'linear_curve_fit':
                        abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
                            berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, processed['n_pixels'])
                    else:
                        abs_slope[order, :], line_shift[order, :], zero_point[order, :] = \
                            berv_linear_lstsq(airmass, berv, logi_array)
                else:
                    if observational_pams['linear_fit_method'] == 'linear_curve_fit':
                        abs_slope[order, :], zero_point[order, :] = \
                            airmass_linear_curve_fit(airmass, logi_array, sigi_array, processed['n_pixels'])
                        #abs_slope[order, :], zero_point[order, :] = \
                        #    airmass_linear_curve_fit_ransac(airmass, logi_array, sigi_array, processed['n_pixels'])

                        #obs_ref = lists['observations'][0]
                        #plt.plot(processed[obs_ref]['wave'][order,:], processed[obs_ref]['e2ds_rescaled'][order,:])
                        #for iii in range(0, processed['n_pixels']):
                        #    if iii < 3700 or iii > 3720: continue
                        #    plt.axvline(processed[obs_ref]['wave'][order,iii])
                        #plt.show()
                        #
                        #ik=0
                        #air_arr = np.arange(1.2, 2.5, 0.1)
                        #for iii in range(0, processed['n_pixels']):
                        #
                        #    if iii < 3700 or iii > 3720: continue
                        #    plt.errorbar(airmass, logi_array[:, iii]+ik, yerr=sigi_array[:, iii], fmt='o')
                        #    print(np.exp(abs_slope[order, iii]))
                        #    plt.plot(air_arr, air_arr*abs_slope[order, iii] + zero_point[order, iii]+ik)
                        #    ik -= 0.20
                        #plt.show()
                    else:
                        abs_slope[order, :], zero_point[order, :] = \
                            airmass_linear_lstsq(airmass, logi_array)
                        # NOTE(review): stray plt.show() — looks like a debugging leftover; confirm
                        plt.show()

                """ Saving the outcome to dictionary """
                processed['telluric']['order_'+repr(order)] = {'logi_array': logi_array, 'sigi_array': sigi_array}

        if kwargs.get('use_template', False):
            # rescale an external telluric template to the absorption derived from the fit
            telluric_template_data = np.genfromtxt(night_dict[night]['telluric_template'])
            spectrum_noairmass = np.exp(abs_slope)

            obs_reference = lists['observations'][0]
            telluric['template'] = {
                'input':{
                    'wave': telluric_template_data[:, 0],
                    'flux': telluric_template_data[:, 1],
                    'ferr': telluric_template_data[:, 2],
                    'step': telluric_template_data[:, 3]
                },
                'rebinned':{
                    'wave': input_data[obs_reference]['wave'],
                    'step': input_data[obs_reference]['step']
                }
            }

            telluric['template']['rebinned']['flux'] = \
                rebin_1d_to_2d(telluric['template']['input']['wave'],
                               telluric['template']['input']['step'],
                               telluric['template']['input']['flux'],
                               telluric['template']['rebinned']['wave'],
                               telluric['template']['rebinned']['step'],
                               preserve_flux=False)
            telluric['template']['rebinned']['ferr'] = \
                rebin_1d_to_2d(telluric['template']['input']['wave'],
                               telluric['template']['input']['step'],
                               telluric['template']['input']['ferr'],
                               telluric['template']['rebinned']['wave'],
                               telluric['template']['rebinned']['step'],
                               preserve_flux=False,
                               is_error=True)

            # NOTE(review): interactive diagnostic plots in a compute routine — confirm they are wanted
            plt.plot(telluric['template']['input']['wave'], telluric['template']['input']['flux'], zorder=1, c='C0')
            plt.scatter(telluric['template']['rebinned']['wave'], telluric['template']['rebinned']['flux'],zorder=2, s=2)
            plt.scatter(telluric['template']['rebinned']['wave'],spectrum_noairmass, alpha=0.5, s=1, zorder=3)

            factor_list = []
            slope_list = []
            for order in range(0, processed['n_orders']):
                fit_selection = (telluric['template']['rebinned']['flux'][order, :] < 1.0)
                # Check if there are telluric lines in this wavelength range
                if np.sum(fit_selection) > 30:
                    #telluric_factor, telluric_slope, success_flag = find_telluric_rescaling_factor(
                    #    spectrum_noairmass[order, :],
                    #    telluric['template']['rebinned']['flux'][order, :]
                    #)
                    telluric_factor, telluric_slope, success_flag = find_telluric_rescaling_factor_2steps(
                        spectrum_noairmass[order, :],
                        telluric['template']['rebinned']['flux'][order, :]
                    )
                    if success_flag:
                        factor_list.extend([telluric_factor])
                        slope_list.extend([telluric_slope])

            # median over orders when more than one successful fit is available
            if len(factor_list)>1:
                telluric_slope = np.median(slope_list)
                telluric_factor = np.median(factor_list)
            elif len(factor_list) == 1:
                telluric_slope = slope_list[0]
                telluric_factor = factor_list[0]
            else:
                telluric_slope = 0.00
                telluric_factor = 0.00

            print('   telluric factor: {0:7f} (correction slope: {1:7f}'.format(telluric_factor,telluric_slope))
            print()

            processed['telluric']['spectrum_noairmass'] = \
                (telluric['template']['rebinned']['flux']-1.)*telluric_factor + 1.0

            plt.plot(telluric['template']['input']['wave'],
                     (telluric['template']['input']['flux']-1.)*telluric_factor + 1.0, zorder=1, c='C1')
            plt.show()
        else:
            # telluric absorption at airmass 1 straight from the fitted slope
            processed['telluric']['spectrum_noairmass'] = np.exp(abs_slope)

        for obs in lists['observations']:
            """ Correction of telluric lines for the average airmass value, following Wyttenbach et al. 2015 """
            processed[obs]['e2ds_corrected'] = processed[obs]['e2ds_precorrected'] / \
                np.power(processed['telluric']['spectrum_noairmass'],
                         input_data[obs]['AIRMASS'] -
                         processed['airmass_ref'])
            processed[obs]['e2ds_corrected_err'] = processed[obs]['e2ds_precorrected_err'] / \
                np.power(processed['telluric']['spectrum_noairmass'],
                         input_data[obs]['AIRMASS'] -
                         processed['airmass_ref'])

        for obs in lists['observations']:
            # Correction of telluric lines
            telluric[obs] = {}

            telluric[obs]['spectrum_noairmass'] = processed['telluric']['spectrum_noairmass']
            telluric[obs]['airmass'] = input_data[obs]['AIRMASS']
            telluric[obs]['airmass_ref'] = processed['airmass_ref']

            # near-zero (or negative) transmission pixels are flagged and set to 1
            telluric[obs]['null'] = telluric[obs]['spectrum_noairmass'] < 0.001
            telluric[obs]['spectrum_noairmass'][ telluric[obs]['null']] = 1.0

            telluric[obs]['spectrum'] = np.power(processed['telluric']['spectrum_noairmass'],
                                                 input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            telluric[obs]['spline_noairmass'] = np.ones([input_data[obs]['n_orders'],
                                                         input_data[obs]['n_pixels']],
                                                        dtype=np.double)

            # smoothed (spline) version of the telluric spectrum, order by order
            for order in range(0, processed['n_orders']):
                telluric[obs]['spline_noairmass'][order, :], _, _ = \
                    compute_spline(input_data[obs]['wave'][order, :],
                                   telluric[obs]['spectrum_noairmass'][order, :],
                                   0.05)

            telluric[obs]['spline'] = np.power(telluric[obs]['spline_noairmass'],
                                               input_data[obs]['AIRMASS'] - processed['airmass_ref'])

            telluric[obs]['telluric_corrected'] = processed[obs]['e2ds_corrected']
            telluric[obs]['telluric_corrected_err'] = processed[obs]['e2ds_corrected_err']

        save_to_cpickle('telluric', telluric, config_in['output'], night)
        save_to_cpickle('telluric_processed', processed, config_in['output'], night)

        print()
        print("Night ", night, " completed")
def plot_telluric_airmass_observerRF(config_in, night_input=''):
    """Plot the observer-RF telluric correction: uncorrected vs corrected spectra (top)
    and the derived telluric spectrum (bottom), one figure per night.

    :param config_in: parsed SLOPpy configuration dictionary
    :param night_input: restrict plotting to one night; all nights when empty
    """
    import matplotlib.pyplot as plt

    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)

    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)

    for night in night_list:
        print("plot_telluric_airmass_observerRF Night: ", night)

        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)

        """ Retrieving the analysis"""
        try:
            processed = load_from_cpickle('telluric_processed', config_in['output'], night)
            telluric = load_from_cpickle('telluric', config_in['output'], night)
        except:
            print()
            print("No telluric correction, no plots")
            continue

        colors, cmap, line_colors = make_color_array(lists, observational_pams)

        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])

        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1)
        cbax1 = plt.subplot(gs[:, 1])

        # only used by the commented-out "spline" overlays below
        lift_spectrum = 0.25

        for i, obs in enumerate(lists['observations']):
            color_array = cmap(i / len(lists['observations']))

            _, e2ds_rescaled , _ = \
                perform_rescaling(processed[obs]['wave'],
                                  processed[obs]['e2ds'],
                                  processed[obs]['e2ds_err'],
                                  observational_pams['wavelength_rescaling'])

            e2ds_rescaled_corrected_spectrum = e2ds_rescaled / telluric[obs]['spectrum']
            # only used by the commented-out "spline" overlays below
            e2ds_rescaled_corrected_spline = e2ds_rescaled / telluric[obs]['spline']

            for order in range(0, processed[obs]['n_orders']):
                # legend entries only once, on the first order of the first observation
                if order == 0 and i==0:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5, label='uncorrected')
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array), label='corrected')
                else:
                    ax1.plot(processed[obs]['wave'][order, :],
                             e2ds_rescaled[order, :],
                             c=color_array, lw=1, alpha=0.5)
                    ax1.scatter(processed[obs]['wave'][order, :],
                                e2ds_rescaled_corrected_spectrum[order, :],
                                s=1, c=np.atleast_2d(color_array))

                #ax1.plot(processed[obs]['wave'][order, :],
                #         e2ds_rescaled[order, :]+lift_spectrum,
                #         c=color_array, lw=1, alpha=0.5)
                #ax1.scatter(processed[obs]['wave'][order, :],
                #            e2ds_rescaled_corrected_spline[order, :]+lift_spectrum,
                #            s=1, c=np.atleast_2d(color_array))

                ax2.plot(processed[obs]['wave'][order, :],
                         telluric[obs]['spectrum'][order, :],
                         c=color_array)
                ax2.axhline(1.00, c='k')

                #ax2.plot(processed[obs]['wave'][order, :],
                #         telluric[obs]['spline'][order, :]+lift_spectrum,
                #         c=color_array)
                #ax2.axhline(1.00+lift_spectrum, c='k')

        #ax2.plot(input_data['coadd']['wave'],telluric['stellarRF']['spline_eval']+0.1,c='k')
        #ax2.scatter(input_data['coadd']['wave'],telluric['stellarRF']['spectrum']+0.1,c='r', s=2)

        ax1.legend(loc=3)
        ax1.set_title('Night: ' + night)
        ax2.set_xlabel('$\lambda$ [$\AA$]')

        # optional overplot of a reference telluric spectrum, if configured
        try:
            instrument = night_dict[night]['instrument']
            comparison_file = config_in['instruments'][instrument]['telluric_comparison']
            comparison_data = np.genfromtxt(comparison_file, skip_header=1)
            # wavelengths below 1000 are presumably nm — converted to Angstrom
            if comparison_data[0,0]<1000.0:
                nm2Ang = 10.
            else:
                nm2Ang = 1.
            ax1.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
            ax2.plot(comparison_data[:, 0]*nm2Ang, comparison_data[:, 1], c='C0', zorder=1000)
        except:
            pass

        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
SLOPpy | SLOPpy-main/SLOPpy/sloppy_run.py | import SLOPpy
import argparse
import os
import sys
import collections
def sloppy_run():
    """Command-line entry point of the SLOPpy analysis pipeline.

    Steps performed, in order:
      1. parse the YAML configuration file given as the only CLI argument;
      2. prepare the datasets (creation of the pickle files);
      3. execute the requested pipeline recipes: first the ones acting on
         the whole spectrum, then - for each set of spectral lines - the
         ones acting on a single set of lines;
      4. execute the requested plotting subroutines.

    Recipes are registered in OrderedDicts because they must be performed
    in a well-defined order.
    """
    print()
    print('SLOPpy v{0}'.format(SLOPpy.__version__))
    print()
    print('Python version in use:')
    print(sys.version)
    #if sys.version_info[0] == 3 and sys.version_info[1] > 7:
    #    print('WARNING MESSAGES SUPPRESSED!')
    #print()

    parser = argparse.ArgumentParser(prog='SLOPpy_Run', description='SLOPpy runner')
    parser.add_argument('config_file', type=str, nargs=1, help='config file')
    args = parser.parse_args()
    file_conf = args.config_file[0]

    config_in = SLOPpy.yaml_parser(file_conf)
    SLOPpy.pars_input(config_in)
    print()

    """ creation of the pickle files """
    SLOPpy.prepare_datasets(config_in)

    #""" Retrieving the dictionary with the pipeline recipes """
    #pipeline = config_in['pipeline']

    """ Recipes must be performed in a given order... that's why we must use an ordered dictionary"""

    """ Each of the following recipes has to be performed on the whole spectrum """
    pipeline_common_routines = collections.OrderedDict()

    pipeline_common_routines['sky_correction'] = SLOPpy.compute_sky_correction
    #pipeline_common_routines['PCA_test01'] = SLOPpy.PCA_test01
    pipeline_common_routines['pca_preparation'] = SLOPpy.compute_pca_preparation
    pipeline_common_routines['sysrem_correction'] = SLOPpy.compute_sysrem_correction

    # molecfit version 1.5
    pipeline_common_routines['telluric_molecfit_v1_preparation'] = SLOPpy.compute_telluric_molecfit_v1_preparation
    pipeline_common_routines['telluric_molecfit_v1'] = SLOPpy.compute_telluric_molecfit_v1
    pipeline_common_routines['telluric_molecfit_v1_coadd'] = SLOPpy.compute_telluric_molecfit_v1_coadd

    # molecfit new version
    pipeline_common_routines['telluric_molecfit_preparation'] = SLOPpy.compute_telluric_molecfit_preparation
    pipeline_common_routines['telluric_molecfit'] = SLOPpy.compute_telluric_molecfit
    pipeline_common_routines['telluric_molecfit_coadd'] = SLOPpy.compute_telluric_molecfit_coadd

    pipeline_common_routines['telluric_template'] = SLOPpy.compute_telluric_template
    pipeline_common_routines['telluric_template_reference'] = SLOPpy.compute_telluric_template_reference
    pipeline_common_routines['telluric_template_alternative'] = SLOPpy.compute_telluric_template_alternative
    pipeline_common_routines['telluric_airmass_stellarRF'] = SLOPpy.compute_telluric_airmass_stellarRF
    pipeline_common_routines['telluric_airmass_reference_stellarRF'] = SLOPpy.compute_telluric_airmass_reference_stellarRF
    pipeline_common_routines['telluric_airmass_observerRF'] = SLOPpy.compute_telluric_airmass_observerRF
    pipeline_common_routines['telluric_airmass_berv_observerRF'] = SLOPpy.compute_telluric_airmass_berv_observerRF
    pipeline_common_routines['telluric_airmass_reference_observerRF'] = SLOPpy.compute_telluric_airmass_reference_observerRF
    pipeline_common_routines['telluric_airmass_berv_reference_observerRF'] = SLOPpy.compute_telluric_airmass_berv_reference_observerRF
    pipeline_common_routines['telluric_observerRF_skycalc'] = SLOPpy.compute_telluric_observerRF_skycalc
    pipeline_common_routines['differential_refraction'] = SLOPpy.compute_differential_refraction
    pipeline_common_routines['differential_refraction_update'] = SLOPpy.compute_differential_refraction_update
    pipeline_common_routines['check_differential_refraction'] = SLOPpy.check_differential_refraction
    pipeline_common_routines['write_differential_refraction'] = SLOPpy.write_differential_refraction
    pipeline_common_routines['interstellar_lines'] = SLOPpy.compute_interstellar_lines
    pipeline_common_routines['master_out'] = SLOPpy.compute_master_out
    pipeline_common_routines['clv_rm_models'] = SLOPpy.compute_clv_rm_models
    pipeline_common_routines['transmission_spectrum_preparation'] = SLOPpy.compute_transmission_spectrum_preparation
    pipeline_common_routines['write_output_transmission'] = SLOPpy.write_output_transmission
    pipeline_common_routines['write_output_transmission_stellarRF'] = SLOPpy.write_output_transmission_stellarRF
    pipeline_common_routines['write_output_transmission_planetRF'] = SLOPpy.write_output_transmission_planetRF
    pipeline_common_routines['write_output_transmission_observerRF'] = SLOPpy.write_output_transmission_observerRF

    """ Legacy routines for testing purposes """
    pipeline_routines = collections.OrderedDict()

    """ Each of the following recipes has to be performed independently on each
        set of spectral lines """
    pipeline_lines_routines = collections.OrderedDict()

    """
    pipeline_lines_routines['transmission_spectrum_planetRF'] = SLOPpy.compute_transmission_spectrum_planetRF
    pipeline_lines_routines['transmission_spectrum_observerRF'] = SLOPpy.compute_transmission_spectrum_observerRF
    pipeline_lines_routines['transmission_spectrum_stellarRF'] = SLOPpy.compute_transmission_spectrum_stellarRF
    pipeline_lines_routines['transmission_spectrum'] = SLOPpy.compute_transmission_spectrum
    pipeline_lines_routines['second_telluric_correction_on_transmission'] = SLOPpy.compute_second_telluric_correction_on_transmission
    pipeline_lines_routines['transmission_map'] = SLOPpy.compute_transmission_map
    pipeline_lines_routines['transmission_clv_rm_map'] = SLOPpy.compute_transmission_clv_rm_map
    """

    # ! NEW
    pipeline_lines_routines['quick_transmission'] = SLOPpy.compute_quick_transmission
    pipeline_lines_routines['clv_rm_models_lines'] = SLOPpy.compute_clv_rm_models_lines
    pipeline_lines_routines['transmission_mcmc'] = SLOPpy.compute_transmission_mcmc
    pipeline_lines_routines['transmission_mcmc_iterative'] = SLOPpy.compute_transmission_mcmc_iterative
    pipeline_lines_routines['transmission_binned_mcmc'] = SLOPpy.compute_transmission_binned_mcmc
    pipeline_lines_routines['transmission_binned_mcmc_iterative'] = SLOPpy.compute_transmission_binned_mcmc_iterative
    pipeline_lines_routines['transmission_spectrum_planetRF'] = SLOPpy.compute_transmission_spectrum_planetRF
    pipeline_lines_routines['transmission_spectrum_observerRF'] = SLOPpy.compute_transmission_spectrum_observerRF
    pipeline_lines_routines['transmission_spectrum_stellarRF'] = SLOPpy.compute_transmission_spectrum_stellarRF
    pipeline_lines_routines['transmission_spectrum'] = SLOPpy.compute_transmission_spectrum
    pipeline_lines_routines['transmission_spectrum_planetRF_iterative'] = SLOPpy.compute_transmission_spectrum_planetRF_iterative
    pipeline_lines_routines['transmission_spectrum_observerRF_iterative'] = SLOPpy.compute_transmission_spectrum_observerRF_iterative
    pipeline_lines_routines['transmission_spectrum_stellarRF_iterative'] = SLOPpy.compute_transmission_spectrum_stellarRF_iterative
    pipeline_lines_routines['transmission_spectrum_iterative'] = SLOPpy.compute_transmission_spectrum_iterative
    pipeline_lines_routines['transmission_spectrum_average_planetRF'] = SLOPpy.compute_transmission_spectrum_average_planetRF
    pipeline_lines_routines['transmission_spectrum_average_observerRF'] = SLOPpy.compute_transmission_spectrum_average_observerRF
    pipeline_lines_routines['transmission_spectrum_average_stellarRF'] = SLOPpy.compute_transmission_spectrum_average_stellarRF
    pipeline_lines_routines['transmission_spectrum_average'] = SLOPpy.compute_transmission_spectrum_average
    pipeline_lines_routines['transmission_spectrum_average_planetRF_iterative'] = SLOPpy.compute_transmission_spectrum_average_planetRF
    pipeline_lines_routines['transmission_spectrum_average_observerRF_iterative'] = SLOPpy.compute_transmission_spectrum_average_observerRF
    pipeline_lines_routines['transmission_spectrum_average_stellarRF_iterative'] = SLOPpy.compute_transmission_spectrum_average_stellarRF
    pipeline_lines_routines['transmission_spectrum_average_iterative'] = SLOPpy.compute_transmission_spectrum_average
    pipeline_lines_routines['transmission_lightcurve'] = SLOPpy.compute_transmission_lightcurve
    pipeline_lines_routines['transmission_lightcurve_average'] = SLOPpy.compute_transmission_lightcurve_average
    pipeline_lines_routines['spectra_lightcurve'] = SLOPpy.compute_spectra_lightcurve
    pipeline_lines_routines['spectra_lightcurve_average'] = SLOPpy.compute_spectra_lightcurve_average

    # TODO: to be updated to support single line(s) set
    """
    pipeline_lines_routines['transmission_clv_rm_correction_planetRF'] = SLOPpy.compute_transmission_clv_rm_correction_planetRF
    pipeline_lines_routines['transmission_clv_rm_correction_observerRF'] = SLOPpy.compute_transmission_clv_rm_correction_observerRF
    pipeline_lines_routines['transmission_clv_rm_correction_stellarRF'] = SLOPpy.compute_transmission_clv_rm_correction_stellarRF
    pipeline_lines_routines['transmission_clv_rm_correction'] = SLOPpy.compute_transmission_clv_rm_correction
    pipeline_lines_routines['transmission_clv_rm_average_planetRF'] = SLOPpy.compute_transmission_clv_rm_average_planetRF
    pipeline_lines_routines['transmission_clv_rm_average_observerRF'] = SLOPpy.compute_transmission_clv_rm_average_observerRF
    pipeline_lines_routines['transmission_clv_rm_average_stellarRF'] = SLOPpy.compute_transmission_clv_rm_average_stellarRF
    pipeline_lines_routines['transmission_clv_rm_average'] = SLOPpy.compute_transmission_clv_rm_average
    pipeline_lines_routines['spectra_lightcurve'] = SLOPpy.compute_spectra_lightcurve
    pipeline_lines_routines['spectra_lightcurve_average'] = SLOPpy.compute_spectra_lightcurve_average
    pipeline_lines_routines['excess_lightcurve'] = SLOPpy.compute_spectra_lightcurve
    pipeline_lines_routines['excess_lightcurve_average'] = SLOPpy.compute_spectra_lightcurve_average
    pipeline_lines_routines['spectra_lightcurve_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_clv_rm_correction
    pipeline_lines_routines['spectra_lightcurve_average_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_average_clv_rm_correction
    pipeline_lines_routines['excess_lightcurve_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_clv_rm_correction
    pipeline_lines_routines['excess_lightcurve_average_clv_rm_correction'] = SLOPpy.compute_spectra_lightcurve_average_clv_rm_correction
    pipeline_lines_routines['transmission_lightcurve_planetRF'] = SLOPpy.compute_transmission_lightcurve_planetRF
    pipeline_lines_routines['transmission_lightcurve_observerRF'] = SLOPpy.compute_transmission_lightcurve_observerRF
    pipeline_lines_routines['transmission_lightcurve_stellarRF'] = SLOPpy.compute_transmission_lightcurve_stellarRF
    pipeline_lines_routines['transmission_lightcurve'] = SLOPpy.compute_transmission_lightcurve
    pipeline_lines_routines['write_output_spectra'] = SLOPpy.write_output_spectra
    pipeline_lines_routines['transmission_lightcurve_average_planetRF'] = SLOPpy.compute_transmission_lightcurve_average_planetRF
    pipeline_lines_routines['transmission_lightcurve_average_observerRF'] = SLOPpy.compute_transmission_lightcurve_average_observerRF
    pipeline_lines_routines['transmission_lightcurve_average_stellarRF'] = SLOPpy.compute_transmission_lightcurve_average_stellarRF
    pipeline_lines_routines['transmission_lightcurve_average'] = SLOPpy.compute_transmission_lightcurve_average
    """

    plot_preparation_routines = collections.OrderedDict()
    #plot_preparation_routines['clv_rm_modelling'] = SLOPpy.plot_clv_rm_modelling
    # ! New
    plot_preparation_routines['clv_rm_models'] = SLOPpy.plot_clv_rm_models

    plot_routines = collections.OrderedDict()
    plot_routines['plot_dataset'] = SLOPpy.plot_dataset
    plot_routines['prepare_dataset'] = SLOPpy.plot_dataset
    plot_routines['dataset'] = SLOPpy.plot_dataset
    plot_routines['sky_correction'] = SLOPpy.plot_sky_correction
    plot_routines['differential_refraction'] = SLOPpy.plot_differential_refraction
    plot_routines['differential_refraction_update'] = SLOPpy.plot_differential_refraction_update
    plot_routines['check_differential_refraction'] = SLOPpy.plot_check_differential_refraction
    #plot_routines['write_differential_refraction'] = SLOPpy.write_differential_refraction
    #plot_routines['PCA_test01'] = SLOPpy.PCA_test01

    # molecfit version 1.5
    plot_routines['telluric_molecfit_v1'] = SLOPpy.plot_telluric_molecfit_v1
    plot_routines['telluric_molecfit_v1_coadd'] = SLOPpy.plot_telluric_molecfit_v1_coadd

    # molecfit new version
    plot_routines['telluric_molecfit'] = SLOPpy.plot_telluric_molecfit
    plot_routines['telluric_molecfit_coadd'] = SLOPpy.plot_telluric_molecfit_coadd

    plot_routines['telluric_template'] = SLOPpy.plot_telluric_template
    plot_routines['telluric_template_reference'] = SLOPpy.plot_telluric_template_reference
    plot_routines['telluric_template_alternative'] = SLOPpy.plot_telluric_template_alternative
    plot_routines['telluric_airmass_stellarRF'] = SLOPpy.plot_telluric_airmass_stellarRF
    plot_routines['telluric_airmass_reference_stellarRF'] = SLOPpy.plot_telluric_airmass_reference_stellarRF
    plot_routines['telluric_airmass_observerRF'] = SLOPpy.plot_telluric_airmass_observerRF
    plot_routines['telluric_airmass_berv_observerRF'] = SLOPpy.plot_telluric_airmass_berv_observerRF
    plot_routines['telluric_airmass_reference_observerRF'] = SLOPpy.plot_telluric_airmass_reference_observerRF
    plot_routines['telluric_airmass_berv_reference_observerRF'] = SLOPpy.plot_telluric_airmass_berv_reference_observerRF
    #plot_routines['telluric_obsolete_wyttenbach'] = SLOPpy.plot_telluric_obsolete_wyttenbach
    #plot_routines['telluric_airmass_observerRF_chunks'] = SLOPpy.plot_telluric_airmass_observerRF_chunks
    plot_routines['telluric_observerRF_skycalc'] = SLOPpy.plot_telluric_observerRF_skycalc
    plot_routines['interstellar_lines'] = SLOPpy.plot_interstellar_lines
    plot_routines['master_out'] = SLOPpy.plot_master_out
    plot_routines['telluric_molecfit_preparation'] = SLOPpy.plot_telluric_molecfit_preparation
    # ! NEW
    plot_routines['transmission_spectrum_preparation'] = SLOPpy.plot_transmission_spectrum_preparation

    """
    plot_routines['transmission_spectrum_planetRF'] = SLOPpy.plot_transmission_spectrum_planetRF
    plot_routines['transmission_spectrum_observerRF'] = SLOPpy.plot_transmission_spectrum_observerRF
    plot_routines['transmission_spectrum_stellarRF'] = SLOPpy.plot_transmission_spectrum_stellarRF
    plot_routines['transmission_spectrum'] = SLOPpy.plot_transmission_spectrum
    plot_routines['second_telluric_correction_on_transmission'] = SLOPpy.plot_second_telluric_correction_on_transmission
    plot_routines['transmission_clv_rm_correction_planetRF'] = SLOPpy.plot_transmission_clv_rm_correction_planetRF
    plot_routines['transmission_clv_rm_correction_observerRF'] = SLOPpy.plot_transmission_clv_rm_correction_observerRF
    plot_routines['transmission_clv_rm_correction_stellarRF'] = SLOPpy.plot_transmission_clv_rm_correction_stellarRF
    plot_routines['transmission_clv_rm_correction'] = SLOPpy.plot_transmission_clv_rm_correction
    plot_routines['spectra_lightcurve'] = SLOPpy.plot_spectra_lightcurve
    plot_routines['excess_lightcurve'] = SLOPpy.plot_spectra_lightcurve
    plot_routines['spectra_lightcurve_clv_rm_correction'] = SLOPpy.plot_spectra_lightcurve_clv_rm_correction
    plot_routines['excess_lightcurve_clv_rm_correction'] = SLOPpy.plot_spectra_lightcurve_clv_rm_correction
    plot_routines['transmission_lightcurve_planetRF'] = SLOPpy.plot_transmission_lightcurve_planetRF
    plot_routines['transmission_lightcurve_observerRF'] = SLOPpy.plot_transmission_lightcurve_observerRF
    plot_routines['transmission_lightcurve_stellarRF'] = SLOPpy.plot_transmission_lightcurve_stellarRF
    plot_routines['transmission_lightcurve'] = SLOPpy.plot_transmission_lightcurve
    plot_routines['transmission_map'] = SLOPpy.plot_transmission_map
    plot_routines['transmission_clv_rm_map'] = SLOPpy.plot_transmission_clv_rm_map
    """

    plot_lines_routines = collections.OrderedDict()
    plot_lines_routines['clv_rm_models_lines'] = SLOPpy.plot_clv_rm_models_lines
    plot_lines_routines['transmission_binned_mcmc'] = SLOPpy.plot_transmission_binned_mcmc
    plot_lines_routines['transmission_spectrum_planetRF'] = SLOPpy.plot_transmission_spectrum_planetRF
    plot_lines_routines['transmission_spectrum_observerRF'] = SLOPpy.plot_transmission_spectrum_observerRF
    plot_lines_routines['transmission_spectrum_stellarRF'] = SLOPpy.plot_transmission_spectrum_stellarRF
    plot_lines_routines['transmission_spectrum'] = SLOPpy.plot_transmission_spectrum
    plot_lines_routines['transmission_spectrum_planetRF_iterative'] = SLOPpy.plot_transmission_spectrum_planetRF_iterative
    plot_lines_routines['transmission_spectrum_observerRF_iterative'] = SLOPpy.plot_transmission_spectrum_observerRF_iterative
    plot_lines_routines['transmission_spectrum_stellarRF_iterative'] = SLOPpy.plot_transmission_spectrum_stellarRF_iterative
    plot_lines_routines['transmission_spectrum_iterative'] = SLOPpy.plot_transmission_spectrum_iterative

    plot_average_routines = collections.OrderedDict()
    plot_average_routines['compare_master_out'] = SLOPpy.plot_compare_master_out

    plot_lines_average_routines = collections.OrderedDict()
    # ! These should be removed and performed line by line !
    plot_lines_average_routines['transmission_binned_mcmc'] = SLOPpy.plot_transmission_binned_mcmc
    plot_lines_average_routines['transmission_spectrum_average_planetRF'] = SLOPpy.plot_transmission_spectrum_average_planetRF
    plot_lines_average_routines['transmission_spectrum_average_observerRF'] = SLOPpy.plot_transmission_spectrum_average_observerRF
    plot_lines_average_routines['transmission_spectrum_average_stellarRF'] = SLOPpy.plot_transmission_spectrum_average_stellarRF
    plot_lines_average_routines['transmission_spectrum_average'] = SLOPpy.plot_transmission_spectrum_average

    """
    plot_average_routines['excess_lightcurve_average'] = SLOPpy.plot_spectra_lightcurve_average
    plot_average_routines['spectra_lightcurve_average'] = SLOPpy.plot_spectra_lightcurve_average
    plot_average_routines['spectra_lightcurve_average_clv_rm_correction'] = \
        SLOPpy.plot_spectra_lightcurve_average_clv_rm_correction
    plot_average_routines['excess_lightcurve_average_clv_rm_correction'] = \
        SLOPpy.plot_spectra_lightcurve_average_clv_rm_correction
    plot_average_routines['transmission_average_planetRF'] = SLOPpy.plot_transmission_average_planetRF
    plot_average_routines['transmission_average_observerRF'] = SLOPpy.plot_transmission_average_observerRF
    plot_average_routines['transmission_average_stellarRF'] = SLOPpy.plot_transmission_average_stellarRF
    plot_average_routines['transmission_average'] = SLOPpy.plot_transmission_average
    plot_average_routines['transmission_lightcurve_average_planetRF'] = SLOPpy.plot_transmission_lightcurve_average_planetRF
    plot_average_routines['transmission_lightcurve_average_observerRF'] = SLOPpy.plot_transmission_lightcurve_average_observerRF
    plot_average_routines['transmission_lightcurve_average_stellarRF'] = SLOPpy.plot_transmission_lightcurve_average_stellarRF
    plot_average_routines['transmission_lightcurve_average'] = SLOPpy.plot_transmission_lightcurve_average
    plot_average_routines['transmission_clv_rm_average_planetRF'] = SLOPpy.plot_transmission_clv_rm_average_planetRF
    plot_average_routines['transmission_clv_rm_average_observerRF'] = SLOPpy.plot_transmission_clv_rm_average_observerRF
    plot_average_routines['transmission_clv_rm_average_stellarRF'] = SLOPpy.plot_transmission_clv_rm_average_stellarRF
    plot_average_routines['transmission_clv_rm_average'] = SLOPpy.plot_transmission_clv_rm_average
    plot_average_routines['compare_clv_rm_effects_planetRF'] = SLOPpy.plot_compare_clv_rm_effects_planetRF
    plot_average_routines['compare_clv_rm_effects_observerRF'] = SLOPpy.plot_compare_clv_rm_effects_observerRF
    plot_average_routines['compare_clv_rm_effects_stellarRF'] = SLOPpy.plot_compare_clv_rm_effects_stellarRF
    plot_average_routines['compare_clv_rm_effects'] = SLOPpy.plot_compare_clv_rm_effects
    plot_average_routines['transmission_map_average'] = SLOPpy.plot_transmission_map_average
    plot_average_routines['transmission_clv_rm_map_average'] = SLOPpy.plot_transmission_clv_rm_map_average
    """

    """
        Execution of subroutines
    """

    # ! NEW !
    print()
    print("*** Data preparation analysis ***")
    try:
        pipeline = config_in['pipeline']
        len(pipeline)  # raises TypeError when the section exists but is empty (None)
    except (KeyError, TypeError):
        pipeline = {}

    for key in pipeline:
        if key in pipeline_common_routines:
            print()
            pipeline_common_routines[key](config_in)

    # ! Kept here for legacy purposes !
    for key in pipeline:
        if key in pipeline_routines:
            print()
            pipeline_routines[key](config_in)

    # ! NEW !
    print()
    print("*** Spectral lines analysis ***")
    try:
        spectral_lines = config_in['spectral_lines']
        len(spectral_lines)  # raises TypeError when the section exists but is empty (None)
    except (KeyError, TypeError):
        # BUGFIX: the original code reset `pipeline` here instead of
        # `spectral_lines`, leaving `spectral_lines` undefined and crashing
        # the loop below with a NameError whenever the configuration has no
        # 'spectral_lines' section.
        spectral_lines = {}

    for lines_label in spectral_lines:
        for key in pipeline:
            if key in pipeline_lines_routines:
                print()
                pipeline_lines_routines[key](config_in, lines_label)

    #for key, func in pipeline_routines.items():
    #    if key in pipeline: func(config_in)

    #TODO: must be updated to be performed on a single set of spectral lines

    try:
        plots = config_in['plots']
        len(plots)  # raises TypeError when the section exists but is empty (None)
    except (KeyError, TypeError):
        # No plots requested: nothing left to do.
        return

    print()
    print("*** Plot Subroutines ***")
    print()

    nights = config_in['nights']

    for key in plots:
        if key in plot_preparation_routines:
            plot_preparation_routines[key](config_in)
            print()

    for key in plots:
        if key in plot_routines:
            plot_routines[key](config_in)
            print()

    # BUGFIX: reuse the `spectral_lines` dictionary validated above instead of
    # re-reading config_in['spectral_lines'], which raised a KeyError when the
    # section was missing even though only plots were requested.
    for lines_label in spectral_lines:
        for key in plots:
            # per-night plots for this set of lines
            for night in nights:
                if key in plot_lines_routines:
                    plot_lines_routines[key](config_in, lines_label, night)
                    print()
            # night-averaged plots for this set of lines
            if key in plot_lines_average_routines:
                plot_lines_average_routines[key](config_in, lines_label)
                print()

    #for key, func in plot_preparation_routines.items():
    #    if key in plots: func(config_in)
    #
    #for night in nights:
    #    for key, func in plot_routines.items():
    #        if key in plots: func(config_in, night)
    #
    #for key, func in plot_average_routines.items():
    #    if key in plots: func(config_in)
| 24,000 | 51.749451 | 139 | py |
SLOPpy | SLOPpy-main/SLOPpy/transmission_mcmc.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.math_functions import *
from SLOPpy.subroutines.bayesian_emcee import *
# from SLOPpy.subroutines.rebin_subroutines import *
from scipy.signal import savgol_filter
# Public names exported by `from SLOPpy.transmission_mcmc import *`.
__all__ = ['compute_transmission_mcmc', 'compute_transmission_mcmc_iterative']
# Tag used to build the names of the cpickle files read/written by this
# subroutine (e.g. '<subroutine_name>_data').
subroutine_name = 'transmission_mcmc'
def compute_transmission_mcmc_iterative(config_in, lines_label):
    """Run the transmission-spectrum MCMC once per PCA iteration.

    The number of iterations is taken from the PCA section of the
    configuration (key 'iterations', default 5); each pass delegates to
    compute_transmission_mcmc in the planet rest frame.
    """
    pca_parameters = from_config_get_pca_parameters(config_in)
    n_iterations = pca_parameters.get('iterations', 5)

    for iteration in range(n_iterations):
        compute_transmission_mcmc(config_in,
                                  lines_label,
                                  reference='planetRF',
                                  pca_iteration=iteration)
def compute_transmission_mcmc(config_in, lines_label, reference='planetRF', pca_iteration=-1):
night_dict = from_config_get_nights(config_in)
planet_dict = from_config_get_planet(config_in)
shared_data = load_from_cpickle('shared', config_in['output'])
""" selection of those parameters that are specific of the spectral line(s)
under analysis
"""
spectral_lines = from_config_get_spectral_lines(config_in)
lines_dict = spectral_lines[lines_label]
norm_dict = lines_dict.get('normalization', {})
norm_pams = {}
norm_pams['normalize_transmission'] = norm_dict.get('normalize_transmission', True)
norm_pams['normalization_model'] = norm_dict.get('normalization_model', 'polynomial')
""" Normalization parameters for polynomial model"""
norm_pams['model_poly_degree'] = norm_dict.get('model_poly_degree', 2)
norm_pams['spectra_poly_degree'] = norm_dict.get('spectra_poly_degree', 2)
norm_pams['lower_threshold'] = norm_dict.get('lower_threshold', 0.950)
norm_pams['percentile_selection'] = norm_dict.get('percentile_selection', 10)
""" Normalization parameters using Savitzky-Golay filter"""
norm_pams['window_length'] = norm_dict.get('window_length', 101)
norm_pams['polyorder'] = norm_dict.get('polyorder', 3)
norm_pams['mode'] = norm_dict.get('mode', 'nearest')
norm_pams['cval'] = norm_dict.get('cval', 1.0)
sampler_pams = lines_dict['sampler_parameters']
sampler_name = sampler_pams.get('sampler_name', 'emcee')
# TODO reference as input parameter
reference = 'planetRF'
"""
- case 0: only one spectral line, default line parameters are contrast, FWHM, rv_shift
- case 1: only one spectral line, no winds
- case 2: only one spectral line, no planetary radius dependance
- case 3: only one spectral line, no winds and no planetary radius dependance
- case 10: more than one spectral lines, all line parameters are free and independent
- case 11: more than one spectral lines, all lines are affected by the same wind
- case 12: more than one spectral lines, all lines have same FWHM
- case 13: more than one spectral lines, all lines are affected by the same wind and have same FWHM
- case 14: more than one spectral lines, no winds
- case 15: more than one spectral lines, no winds, all lines have same FWHM
- case 20: more than one spectral lines, no Rp dependance, all line parameters are free and independent
- case 21: more than one spectral lines, no Rp dependance, all lines are affected by the same wind
- case 22: more than one spectral lines, no Rp dependance, all lines have same FWHM
- case 23: more than one spectral lines, no Rp dependance, all lines are affected by the same wind and have same FWHM
- case 24: more than one spectral lines, no Rp dependance, no winds
- case 25: more than one spectral lines, no Rp dependance, no winds, all lines have same FWHM
free_Rp free_winds shared_winds shared_FWHM
- case 0: True True False False DEFAULT for single line
- case 1: True False False False
- case 2: False True False False
- case 3: False False False False
- case 10: True True False False DEFAULT for multiple lines
- case 11: True True True False
- case 12: True True False True
- case 13: True True True True
- case 14: True False False False
- case 15: True False False True
- case 20: False True False False
- case 21: False True True False
- case 22: False True False True
- case 23: False True True True
- case 24: False False False False
- case 25: False False False True
"""
model_case = 10
fit_pams = lines_dict['fit_parameters']
# Added compativlity to "wrong" keys
clv_rm_correction = lines_dict.get('clv_rm_correction', True)
free_Rp = fit_pams.get('free_Rp', True) \
and fit_pams.get('free_planet_radius', True) \
and clv_rm_correction
free_winds = fit_pams.get('free_winds', True) \
and fit_pams.get('free_offset', True)
shared_winds = fit_pams.get('shared_winds', False) \
or fit_pams.get('shared_offset', False)
shared_FWHM = fit_pams.get('shared_FWHM', False) \
or fit_pams.get('shared_fwhm', False)
prior_dict = fit_pams.get('priors', {}) \
or fit_pams.get('priors', {})
if len(lines_dict['lines']) < 2:
if free_Rp is True and free_winds is True:
model_case = 0
if free_Rp is True and free_winds is False:
model_case = 1
if free_Rp is False and free_winds is True:
model_case = 2
if free_Rp is False and free_winds is False:
model_case = 3
else:
if free_Rp is True:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 10
if shared_winds is True and shared_FWHM is False:
model_case = 11
if shared_winds is False and shared_FWHM is True:
model_case = 12
if shared_winds is True and shared_FWHM is True:
model_case = 13
else:
if shared_winds is False and shared_FWHM is False:
model_case = 14
if shared_winds is False and shared_FWHM is True:
model_case = 15
else:
if free_winds is True:
if shared_winds is False and shared_FWHM is False:
model_case = 20
if shared_winds is True and shared_FWHM is False:
model_case = 21
if shared_winds is False and shared_FWHM is True:
model_case = 22
if shared_winds is True and shared_FWHM is True:
model_case = 23
else:
if shared_winds is False and shared_FWHM is False:
model_case = 24
if shared_winds is False and shared_FWHM is True:
model_case = 25
jitter_flag = fit_pams.get('jitter', True)
print()
print(' free_Rp: (default: True) ', free_Rp)
print(' free_winds: (default: True) ', free_winds)
print(' shared_winds: (default: False) ', shared_winds)
print(' shared_FWHM: (default: False) ', shared_FWHM)
print(' jitter: (default: True) ', jitter_flag)
print(' # lines: ', len(lines_dict['lines']))
print(' model_case: ', model_case)
""" parameters list:
to be updated
pams_dict = {} # dictionary containing the index of a given parameter
pams_list = [] # list with the parameter names ordered according to their index
boundaries = np.empty([0, 2]) # boundaries for MCMC / nested sampling
theta_start = np.empty(0) # starting point for MCMC
lines_center = np.empty(0) # laboratory wavelength of spectral lines
pam_index = 0 # keep track of the number of variables
for line_key, line_val in lines_dict['lines'].items():
pam_name = line_key + '_contrast'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 1.00]], axis=0)
theta_start = np.append(theta_start, 0.010)
pam_index += 1
lines_center = np.append(lines_center, line_val)
# skip the inclusion of FWHM as a free parameter for each line
if the shared FWHM is selected
#
if model_case in [0, 1, 2, 3, 10, 11, 14, 20, 21, 24]:
# if not lines_dict['fit_parameters']['shared_fwhm']:
pam_name = line_key + '_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.00, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.0)
pam_index += 1
# if lines_dict['fit_parameters']['fixed_separation']: continue
# if not lines_dict['fit_parameters']['lines_shift']: continue
if model_case in [0, 2, 10, 12, 20, 22]:
pam_name = line_key + '_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.00, 5.00]], axis=0)
theta_start = np.append(theta_start, 0.00)
pam_index += 1
if model_case in [12, 13, 15, 22, 23, 25]:
# if lines_dict['fit_parameters']['shared_fwhm']:
pam_name = 'shared_fwhm'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[0.000, 150.00]], axis=0)
theta_start = np.append(theta_start, 5.000)
pam_index += 1
if model_case in [11, 13, 21, 23]:
# if lines_dict['fit_parameters']['fixed_separation'] and lines_dict['fit_parameters']['lines_shift']:
pam_name = 'shared_winds'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[-5.0, 5.0]], axis=0)
theta_start = np.append(theta_start, 0.000)
pam_index += 1
if model_case in [0, 1, 10, 11, 12, 13, 14, 15]:
pams_dict['rp_factor'] = pam_index
pams_list.append('rp_factor')
boundaries = np.append(boundaries, [[0.5, 2.0]], axis=0)
theta_start = np.append(theta_start, 1.0)
pam_index += 1
pams_dict['K_planet'] = pam_index
pams_list.append('K_planet')
boundaries = np.append(boundaries,
[[-300., planet_dict['RV_semiamplitude']
[0]+ 300.]],
axis=0)
theta_start = np.append(
theta_start, planet_dict['RV_semiamplitude'][0])
pam_index += 1
pam_name = 'jitter'
pams_dict[pam_name] = pam_index
pams_list.append(pam_name)
boundaries = np.append(boundaries, [[10**(-12), 0.01]], axis=0)
theta_start = np.append(theta_start, 10**(-11))
pam_index += 1
for ii in range(0, pam_index):
print(pams_list[ii], ' ', boundaries[ii, :],
' ', theta_start[ii])
ndim = pam_index
"""
for night in night_dict:
print()
print("transmission_mcmc Night: {0:s}".format(night))
""" Retrieving the list of observations"""
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
# Moved here to check wheter PCA or master-out have been employed
preparation_input = load_from_cpickle(
'transmission_preparation', config_in['output'], night)
if preparation_input.get('pca_output', False):
if pca_iteration >= 0:
it_string = str(pca_iteration).zfill(2)
else:
it_string = str(preparation_input.get('ref_iteration', 0)).zfill(2)
preparation = preparation_input[it_string]
else:
preparation = preparation_input
it_string = ''
try:
mcmc_data = load_from_cpickle(subroutine_name + '_data', config_in['output'], night, lines_label, it_string)
clv_rm_radius = mcmc_data['clv_rm_radius']
clv_rm_grid = mcmc_data['clv_rm_grid']
transmission_spec = mcmc_data['transmission_spec']
transmission_spec_err = mcmc_data['transmission_spec_err']
wave_array = mcmc_data['wave_array']
time_array = mcmc_data['time_array']
planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
jitter_index = mcmc_data['jitter_index']
n_jitter = mcmc_data['n_jitter']
print(" Loading MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
except FileNotFoundError:
print(" Computing MCMC data array for lines {0:s}, night: {1:s}".format(
lines_label, night))
calib_data = load_from_cpickle(
'calibration_fibA', config_in['output'], night)
input_data = retrieve_observations(
config_in['output'], night, lists['observations'])
if clv_rm_correction:
try:
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night, lines_label)
except (FileNotFoundError, IOError):
clv_rm_models = load_from_cpickle(
'clv_rm_models', config_in['output'], night)
else:
# workaround if CLV correction is not available
clv_rm_models = {'common': {}}
clv_rm_models['common']['n_radius_grid'] = 3
clv_rm_models['common']['radius_grid'] = np.asarray(
[0.5, 1.0, 1.5])
processed = {
'subroutine': subroutine_name,
}
"""
we use the first transit_full observation to define the boolean
selection array to be used for all the observations.
In this way we make sure that all the wavelength/spectrum arrays have the
same dimension
"""
obs_reference = lists['transit_full'][0]
wave_SRF, step_SRF = shift_wavelength(input_data[obs_reference]['wave'],
input_data[obs_reference]['step'],
observational_pams[obs_reference]['rv_shift_ORF2SRF'])
print('WAVE SHAPE:', np.shape(wave_SRF))
print('STEP SHAPE:', np.shape(step_SRF))
processed['common'] = {
'range': lines_dict['fit_parameters']['range'],
'reference_wave': wave_SRF,
'reference_step': step_SRF
}
"""
Identification of the orders including the data points of interest
"""
identify_order = (wave_SRF > processed['common']['range'][0]) \
& (wave_SRF < processed['common']['range'][1])
order_selection = (np.sum(identify_order, axis=1) > 0)
order_list = np.arange(0, observational_pams['n_orders'], dtype=np.int16)[order_selection]
n_orders = len(order_list)
processed['common']['selection'] = identify_order
processed['common']['order_selection'] = order_selection
processed['common']['order_list'] = order_list
processed['common']['n_orders'] = n_orders
processed['common']['size'] = np.sum(identify_order)
print('COMMON')
print(np.shape(processed['common']['selection']))
print(processed['common']['order_selection'])
print(processed['common']['order_list'])
print(processed['common']['size'])
processed['common_extended'] = {
'range': lines_dict['range'],
'reference_wave': wave_SRF,
'reference_step': step_SRF
}
identify_order = (wave_SRF > processed['common_extended']['range'][0]) \
& (wave_SRF < processed['common_extended']['range'][1])
order_selection = (np.sum(identify_order, axis=1) > 0)
order_list = np.arange(0, observational_pams['n_orders'], dtype=np.int16)[order_selection]
n_orders = len(order_list)
processed['common_extended']['selection'] = identify_order
processed['common_extended']['order_selection'] = order_selection
processed['common_extended']['order_list'] = order_list
processed['common_extended']['n_orders'] = n_orders
processed['common_extended']['size'] = np.sum(identify_order)
#print('COMMON_EXTENDED')
#print(np.shape(processed['common_extended']['selection']))
#print(processed['common_extended']['order_selection'])
#print(processed['common_extended']['order_list'])
#print(processed['common_extended']['size'])
#print()
for obs in lists['observations']:
""" we start from the e2ds file, after correction for blaze and
division by the master-out
Observation data:
wave: input_data[obs]['wave']
step: input_data[obs]['step']
flux: preparation[obs]['deblazed']
ferr: preparation[obs]['deblazed_err']
"""
""" First step: we rebin the spectra in the Stellar Reference Frame,
with the step size decided by the user specifically for the fit
"""
processed[obs] = {}
processed[obs]['wave_SRF'], processed[obs]['step_SRF'] = shift_wavelength(
input_data[obs]['wave'],
input_data[obs]['step'],
observational_pams[obs]['rv_shift_ORF2SRF'])
""" Continuum normalization:
3) Polynomial fit, everything is hard coded now but personalized
options can be implemented easily in the yaml file
"""
processed[obs]['continuum'] = processed['common']['reference_wave'] * 0.
processed[obs]['normalized'] = processed['common']['reference_wave'] * 0.
processed[obs]['normalized_err'] = processed['common']['reference_wave'] * 0.
""" We perform the selection only on the order
where we actually have data for the MCMC
"""
if norm_pams['normalize_transmission'] and norm_pams['normalization_model'] == 'polynomial':
""" Continuum normalization preparatory steps:
1) exclusion of regions with planetary lines
2) exclusion of regions with stellar lines
3) Polynomial fit of selected regions
Boolean array initialized to all True values, fit is
performed on the extended region and then applied to the fit subset
"""
processed['common_extended']['line_exclusion'] = processed['common_extended']['selection'].copy()
""" Continuum normalization:
1) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
for line_key, line_val in lines_dict['lines'].items():
line_extension = 1.2 * \
planet_dict['RV_semiamplitude'][0] * \
line_val / speed_of_light_km
processed['common_extended']['line_exclusion'] = processed['common_extended']['line_exclusion'] \
& (np.abs(processed['common_extended']['reference_wave']-line_val) > line_extension)
""" Continuum normalization:
2) exclusion of regions with planetary lines, taking into
account the planetary RV semi-amplitude
"""
try:
for order in processed['common_extended']['order_list']:
print('ORDER', order)
order_sel = processed['common_extended']['selection'][order, :]
print('selection', np.shape(processed['common_extended']['selection']))
print('ORDER_SEL', np.shape(order_sel))
stellar_spectrum_rebinned = rebin_1d_to_1d(clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models['common']['norm_convolved'],
processed['common_extended']['reference_wave'][order, order_sel],
processed['common_extended']['reference_step'][order, order_sel])
stellar_spectrum_derivative = first_derivative(
processed['common_extended']['reference_wave'][order, order_sel], stellar_spectrum_rebinned)
processed['common_extended']['line_exclusion'][order, order_sel] = processed['common_extended']['line_exclusion'][order, order_sel] & (
np.abs(stellar_spectrum_derivative) < 0.0005)
except KeyError:
print(
"No stellar synthetic spectrum from CLV models, some stellar lines may be included transmission normalization ")
for obs in lists['observations']:
for order in processed['common']['order_list']:
selection = processed['common_extended']['line_exclusion'][order, :] & (
preparation[obs]['deblazed'][order, :]
> np.std(preparation[obs]['deblazed'][order, :]))
if np.sum(selection) < 100:
selection = (preparation[obs]['deblazed'][order, :] >
np.std(preparation[obs]['deblazed'][order, :]))
processed[obs]['norm_coeff_' + repr(order)] = \
np.polynomial.chebyshev.chebfit(processed[obs]['wave_SRF'][order, selection],
preparation[obs]['deblazed'][order, selection],
2)
processed[obs]['continuum'][order, selection] = np.polynomial.chebyshev.chebval(
preparation[obs]['wave_SRF'][order, selection], processed[obs]['norm_coeff_' + repr(order)])
processed[obs]['normalized'][order, selection] = preparation[obs]['deblazed'][order, selection] / \
processed[obs]['continuum'][order, selection]
processed[obs]['normalized_err'][order, selection] = preparation[obs]['deblazed_err'][order, selection] / \
processed[obs]['continuum'][order, selection]
elif norm_pams['normalize_transmission'] and (
norm_pams['normalization_model'] == 'savgol'
or norm_pams['normalization_model'] == 'savitzky-golay'):
print(' ', obs, ' normalization using Savitzky-Golay filter')
for obs in lists['observations']:
processed[obs]['continuum'] = np.pnes_like(preparation[obs]['deblazed'])
for order in processed['common']['order_list']:
processed[obs]['continuum'][order,:] = savgol_filter(preparation[obs]['deblazed'][order,:],
window_length=norm_pams['window_length'],
polyorder=norm_pams['polyorder'],
mode=norm_pams['mode'],
cval=norm_pams['cval'])
processed[obs]['normalized'] = preparation[obs]['deblazed'] / processed[obs]['continuum']
processed[obs]['normalized_err'] = preparation[obs]['deblazed_err'] / processed[obs]['continuum']
print('ciao')
processed['common']['n_obs'] = len(lists['transit_full'])
processed['common']['n_radius_grid'] = clv_rm_models['common']['n_radius_grid']
processed['common']['radius_grid'] = clv_rm_models['common']['radius_grid']
clv_rm_radius = clv_rm_models['common']['radius_grid']
""" We are moving the values of interest from dictionaries to arrays
in order to speed up the MCMC
1) clv_rm_grid: array with all the CLV models, as a function of the
radius of the planet
2) time_from_transit: BJD_TDB - T0
3) planet_RVsinusoid: Fractional RV of the planet (K=1) - from a array
"""
clv_rm_grid = np.ones([processed['common']['n_radius_grid'],
processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
time_from_transit = np.empty(
processed['common']['n_obs'], dtype=np.double)
wave_array = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
time_array = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
transmission_spec = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
transmission_spec_err = np.empty([processed['common']['n_obs'],
processed['common']['size']],
dtype=np.double)
for i_obs, obs in enumerate(lists['transit_full']):
time_from_transit[i_obs] = observational_pams[obs]['BJD'] - \
observational_pams['time_of_transit']
time_array[i_obs, :] = time_from_transit[i_obs]
# planet_RVsinusoid[i_obs] = np.sin(2*np.pi / planet_dict['period'][0] * time_from_transit[i_obs])
wave_array[i_obs, :] = processed[obs]['wave_SRF'][processed['common']['selection']].flatten()
transmission_spec[i_obs, :] = processed[obs]['normalized'][processed['common']['selection']].flatten()
transmission_spec_err[i_obs,
:] = processed[obs]['normalized_err'][processed['common']['selection']].flatten()
if clv_rm_correction is False:
continue
for i_r in range(0, processed['common']['n_radius_grid']):
clv_rm_temp = processed['common_extended']['reference_wave'] * 0.
for order in processed['common']['order_list']:
""" CLV Synthetic models are in the Stellar Reference system,
so no shift is required """
clv_rm_temp[order, :] = rebin_1d_to_1d(
clv_rm_models['common']['wave'],
clv_rm_models['common']['step'],
clv_rm_models[obs]['clv_rm_model_convolved_normalized'][i_r, :],
processed[obs]['wave_SRF'][order, :],
processed[obs]['step_SRF'][order, :],
preserve_flux=False)
clv_rm_grid[i_r, i_obs, :] = clv_rm_temp[processed['common']['selection']].flatten()
# preserve_flux should be True or False?
# False if the spectra are already normalized
remove_outliers = (np.abs(transmission_spec - 1.) > 0.5)
transmission_spec[remove_outliers] = 1.0
transmission_spec_err[remove_outliers] = 1.0
planet_RVsinusoid = np.sin(
2*np.pi / planet_dict['period'][0] * time_array)
if jitter_flag:
jitter_index = []
n_jitter = 1
else:
jitter_index = None
n_jitter = 0
mcmc_data = {
'observations': lists['transit_full'],
'common_wave': processed['common']['reference_wave'],
'common_step': processed['common']['reference_step'],
'clv_rm_grid': clv_rm_grid,
'transmission_spec': transmission_spec,
'transmission_spec_err': transmission_spec_err,
'wave_array': wave_array,
'time_array': time_array,
'planet_RVsinusoid': planet_RVsinusoid,
'clv_rm_radius': clv_rm_models['common']['radius_grid'],
'n_obs': len(lists['transit_full']),
'n_radius_grid': clv_rm_models['common']['n_radius_grid'],
'jitter_index': jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name + '_data', mcmc_data,
config_in['output'], night, lines_label, it_string)
# Forcing memory deallocation
clv_rm_models = None
mcmc_data = None
print()
print("transmission_mcmc ")
try:
results_dict = load_from_cpickle(subroutine_name+'_'+sampler_name+'_results',
config_in['output'], night, lines_label, it_string)
print(" Transmission MCMC analysis for lines {0:s}, night: {1:s} already performed".format(
lines_label, night))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
start_average = np.average(results_dict['point_start'], axis=0)
ndim = results_dict['ndim']
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}]) (start: {7:9f})'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0],
boundaries[val, 1],
start_average[val])
)
continue
# R(h) = np.sqrt(1+h/delta)
except FileNotFoundError:
print()
# getting fit parameters
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter)
ndim = len(theta_start)
ngen = sampler_pams.get('n_gen', 64000)
nwalkers_mult = sampler_pams.get('n_walkers_mult', 2)
nwalkers = sampler_pams.get('n_walkers', nwalkers_mult * ndim)
nthin = sampler_pams.get('n_thin', 50)
nsteps = sampler_pams.get('n_steps', 20000)
nburnin = sampler_pams.get('n_burnin', 5000)
ndata = np.size(wave_array)
if pams_dict.get('rp_factor', False):
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
' n_burnin: (default: 10000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
wave_array,
transmission_spec,
transmission_spec_err,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
wave_array,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
wave_array,
clv_rm_radius,
clv_rm_grid,
planet_RVsinusoid,
lines_center,
jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
'theta_start': theta_start,
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
- linear approximation or the orbit near the transit event
Computation is performed by moving to the Solar Barycenter, than to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of star relatively to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
save_to_cpickle(subroutine_name+'_'+sampler_name+'_results',
results_dict, config_in['output'], night, lines_label, it_string)
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}])'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0], boundaries[val, 1])
)
# print(' *** physical output')
#
# results_dict['results'] = {
# 'lines_model': med_lines_model,
# 'clv_model': med_clv_model,
# 'lines_array': med_lines_array,
# 'planet_K': med_planet_K,
# 'planet_R': med_planet_R,
# 'jitter': med_jitter
# }
""" Analysis of the entire dataset """
print()
try:
all_mcmc_data = load_from_cpickle(
subroutine_name+'_data', config_in['output'], night='', lines=lines_label, it_string=it_string)
all_clv_rm_radius = all_mcmc_data['clv_rm_radius']
all_clv_rm_grid = all_mcmc_data['clv_rm_grid']
all_transmission_spec = all_mcmc_data['transmission_spec']
all_transmission_spec_err = all_mcmc_data['transmission_spec_err']
all_wave_array = all_mcmc_data['wave_array']
all_time_array = all_mcmc_data['time_array']
all_planet_RVsinusoid = all_mcmc_data['planet_RVsinusoid']
all_observations = all_mcmc_data['observations']
all_n_obs = all_mcmc_data['n_obs']
all_n_radius_grid = all_mcmc_data['n_radius_grid']
all_jitter_index = all_mcmc_data['jitter_index']
n_jitter = all_mcmc_data['n_jitter']
except:
n_jitter = 0
for night in night_dict:
mcmc_data = load_from_cpickle(subroutine_name+'_data',
config_in['output'], night, lines_label, it_string=it_string)
try:
# Building the arrays for the full analysis
all_clv_rm_grid = np.concatenate(
(all_clv_rm_grid, mcmc_data['clv_rm_grid']), axis=1)
all_transmission_spec = np.concatenate(
(all_transmission_spec, mcmc_data['transmission_spec']))
all_transmission_spec_err = np.concatenate(
(all_transmission_spec_err, mcmc_data['transmission_spec_err']))
all_wave_array = np.concatenate(
(all_wave_array, mcmc_data['wave_array']))
all_time_array = np.concatenate(
(all_time_array, mcmc_data['time_array']))
all_planet_RVsinusoid = np.concatenate(
(all_planet_RVsinusoid, mcmc_data['planet_RVsinusoid']))
all_observations = np.concatenate(
(all_observations, mcmc_data['observations']))
all_n_obs += mcmc_data['n_obs']
if jitter_flag:
all_jitter_index = np.concatenate(
(all_jitter_index, n_jitter*np.ones(np.shape(mcmc_data['wave_array']), dtype=np.int16)))
n_jitter += 1
except NameError:
""" This error is expected when retrieving the data of the first night"""
all_clv_rm_radius = mcmc_data['clv_rm_radius']
all_clv_rm_grid = mcmc_data['clv_rm_grid']
all_transmission_spec = mcmc_data['transmission_spec']
all_transmission_spec_err = mcmc_data['transmission_spec_err']
all_wave_array = mcmc_data['wave_array']
all_time_array = mcmc_data['time_array']
all_planet_RVsinusoid = mcmc_data['planet_RVsinusoid']
all_observations = mcmc_data['observations']
all_n_obs = mcmc_data['n_obs']
all_n_radius_grid = mcmc_data['n_radius_grid']
if jitter_flag:
all_jitter_index = n_jitter * \
np.ones(
np.shape(mcmc_data['wave_array']), dtype=np.int16)
n_jitter += 1
else:
all_jitter_index = None
all_mcmc_data = {
'observations': all_observations,
'clv_rm_grid': all_clv_rm_grid,
'transmission_spec': all_transmission_spec,
'transmission_spec_err': all_transmission_spec_err,
'wave_array': all_wave_array,
'time_array': all_time_array,
'planet_RVsinusoid': all_planet_RVsinusoid,
'clv_rm_radius': all_clv_rm_radius,
'n_obs': all_n_obs,
'n_radius_grid': all_n_radius_grid,
'jitter_index': all_jitter_index,
'n_jitter': n_jitter
}
save_to_cpickle(subroutine_name+'_data', all_mcmc_data,
config_in['output'], night='', lines=lines_label, it_string=it_string)
try:
results_dict = load_from_cpickle(subroutine_name + '_' + sampler_name+'_results',
config_in['output'], night='', lines=lines_label, it_string=it_string)
print(" Transmission MCMC analysis for lines {0:s} already performed ".format(
lines_label))
pams_dict = results_dict['pams_dict']
chain_med = results_dict['chain_med']
boundaries = results_dict['boundaries']
ndim = results_dict['ndim']
# TODO improve output
print(' *** sampler output ')
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}])'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0], boundaries[val, 1])
)
except FileNotFoundError:
lines_center, pams_dict, pams_list, boundaries, theta_start = define_theta_array(
model_case, lines_dict, planet_dict, n_jitter)
ndim = len(theta_start)
if pams_dict.get('rp_factor', False):
pam_id = pams_dict['rp_factor']
boundaries[pam_id, :] = [clv_rm_radius[0], clv_rm_radius[-1]]
ndata = np.size(all_wave_array)
print()
print(' PyDE + emcee parameters')
print(' n_dim: {0:9.0f}'.format(ndim))
print(
' n_walkers: (default: 2*ndim) {0:9.0f}'.format(nwalkers))
print(' n_gen: (default: 64000) {0:9.0f}'.format(ngen))
print(' n_steps: (default: 20000) {0:9.0f}'.format(nsteps))
print(
' n_burnin: (default: 10000) {0:9.0f}'.format(nburnin))
print(' n_thin: (default: 50) {0:9.0f}'.format(nthin))
population, sampler_chain, sampler_lnprobability, point_start = emcee_lines_fit_functions(
model_case,
all_wave_array,
all_transmission_spec,
all_transmission_spec_err,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index,
prior_dict,
theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin)
flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP = \
emcee_flatten_median(population, sampler_chain,
sampler_lnprobability, nburnin, nthin, nwalkers)
emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim)
med_lines_model, med_clv_model, med_lines_array, med_planet_K, med_planet_R, med_jitter = \
return_model(model_case,
chain_med[:, 0],
all_wave_array,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
map_lines_model, map_clv_model, map_lines_array, map_planet_K, map_planet_R, map_jitter = \
return_model(model_case,
chain_MAP,
all_wave_array,
all_clv_rm_radius,
all_clv_rm_grid,
all_planet_RVsinusoid,
lines_center,
all_jitter_index)
results_dict = {
'sampler_name': sampler_name,
'ndim': ndim,
'nwalkers': nwalkers,
'nthin': nthin,
'nsteps': nsteps,
'nburnin': nburnin,
'ndata': ndata,
'pams_dict': pams_dict,
'population': population,
'sampler_chain': sampler_chain,
'sampler_lnprobability': sampler_lnprobability,
'theta_start': theta_start,
'boundaries': boundaries,
'flat_chain': flat_chain,
'flat_lnprob': flat_lnprob,
'chain_med': chain_med,
'chain_MAP': chain_MAP,
'lnprob_med': lnprob_med,
'lnprob_MAP': lnprob_MAP,
'lines_center': lines_center,
'point_start': point_start,
'theta_start': theta_start,
}
results_dict['results'] = {
'lines_model': med_lines_model,
'clv_model': med_clv_model,
'lines_array': med_lines_array,
'planet_K': med_planet_K,
'planet_R': med_planet_R,
'jitter': med_jitter
}
results_dict['results_MAP'] = {
'lines_model': map_lines_model,
'clv_model': map_clv_model,
'lines_array': map_lines_array,
'planet_K': map_planet_K,
'planet_R': map_planet_R,
'jitter': map_jitter
}
results_dict['results']['observational_pams'] = {}
results_dict['results_MAP']['observational_pams'] = {}
for night in night_dict:
lists = load_from_cpickle('lists', config_in['output'], night)
observational_pams = load_from_cpickle(
'observational_pams', config_in['output'], night)
""" No differentiation by night """
for obs in lists['observations']:
results_dict['results']['observational_pams'][obs] = {}
results_dict['results_MAP']['observational_pams'][obs] = {}
""" RV shift from the observer RF to the planet RF
STRONG ASSUMPTIONS:
- there is only the transiting planet in the system
- the planet has null eccentricity
- linear approximation or the orbit near the transit event
Computation is performed by moving to the Solar Barycenter, than to the Stellar System Barycenter
and finally onto the planet
"""
results_dict['results']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_ORF2PRF'] = \
observational_pams[obs]['BERV'] \
- observational_pams['RV_star']['RV_systemic'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
""" RV shift from Stellar Rest Frame to Planetary Rest Frame
We have to take into account the RV of star relatively to the Barycenter
"""
results_dict['results']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
results_dict['results_MAP']['observational_pams'][obs]['rv_shift_SRF2PRF'] = \
+ observational_pams[obs]['RV_bjdshift'] \
- results_dict['results_MAP']['planet_K'] \
* (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
/ planet_dict['period'][0] * 2 * np.pi
save_to_cpickle(subroutine_name + '_'+sampler_name+'_results',
results_dict, config_in['output'], night='', lines=lines_label, it_string=it_string)
for key, val in pams_dict.items():
print('{0:24s} {1:4d} {2:12f} {3:12f} {4:12f} (15-84 p) ([{5:9f}, {6:9f}])'.format(key, val,
chain_med[val, 0],
chain_med[val, 2],
chain_med[val, 1],
boundaries[val, 0], boundaries[val, 1])
)
print('MCMC completed')
# Update planet parameters
# deprecated
# try:
# _ = load_from_cpickle(
# 'observational', config_in['output'], night, lines_label)
# print(" Transmission MCMC results for lines {0:s} already store in observational array".format(
# lines_label))
# except FileNotFoundError:
#
# results_full = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night='', lines=lines_label)
#
# for night in night_dict:
#
# results_night = load_from_cpickle('transmission_mcmc_'+sampler_name+'_results',
# config_in['output'], night=night, lines=lines_label)
# lists = load_from_cpickle('lists', config_in['output'], night)
# observational_pams = load_from_cpickle(
# 'observational_pams', config_in['output'], night)
# for obs in lists['observations']:
#
# """ RV shift from the observer RF to the planet RF
# STRONG ASSUMPTIONS:
# - there is only the transiting planet in the system
# - the planet has null eccentricity
# - linear approximation or the orbit near the transit event
#
# Computation is performed by moving to the Solar Barycenter, than to the Stellar System Barycenter
# and finally onto the planet
# """
# observational_pams[obs]['rv_shift_ORF2PRF'] = \
# observational_pams[obs]['BERV'] \
# - observational_pams['RV_star']['RV_systemic'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# """ RV shift from Stellar Rest Frame to Planetary Rest Frame
# We have to take into account the RV of star relatively to the Barycenter
# """
# observational_pams[obs]['rv_shift_SRF2PRF'] = \
# + observational_pams[obs]['RV_bjdshift'] \
# - results_full['results']['planet_K'] \
# * (observational_pams[obs]['BJD'] - observational_pams['time_of_transit']) \
# / planet_dict['period'][0] * 2 * np.pi
# observational_pams['Rp_factor'] = results_full['results']['planet_R']
# observational_pams['lines_array'] = results_full['results']['lines_array']
# observational_pams['jitter'] = results_full['results']['jitter']
# save_to_cpickle('observational', observational_pams,
# config_in['output'], night, lines_label)
| 56,597 | 46.24374 | 159 | py |
SLOPpy | SLOPpy-main/SLOPpy/check_differential_refraction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
from SLOPpy.subroutines.shortcuts import *
__all__ = ["check_differential_refraction", "plot_check_differential_refraction", "write_differential_refraction"]
subroutine_name = 'check_differential_refraction'
def check_differential_refraction(config_in):
    """Compare SLOPpy's differential-refraction correction with the DRS one.

    For every night in the configuration, the e2ds spectra are rebinned to
    the coadded 1D wavelength grid twice — once without and once with the
    SLOPpy refraction correction — and both are rescaled in a fixed band.
    The DRS polynomial flux correction is rebuilt from the FITS header
    coefficients for comparison. Results are cached as cpickle files
    ('check_differential_refraction[_processed]') and reused if present.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
    """
    night_dict = from_config_get_nights(config_in)
    for night in night_dict:
        try:
            # Cache hit: skip the whole night (bare except acts as the
            # cache-miss signal when the cpickle files are absent).
            processed = load_from_cpickle('check_differential_refraction_processed', config_in['output'], night)
            check_drc = load_from_cpickle('check_differential_refraction', config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            print()
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        # Same observations loaded twice: without and with the SLOPpy
        # differential-refraction correction applied.
        input_data = retrieve_observations(config_in['output'], night, lists['observations'],
                                           use_refraction=False, use_telluric=False)
        input_data_corrected = retrieve_observations(config_in['output'], night, lists['observations'],
                                                     use_refraction=True, use_telluric=False)
        input_data_s1d = load_from_cpickle('input_dataset_s1d_fibA', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        check_drc = {
            'subroutine': subroutine_name,
            'wave': input_data['coadd']['wave']
        }
        processed = {
            'subroutine': subroutine_name
        }
        for obs in lists['observations']:
            processed[obs] = {
                'n_orders': input_data[obs]['n_orders'],
                'n_pixels': input_data[obs]['n_pixels']
            }
            """ for plotting purpose only"""
            #processed[obs]['e2ds'] = input_data[obs]['e2ds']
            #processed[obs]['e2ds_err'] = input_data[obs]['e2ds_err']
            #processed[obs]['flux'] = input_data[obs]['e2ds']/calib_data['blaze']/input_data[obs]['step']
            #processed[obs]['flux_err'] = np.sqrt(input_data[obs]['e2ds'])/calib_data['blaze']/input_data[obs]['step']
            preserve_flux = input_data[obs].get('absolute_flux', True)
            # Uncorrected e2ds -> 1D on the coadd grid, shifted to the BRF.
            processed[obs]['flux_s1d'] = \
                rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'], input_data[obs]['e2ds'],
                               calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])
            """
            processed[obs]['flux_s1d_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'], input_data[obs]['e2ds_err'],
                               calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=0.00, is_error=True)
            """
            # NOTE(review): the error array is just an alias of the flux —
            # proper error rebinning above is commented out; confirm intended.
            processed[obs]['flux_s1d_err'] = processed[obs]['flux_s1d']
            processed[obs]['s1d_rescaling'], processed[obs]['s1d_rescaled'], processed[obs]['s1d_rescaled_err'] = \
                perform_rescaling(input_data['coadd']['wave'],
                                  processed[obs]['flux_s1d'],
                                  processed[obs]['flux_s1d_err'],
                                  [5450.0, 5550.0])
            #observational_pams['wavelength_rescaling'])
            """ for plotting purpose only"""
            #processed[obs]['e2ds_corr'] = input_data_corrected[obs]['e2ds']
            #processed[obs]['e2ds_err_corr'] = input_data_corrected[obs]['e2ds_err']
            #processed[obs]['flux_corr'] = input_data_corrected[obs]['e2ds']/calib_data['blaze']/input_data_corrected[obs]['step']
            #processed[obs]['flux_err_corr'] = np.sqrt(input_data_corrected[obs]['e2ds'])/calib_data['blaze']/input_data_corrected[obs]['step']
            # Refraction-corrected e2ds -> 1D, same grid and RV shift.
            processed[obs]['flux_s1d_corr'] = \
                rebin_2d_to_1d(input_data_corrected[obs]['wave'],
                               input_data_corrected[obs]['step'],
                               input_data_corrected[obs]['e2ds'],
                               calib_data['blaze'],
                               input_data_corrected['coadd']['wave'],
                               input_data_corrected['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])
            """
            processed[obs]['flux_s1d_corr_err'] = \
                rebin_2d_to_1d(input_data_corrected[obs]['wave'],
                               input_data_corrected[obs]['step'],
                               input_data_corrected[obs]['e2ds_err'],
                               calib_data['blaze'],
                               input_data_corrected['coadd']['wave'],
                               input_data_corrected['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=0.00,
                               is_error=True)
            """
            processed[obs]['flux_s1d_corr_err'] = processed[obs]['flux_s1d_corr']
            processed[obs]['s1d_corr_rescaling'], processed[obs]['s1d_corr_rescaled'], processed[obs]['s1d_corr_rescaled_err'] = \
                perform_rescaling(input_data['coadd']['wave'],
                                  processed[obs]['flux_s1d_corr'],
                                  processed[obs]['flux_s1d_corr_err'],
                                  [5450.0, 5550.0])
            # Ratio of corrected over uncorrected rescaled spectra = the
            # effective SLOPpy differential-refraction correction function.
            processed[obs]['dr_correction'] = processed[obs]['s1d_corr_rescaled']/processed[obs]['s1d_rescaled']
            processed[obs]['s1d_DRS_rescaling'], processed[obs]['s1d_DRS_rescaled'], processed[obs]['s1d_DRS_rescaled_err'] = \
                perform_rescaling(input_data_s1d[obs]['wave'],
                                  input_data_s1d[obs]['flux'],
                                  np.sqrt(np.abs(input_data_s1d[obs]['flux'])),
                                  [5450.0, 5550.0])
            #observational_pams['wavelength_rescaling'])
            # Rebuild the DRS flux-correction polynomial from the header
            # coefficients; missing keywords simply terminate the sum.
            processed[obs]['DRS_coeff_flux'] = []
            processed[obs]['DRS_corr'] = np.zeros(input_data_s1d[obs]['size'], dtype=np.double)
            for coeff_index in np.arange(0, 10, 1, dtype=np.int16):
                try:
                    keyword = 'HIERARCH TNG DRS FLUX CORR COEFF' + repr(coeff_index)
                    processed[obs]['DRS_coeff_flux'].extend([input_data[obs]['header']['ccf'][keyword]])
                    processed[obs]['DRS_corr'] += \
                        input_data[obs]['header']['ccf'][keyword] * \
                        np.power(input_data_s1d[obs]['wave'], coeff_index)
                except:
                    continue
            processed[obs]['DRS_corr_rescaling'], processed[obs]['DRS_corr_rescaled'], _ = \
                perform_rescaling(input_data_s1d[obs]['wave'],
                                  processed[obs]['DRS_corr'],
                                  processed[obs]['DRS_corr'],
                                  observational_pams['wavelength_rescaling'])
            check_drc[obs] = {
                's1d': {
                    'wave': input_data['coadd']['wave'],
                    'flux': processed[obs]['flux_s1d'],
                    #'flux_err': processed[obs]['flux_s1d_err'],
                    'rescaled': processed[obs]['s1d_rescaled'],
                    #'rescaled_err': processed[obs]['s1d_rescaled_err']
                },
                's1d_corr': {
                    'correction': processed[obs]['dr_correction'],
                    'correction_rescaled': processed[obs]['dr_correction'],
                    'flux': processed[obs]['flux_s1d_corr'],
                    #'flux_err': processed[obs]['flux_s1d_corr_err'],
                    'rescaled': processed[obs]['s1d_corr_rescaled'],
                    #'rescaled_err': processed[obs]['s1d_corr_rescaled_err']
                },
                'DRS_s1d':{
                    'wave': input_data_s1d[obs]['wave'],
                    'flux': input_data_s1d[obs]['flux'],
                    #'flux_err': np.sqrt(input_data_s1d[obs]['flux']+0.1),
                    'rescaled': processed[obs]['s1d_DRS_rescaled'],
                    #'rescaled_err': processed[obs]['s1d_DRS_rescaled_err']
                },
                'DRS_s1d_corr': {
                    'correction': processed[obs]['DRS_corr'],
                    'correction_rescaled': processed[obs]['DRS_corr_rescaled'],
                    'flux': input_data_s1d[obs]['flux']/processed[obs]['DRS_corr'],
                    #'flux_err': np.sqrt(np.abs(input_data_s1d[obs]['flux']))/processed[obs]['DRS_corr'],
                    'rescaled': processed[obs]['s1d_DRS_rescaled']/processed[obs]['DRS_corr_rescaled'],
                    #'rescaled_err': processed[obs]['s1d_DRS_rescaled_err']/processed[obs]['DRS_corr_rescaled'],
                },
            }
        save_to_cpickle('check_differential_refraction_processed', processed, config_in['output'], night)
        save_to_cpickle('check_differential_refraction', check_drc, config_in['output'], night)
        print('Night ', night, ' completed')
        print()
def plot_check_differential_refraction(config_in, night_input=''):
    """Plot SLOPpy vs DRS spectra, correction functions and corrected spectra.

    Requires check_differential_refraction() to have been run for the
    requested night(s); all data are read back from the cpickle archive.
    Three figures per night are shown: input s1d spectra, the correction
    functions, and the corrected spectra, color-coded by BJD.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
        night_input (str): restrict plotting to a single night; an empty
            string (default) loops over every night in the configuration.
    """
    night_dict = from_config_get_nights(config_in)
    if night_input == '':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)
    for night in night_list:
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        try:
            """ Retrieving the analysis"""
            check_drc = load_from_cpickle('check_differential_refraction', config_in['output'], night)
        except:
            print("   Failed in retrieving the data")
            return
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        """ Creation of the color array, based on the BJD of the observations
        """
        # NOTE(review): `am` and `n_obs` are computed but never used below.
        bjd = []
        am = []
        for obs in lists['observations']:
            bjd.append(observational_pams[obs]['BJD'] - 2450000.0)
            am.append(observational_pams[obs]['AIRMASS'])
        n_obs = len(lists['observations']) * 1.0 + 1.
        colors = np.asarray(bjd)
        cmap = plt.cm.viridis
        line_colors = cmap(np.linspace(0, 1, len(lists['observations'])))
        # --- Figure 1: input s1d spectra (SLOPpy top, DRS bottom) ---
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        gs.update(hspace=0.1)
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])
        for i, obs in enumerate(lists['observations']):
            sel = (check_drc[obs]['s1d']['rescaled'] > -1000000.05)
            # Each observation is vertically offset by i/5 for readability.
            ax1.plot(check_drc['wave'][sel],
                     check_drc[obs]['s1d']['rescaled'][sel]+i/5.,
                     c = line_colors[i], lw = 1, alpha = 1)
            ax2.plot(check_drc[obs]['DRS_s1d']['wave'],
                     check_drc[obs]['DRS_s1d']['rescaled']+i/5.,
                     c = line_colors[i], lw = 1, alpha = 1)
            i_max = 1.5 + i/5.
        ax1.set_xlim(check_drc['wave'][0], check_drc['wave'][-1])
        ax1.set_ylim(0.00, i_max)
        ax1.legend(loc=3)
        ax1.set_title('Night: {0:s} \n SLOPpy input s1d'.format(night))
        ax2.set_title('DRS input s1d')
        ax2.set_xlabel('$\lambda$ [$\AA$]')
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
        # --- Figure 2: correction functions (SLOPpy top, DRS bottom) ---
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        #gs.update(wspace=0.025, hspace=0.05)
        gs.update(hspace=0.1)
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])
        for i, obs in enumerate(lists['observations']):
            sel = (check_drc[obs]['s1d']['flux']> 0.05)
            #ax1.plot(check_drc['wave'][sel],
            #         check_drc[obs]['s1d'][sel]+i/5.,
            #         c=line_colors[i], lw=1, alpha=1.0, zorder=0)
            ax1.plot(check_drc['wave'],
                     check_drc[obs]['s1d_corr']['correction_rescaled']+i/5.,
                     c=line_colors[i], lw=1, alpha=1)
            #ax2.plot(check_drc[obs]['wave_DRS'],
            #         check_drc[obs]['s1d_DRS']+i/5.,
            #         c=line_colors[i], lw=1, alpha=1)
            # DRS stores the multiplicative polynomial, so its inverse is
            # plotted to match the SLOPpy convention.
            ax2.plot(check_drc[obs]['DRS_s1d']['wave'],
                     1./check_drc[obs]['DRS_s1d_corr']['correction_rescaled']+i/5.,
                     c=line_colors[i], lw=1, alpha=1)
            i_max = 1.5 + i/5.
        #ax1.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
        #ax2.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
        ax1.set_xlim(check_drc['wave'][0], check_drc['wave'][-1])
        ax1.set_ylim(0.00, i_max)
        ax1.legend(loc=3)
        ax1.set_title('Night: {0:s} \n SLOPpy correction function'.format(night))
        ax2.set_title('DRS correction function')
        ax1.set_xlabel('$\lambda$ [$\AA$]')
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
        # --- Figure 3: corrected spectra (SLOPpy top, DRS bottom) ---
        fig = plt.figure(figsize=(12, 6))
        gs = GridSpec(2, 2, width_ratios=[50, 1])
        #gs.update(wspace=0.025, hspace=0.05)
        gs.update(hspace=0.1)
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[1, 0], sharex=ax1, sharey=ax1)
        cbax1 = plt.subplot(gs[:, 1])
        for i, obs in enumerate(lists['observations']):
            sel = (check_drc[obs]['s1d_corr']['rescaled']> 0.05)
            ax1.plot(check_drc['wave'][sel],
                     check_drc[obs]['s1d_corr']['rescaled'][sel]+i/5.,
                     c=line_colors[i], lw=1, alpha=0.5)
            ax2.plot(check_drc[obs]['DRS_s1d']['wave'],
                     check_drc[obs]['DRS_s1d_corr']['rescaled']+i/5.,
                     c=line_colors[i], lw=1, alpha=0.5)
            i_max = 1.5 + i/5.
        #ax1.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
        #ax2.plot(processed['coadd']['wave'], processed['coadd']['rescaled'], c='k', lw=1)
        ax1.set_xlim(check_drc['wave'][0], check_drc['wave'][-1])
        ax1.set_ylim(0.00, i_max)
        ax1.legend(loc=3)
        ax1.set_title('Night: {0:s} \n SLOPpy corrected spectra'.format(night))
        ax2.set_title('DRS corrected spectra')
        ax2.set_xlabel('$\lambda$ [$\AA$]')
        sm = plt.cm.ScalarMappable(cmap=cmap, norm=plt.Normalize(vmin=colors[0], vmax=colors[-1]))
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        fig.subplots_adjust(wspace=0.05, hspace=0.4)
        plt.show()
def write_differential_refraction(config_in):
    """Write refraction-corrected e2ds and s1d spectra to FITS files.

    For every night, the SLOPpy-corrected spectra (rebinned to the coadd
    grid in the Barycentric and Stellar reference frames) and the
    DRS-corrected spectra (header polynomial) are written to per-night
    directories '<night>_SLOPpy_drc/' and '<night>_DRS_drc/'. Original
    header keywords are propagated, with CRVAL1/CDELT1 fixed to describe
    the new wavelength grids.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
    """
    night_dict = from_config_get_nights(config_in)
    for night in night_dict:
        print()
        print('write_differential_refraction            Night: ', night)
        print()
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations(config_in['output'], night, lists['observations'],
                                           use_refraction=True)
        input_data_s1d = load_from_cpickle('input_dataset_s1d_fibA', config_in['output'], night)
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        # Let's keep it simple to save memory
        dir_SLOPpy_drc = night + '_SLOPpy_drc/'
        dir_DRS_drc = night + '_DRS_drc/'
        os.system('mkdir -p ' + dir_SLOPpy_drc)
        os.system('mkdir -p ' + dir_DRS_drc)
        for obs in lists['observations']:
            # `processed` is rebuilt per observation to keep memory low.
            processed = dict(n_orders=input_data[obs]['n_orders'], n_pixels=input_data[obs]['n_pixels'])
            preserve_flux = input_data[obs].get('absolute_flux', True)
            # Corrected flux and error on the coadd grid, Barycentric RF.
            processed['flux_s1d_BRF_corr'] = \
                rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds'],
                               calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'])
            processed['flux_s1d_BRF_corr_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds_err'],
                               calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2BRF'], is_error=True)
            # Same, in the Stellar reference frame.
            processed['flux_s1d_SRF_corr'] = \
                rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds'],
                               calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
            processed['flux_s1d_SRF_corr_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'], input_data[obs]['step'],input_data[obs]['e2ds_err'],
                               calib_data['blaze'], input_data['coadd']['wave'], input_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'], is_error=True)
            # Rebuild the DRS flux-correction polynomial (s1d and e2ds
            # grids); missing header keywords terminate the series.
            processed['DRS_coeff_flux'] = []
            processed['DRS_s1d_corr'] = np.zeros(input_data_s1d[obs]['size'], dtype=np.double)
            processed['DRS_e2ds_corr'] = np.zeros(np.shape(input_data[obs]['wave']), dtype=np.double)
            for coeff_index in np.arange(0, 10, 1, dtype=np.int16):
                try:
                    keyword = 'HIERARCH TNG DRS FLUX CORR COEFF' + repr(coeff_index)
                    processed['DRS_coeff_flux'].extend([input_data[obs]['header']['ccf'][keyword]])
                    processed['DRS_s1d_corr'] += \
                        input_data[obs]['header']['ccf'][keyword] * \
                        np.power(input_data_s1d[obs]['wave'], coeff_index)
                    processed['DRS_e2ds_corr'] += \
                        input_data[obs]['header']['ccf'][keyword] * \
                        np.power(input_data[obs]['wave'], coeff_index)
                except:
                    continue
            # Divide the observed flux by the polynomial correction.
            processed['DRS_s1d_corr'] = input_data_s1d[obs]['flux']/processed['DRS_s1d_corr']
            processed['DRS_e2ds_corr'] = input_data[obs]['e2ds']/processed['DRS_e2ds_corr']
            """Saving the e2ds files"""
            hdu_e2ds_SLOPpy = fits.PrimaryHDU()
            hdu_e2ds_DRS = fits.PrimaryHDU()
            hdu_e2ds_SLOPpy.data = np.asarray(input_data[obs]['e2ds'], dtype=np.float32)
            hdu_e2ds_DRS.data = np.asarray(processed['DRS_e2ds_corr'], dtype=np.float32)
            # Propagate the original header, skipping the structural
            # keywords that astropy manages itself.
            for key_name, key_val in input_data[obs]['header']['e2ds'].items():
                if key_name == 'SIMPLE' or \
                        key_name=='BITPIX' or \
                        key_name == 'NAXIS' or \
                        key_name == 'NAXIS1' or \
                        key_name == 'NAXIS2':
                    continue
                if len(key_name) > 8:
                    # FITS keywords longer than 8 chars need the HIERARCH convention.
                    hdu_e2ds_SLOPpy.header['HIERARCH '+ key_name] = key_val
                    hdu_e2ds_DRS.header['HIERARCH '+ key_name] = key_val
                else:
                    hdu_e2ds_SLOPpy.header[key_name] = key_val
                    hdu_e2ds_DRS.header[key_name] = key_val
            hdu_e2ds_SLOPpy.writeto(dir_SLOPpy_drc + obs + '_e2ds_A.fits', overwrite=True)
            hdu_e2ds_DRS.writeto(dir_DRS_drc + obs + '_e2ds_A.fits', overwrite=True)
            """Saving the s1d files"""
            hdu_s1d_SLOPpy_BRF = fits.PrimaryHDU()
            hdu_s1d_SLOPpy_SRF = fits.PrimaryHDU()
            hdu_s1d_DRS = fits.PrimaryHDU()
            hdu_s1d_SLOPpy_BRF.data = np.asarray(processed['flux_s1d_BRF_corr'], dtype=np.float32)
            hdu_s1d_SLOPpy_SRF.data = np.asarray(processed['flux_s1d_SRF_corr'], dtype=np.float32)
            hdu_s1d_DRS.data = np.asarray(processed['DRS_s1d_corr'], dtype=np.float32)
            for key_name, key_val in input_data[obs]['header']['s1d'].items():
                if key_name == 'SIMPLE' or \
                        key_name=='BITPIX' or \
                        key_name == 'NAXIS' or \
                        key_name == 'NAXIS1' or \
                        key_name == 'NAXIS2':
                    continue
                if len(key_name) > 8:
                    hdu_s1d_SLOPpy_BRF.header['HIERARCH '+ key_name] = key_val
                    hdu_s1d_SLOPpy_SRF.header['HIERARCH '+ key_name] = key_val
                    hdu_s1d_DRS.header['HIERARCH '+ key_name] = key_val
                else:
                    hdu_s1d_SLOPpy_BRF.header[key_name] = key_val
                    hdu_s1d_SLOPpy_SRF.header[key_name] = key_val
                    hdu_s1d_DRS.header[key_name] = key_val
            """ Fixing SLOPpy s1d keywords """
            hdu_s1d_SLOPpy_BRF.header['CRVAL1'] = input_data['coadd']['wave'][0]
            hdu_s1d_SLOPpy_BRF.header['CDELT1'] = input_data['coadd']['step'][0]
            hdu_s1d_SLOPpy_SRF.header['CRVAL1'] = input_data['coadd']['wave'][0]
            hdu_s1d_SLOPpy_SRF.header['CDELT1'] = input_data['coadd']['step'][0]
            """ Fixing DRS s1d keywords """
            hdu_s1d_DRS.header['CRVAL1'] = input_data_s1d[obs]['wave'][0]
            hdu_s1d_DRS.header['CDELT1'] = input_data_s1d[obs]['step'][0]
            hdu_s1d_SLOPpy_BRF.writeto(dir_SLOPpy_drc + obs + '_s1d_A.fits', overwrite=True)
            hdu_s1d_SLOPpy_SRF.writeto(dir_SLOPpy_drc + obs + '_s1d_A_SRF.fits', overwrite=True)
            hdu_s1d_DRS.writeto(dir_DRS_drc + obs + '_s1d_A.fits', overwrite=True)
        print()
print('Night ', night, ' completed') | 23,686 | 45.083658 | 143 | py |
SLOPpy | SLOPpy-main/SLOPpy/sky_correction.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.plot_subroutines import *
__all__ = ["compute_sky_correction", "plot_sky_correction"]
subroutine_name = 'sky_correction'
def compute_sky_correction(config_in):
    """Subtract the sky spectrum (fiber B) from the target spectrum (fiber A).

    The fiber A/B lamp-flux ratio rescales the fiber-B sky spectrum onto
    fiber A before subtraction; errors are propagated in quadrature.
    Nights without a fiber-B dataset are skipped; results are cached as
    'skycorrected_fibA' cpickle files and reused on subsequent calls.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
    """
    night_dict = from_config_get_nights(config_in)
    for night in night_dict:
        try:
            # Cache hit: nothing to do for this night.
            processed = load_from_cpickle('skycorrected_fibA', config_in['output'], night)
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except:
            """ Retrieving the list of observations"""
            lists = load_from_cpickle('lists', config_in['output'], night)
            """ Retrieving the observations and calibration data for fiber B, if they exist"""
            try:
                input_data_B = load_from_cpickle('input_dataset_fibB', config_in['output'], night)
                calib_data_B = load_from_cpickle('calibration_fibB', config_in['output'], night)
                print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Computing'))
            except:
                print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Skipped'))
                continue
        """ Retrieving the observations and calibration data for fiber A"""
        print()
        print("  Retrieving the data for night ", night)
        input_data_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
        calib_data_A = load_from_cpickle('calibration_fibA', config_in['output'], night)
        # Index maps pairing the echelle orders of fiber A with fiber B.
        map_orders_A = calib_data_A['fibAB_orders_match']
        map_orders_B = calib_data_B['fibAB_orders_match']
        """map_orders_A = [0,1,2,3] = map_orders_B"""
        processed = {
            'subroutine': 'sky_correction'
        }
        for obs in lists['observations']:
            processed[obs] = {}
        " computing the ratio between the lamp flux of fiber A and B"
        print()
        print("   Computing the ratio between the lamp flux of fiber A and B")
        processed['ratioAB'] = calib_data_A['lamp'][map_orders_A, :]/calib_data_B['lamp'][map_orders_B, :]
        first_obs = lists['observations'][0]
        wave_difference = \
            input_data_A[first_obs]['wave'][map_orders_A, :] - input_data_B[first_obs]['wave'][map_orders_B, :]
        print()
        print("   Wavelength difference between fiber A and B: ", \
            np.average(wave_difference), " +- ", np.std(wave_difference), " \AA")
        # A pixel-scale rebinning of fiber B would be required for larger
        # offsets; only the small-shift case is implemented.
        if np.abs(np.average(wave_difference)) > 0.006 or np.std(wave_difference) > 0.006:
            raise ValueError("TO BE IMPLEMENTED!!!!!!! ")
            quit()
        else:
            """ We assume that the relative RV shift between finber A and fiber B in the pixel scale is
                is minimal """
            for obs in lists['observations']:
                # Sky spectrum rescaled onto fiber A via the lamp ratio.
                processed[obs]['sky_fibA'] = np.zeros([input_data_A['n_orders'], input_data_A['n_pixels']])
                processed[obs]['sky_fibA'][map_orders_A, :] = \
                    processed['ratioAB'] * input_data_B[obs]['e2ds'][map_orders_B, :]
                processed[obs]['e2ds'] = input_data_A[obs]['e2ds'] - processed[obs]['sky_fibA']
                # Quadrature propagation of the two independent errors.
                processed[obs]['e2ds_err'] = np.sqrt(
                    input_data_A[obs]['e2ds_err'][map_orders_A, :] ** 2 +
                    (processed['ratioAB'] * input_data_B[obs]['e2ds_err'][map_orders_B, :]) ** 2)
                """ Zero or negative values are identified, flagged and substituted with another value """
                #replacement = 0.1
                #processed[obs]['null'] = (processed[obs]['e2ds'] <= replacement)
                #processed[obs]['e2ds'][processed[obs]['null']] = replacement
        save_to_cpickle('skycorrected_fibA', processed, config_in['output'], night)
def plot_sky_correction(config_in, night_input=''):
    """Plot target, sky and sky-subtracted spectra for each night.

    Requires compute_sky_correction() to have been run. Nights without a
    fiber-B dataset are skipped with a message. Points are color-coded by
    mid-exposure BJD.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
        night_input (str): restrict plotting to a single night; an empty
            string (default) loops over every night in the configuration.
    """
    night_dict = from_config_get_nights(config_in)
    if night_input=='':
        night_list = night_dict
    else:
        night_list = np.atleast_1d(night_input)
    for night in night_list:
        print("plot_sky_correction                      Night: ", night)
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        """ Retrieving the observations and calibration data for fiber B, if they exist"""
        try:
            input_data_B = load_from_cpickle('input_dataset_fibB', config_in['output'], night)
            calib_data_B = load_from_cpickle('calibration_fibB', config_in['output'], night)
        except:
            print("No fiber_B dataset available, skipping sky correction plot")
            continue
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        input_data_A = load_from_cpickle('input_dataset_fibA', config_in['output'], night)
        processed = load_from_cpickle('skycorrected_fibA', config_in['output'], night)
        colors_properties, colors_plot, colors_scatter = make_color_array_matplotlib3(lists, observational_pams)
        fig, gs, cbax1, ax1, ax2, ax3 = grid_3plot_small()
        for i, obs in enumerate(lists['observations']):
            for k in range(0, input_data_B[obs]['n_orders']):
                # Attach the legend label only once (first order of first obs).
                if i == 0 and k == 0:
                    ax2.scatter(input_data_B[obs]['wave'][k, :], input_data_B[obs]['e2ds'][k, :],
                                c=colors_scatter['mBJD'][obs], s=2, alpha=0.5, label='Sky observations (ORF)')
                else:
                    ax2.scatter(input_data_B[obs]['wave'][k, :], input_data_B[obs]['e2ds'][k, :],
                                c=colors_scatter['mBJD'][obs], s=2, alpha=0.5)
            for k in range(0, input_data_A[obs]['n_orders']):
                if i == 0 and k == 0:
                    ax1.scatter(input_data_A[obs]['wave'][k, :], input_data_A[obs]['e2ds'][k, :],
                                c=colors_scatter['mBJD'][obs], s=1, alpha=0.5, label='Target observations (ORF)')
                else:
                    ax1.scatter(input_data_A[obs]['wave'][k, :], input_data_A[obs]['e2ds'][k, :],
                                c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)
                ax3.scatter(input_data_A[obs]['wave'][k, :], processed[obs]['e2ds'][k, :],
                            c=colors_scatter['mBJD'][obs], s=1, alpha=0.5)
        ax1.set_title('Night: {0:s} \n Input spectra'.format(night))
        ax1.legend(loc=1)
        ax2.set_title('Sky spectrum from fiber B')
        ax3.set_title('After Sky correction')
        ax3.set_xlabel('$\lambda$ [$\AA$]')
        sm = plt.cm.ScalarMappable(cmap=colors_properties['cmap'], norm=colors_properties['norm']['mBJD'])
        sm.set_array([])  # You have to set a dummy-array for this to work...
        cbar = plt.colorbar(sm, cax=cbax1)
        cbar.set_label('BJD - 2450000.0')
        plt.show()
| 7,124 | 43.53125 | 113 | py |
SLOPpy | SLOPpy-main/SLOPpy/write_output_spectra.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.spectral_subroutines import *
from SLOPpy.subroutines.io_subroutines import *
from SLOPpy.subroutines.fit_subroutines import *
from SLOPpy.subroutines.shortcuts import *
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.clv_rm_subroutines import *
__all__ = ['write_output_spectra']
def write_output_spectra(config_in):
    """Build the per-night output dictionary of SRF-rebinned spectra.

    Each observation's e2ds spectrum (and error) is rebinned onto the
    shared coadd grid in the Stellar Reference Frame, rescaled, optionally
    divided by the CLV+RM model correction, and tagged with its orbital
    phase. Results are cached as '<subroutine_name>' cpickle files; a
    cached result computed without CLV/RM correction is recomputed once
    the modelling becomes available.

    Args:
        config_in (dict): parsed SLOPpy configuration dictionary.
    """
    subroutine_name = 'write_output_spectra'
    clv_rm_correction = True
    night_dict = from_config_get_nights(config_in)
    instrument_dict = from_config_get_instrument(config_in)
    system_dict = from_config_get_system(config_in)
    planet_dict = from_config_get_planet(config_in)
    shared_data = load_from_cpickle('shared', config_in['output'])
    lightcurve_dict = from_config_get_transmission_lightcurve(config_in)
    for night in night_dict:
        # CLV/RM correction is applied only if its modelling exists on disk.
        clv_rm_correction = True
        try:
            clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
        except:
            clv_rm_correction = False
        message = 'Computing'
        try:
            output_spectra = load_from_cpickle(subroutine_name, config_in['output'], night)
            if clv_rm_correction and not output_spectra['clv_rm_correction']:
                # Cached result predates the CLV/RM modelling: force recompute.
                message = 'Updating with CLV-corrected spectra'
                raise ValueError()
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, 'Retrieved'))
            continue
        except:
            print("{0:45s} Night:{1:15s} {2:s}".format(subroutine_name, night, message))
            print()
        """ Retrieving the list of observations"""
        lists = load_from_cpickle('lists', config_in['output'], night)
        calib_data = load_from_cpickle('calibration_fibA', config_in['output'], night)
        input_data = retrieve_observations( config_in['output'], night, lists['observations'])
        observational_pams = load_from_cpickle('observational_pams', config_in['output'], night)
        output_spectra = {
            'subroutine': subroutine_name,
            'clv_rm_correction': clv_rm_correction
        }
        """ Adding the C-bands arrays to the dictionary"""
        if clv_rm_correction:
            clv_rm_modelling = load_from_cpickle('clv_rm_modelling', config_in['output'], night)
        for n_obs, obs in enumerate( lists['observations']):
            output_spectra[obs] = {}
            output_spectra[obs]['BJD'] = input_data[obs]['BJD']
            preserve_flux = input_data[obs].get('absolute_flux', True)
            output_spectra[obs]['SRF_rebinned'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds'],
                               calib_data['blaze'],
                               shared_data['coadd']['wave'],
                               shared_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
            # NOTE(review): the error array is rebinned like a flux
            # (no is_error=True / quadrature) — confirm intended.
            output_spectra[obs]['SRF_rebinned_err'] = \
                rebin_2d_to_1d(input_data[obs]['wave'],
                               input_data[obs]['step'],
                               input_data[obs]['e2ds_err'],
                               calib_data['blaze'],
                               shared_data['coadd']['wave'],
                               shared_data['coadd']['step'],
                               preserve_flux=preserve_flux,
                               rv_shift=observational_pams[obs]['rv_shift_ORF2SRF'])
            output_spectra[obs]['SRF_rescaling'], \
                output_spectra[obs]['SRF_rescaled'], \
                output_spectra[obs]['SRF_rescaled_err'] = perform_rescaling(
                    shared_data['coadd']['wave'], output_spectra[obs]['SRF_rebinned'], output_spectra[obs]['SRF_rebinned_err'],
                    observational_pams['wavelength_rescaling'])
            if clv_rm_correction:
                rv_shift = 0.0 # we always stay in SRF
                correction, _ = clv_rm_correction_factor_computation(
                    clv_rm_modelling, shared_data['coadd']['wave'], shared_data['coadd']['step'], rv_shift, obs)
                output_spectra[obs]['SRF_clv_rm_correction'] = correction
                output_spectra[obs]['SRF_clv_rm_rebinned'] = output_spectra[obs]['SRF_rebinned'] / correction
                output_spectra[obs]['SRF_clv_rm_rebinned_err'] = output_spectra[obs]['SRF_rebinned_err'] / correction
                output_spectra[obs]['SRF_clv_rm_rescaled'] = output_spectra[obs]['SRF_rescaled'] / correction
                output_spectra[obs]['SRF_clv_rm_rescaled_err'] = output_spectra[obs]['SRF_rescaled_err'] / correction
            # time_of_transit may be stored as a scalar or a 1-element
            # array depending on the configuration version.
            try:
                output_spectra[obs]['phase'] = \
                    (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'][0])/planet_dict['period'][0]
            except:
                output_spectra[obs]['phase'] = \
                    (observational_pams[obs]['BJD'] - night_dict[night]['time_of_transit'])/planet_dict['period'][0]
        save_to_cpickle(subroutine_name, output_spectra, config_in['output'], night)
| 5,351 | 43.97479 | 123 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/rebin_subroutines.py | from __future__ import print_function, division
import numpy as np
from scipy.interpolate import interp1d
from SLOPpy.subroutines.constants import *
def shift_wavelength(wave, step, rv_shift):
    """Doppler-shift a wavelength grid and its bin sizes by ``rv_shift`` (km/s).

    Uses the non-relativistic factor 1 + v/c; both arrays are scaled by it.
    """
    doppler_factor = 1.00000 + rv_shift / speed_of_light_km
    return wave * doppler_factor, step * doppler_factor
def shift_wavelength_array(wave, rv_shift):
    """Return ``wave`` Doppler-shifted by ``rv_shift`` (km/s), non-relativistic."""
    return wave * (1.00000 + rv_shift / speed_of_light_km)
def shift_wavelength_to_rest(wave, step, rv_shift):
    """Undo a Doppler shift of ``rv_shift`` (km/s): divide by the factor
    that shift_wavelength would apply for the opposite velocity.
    """
    inverse_factor = 1.00000 + (-rv_shift) / speed_of_light_km
    return wave / inverse_factor, step / inverse_factor
def rebin_exact_flux(wave_in, step_in, flux_in, wave_out, step_out,
                     quadrature=False,
                     preserve_flux=True):
    """
    Previously named rebin_order

    Spectral rebinning with exact flux conservation: each output bin sums
    the input pixels it covers, weighting the two boundary pixels by their
    fractional overlap.

    :param wave_in: input bin centers
    :param step_in: input bin sizes
    :param flux_in: input fluxes (or errors, with quadrature=True)
    :param wave_out: output bin centers
    :param step_out: output bin sizes
    :param quadrature: sum the squared inputs and return the square root
        (error propagation)
    :param preserve_flux: if True, return the summed flux per bin; if
        False, normalize by the (fractional) number of input pixels
    :return: rebinned flux array, same shape as wave_out
    """
    if quadrature:
        flux_in = flux_in**2.
    flux_out = np.zeros(np.shape(wave_out), dtype=np.double)
    n1 = np.size(wave_in)
    n2 = np.size(wave_out)
    ns_prv = 0
    for i in range(0, n2):
        # print i, ' of ', n2
        # Starting and ending point of the bin
        wlb = wave_out[i] - step_out[i] / 2.000
        wle = wave_out[i] + step_out[i] / 2.000
        # Normalized flux value within the bin
        fl_nm = 0.00
        # b->blue and r->red side of the original spectrum which include the bin
        # ib and ir are initialized with values close to the ones of the last iteration to save time
        ib = ns_prv
        ir = ns_prv
        for ns in range(ns_prv, n1 - 1):
            # simple algorithm to search the closest indexes near the bin boundaries
            if wave_in[ib] + step_in[ib] / 2.00 < wlb: ib += 1
            if wave_in[ir] + step_in[ir] / 2.00 < wle: ir += 1
            # when we are close to the boundary of the spectra, we stop
            if ir < ns - 3: break
        # Fail-safe checks
        if ib > ns_prv: ns_prv = ib - 3
        if ib < 0 or ir > n1: continue
        if ib > ir: continue
        if ns_prv < 0: ns_prv = 0
        # Now the true rebinning section
        if ib == ir:
            # Output bin falls entirely inside a single input pixel
            pix_s = (wle - wlb) / step_in[ib]  # fraction
            pix_e = 0.
            flux_out[i] += pix_s * flux_in[ib]
            fl_nm += pix_s
        elif ib + 1 == ir:
            # Output bin straddles two adjacent input pixels
            pix_s = (wave_in[ib] + step_in[ib] * 0.5 - wlb) / step_in[ib]
            pix_e = (wle - (wave_in[ir] - step_in[ir] * 0.5)) / step_in[ir]
            flux_out[i] += (pix_s * flux_in[ib] + pix_e * flux_in[ir])
            fl_nm += (pix_s + pix_e)
        else:
            # General case: fractional boundary pixels plus the fully
            # covered pixels in between
            pix_s = (wave_in[ib] + step_in[ib] * 0.5 - wlb) / step_in[ib]
            pix_e = (wle - (wave_in[ir] - step_in[ir] * 0.5)) / step_in[ir]
            flux_out[i] += (pix_s * flux_in[ib] + pix_e * flux_in[ir])
            fl_nm += (pix_s + pix_e)
            for j in range(ib + 1, ir):
                flux_out[i] += flux_in[j]
                fl_nm += 1.00
        if (not preserve_flux) and fl_nm > 0.0:
            if quadrature:
                fl_nm *= fl_nm
            flux_out[i] /= fl_nm
    if quadrature:
        return np.sqrt(flux_out)
    else:
        return flux_out
def rebin_with_interpolation(wave_in, step_in, flux_in, wave_out, step_out,
                             quadrature=False,
                             preserve_flux=True,
                             interp_kind='cubic'):
    """Rebin a spectrum by interpolating its cumulative flux distribution.

    The cumulative integral of the input flux is sampled at the input bin
    edges, interpolated with ``interp_kind``, and differenced at the output
    bin edges. With ``preserve_flux`` the integral runs in flux-per-bin
    units (unit steps); otherwise it runs in flux-density units. With
    ``quadrature`` the input is squared first and the square root of the
    result is returned (error propagation). Output bins falling outside
    the input range evaluate to zero.
    """
    n_input = len(wave_in)
    norm = 1.0
    if preserve_flux:
        eff_step_in = np.ones(n_input)
        eff_step_out = np.ones(len(step_out))
    else:
        eff_step_in = step_in
        eff_step_out = step_out
        if quadrature:
            norm = (np.median(step_out) / np.median(step_in))
    if quadrature:
        flux_in = flux_in ** 2.
    # Cumulative flux at the bin edges of the input grid; interior edges
    # are taken halfway between consecutive bin centers.
    edge_wave = np.zeros(n_input + 1)
    edge_cumul = np.zeros(n_input + 1)
    edge_wave[0] = wave_in[0] - step_in[0] / 2.0
    for k in range(1, n_input):
        edge_cumul[k] = edge_cumul[k - 1] + flux_in[k - 1] * eff_step_in[k - 1]
        edge_wave[k] = wave_in[k] - (wave_in[k] - wave_in[k - 1]) / 2.
    edge_cumul[n_input] = edge_cumul[n_input - 1] + flux_in[n_input - 1] * eff_step_in[n_input - 1]
    edge_wave[n_input] = wave_in[n_input - 1] + step_in[n_input - 1] / 2.
    cumul_func = interp1d(edge_wave, edge_cumul, kind=interp_kind,
                          bounds_error=False, fill_value=0.000)
    upper = cumul_func(wave_out + step_out / 2.)
    lower = cumul_func(wave_out - step_out / 2.)
    flux_out = (upper - lower) / eff_step_out
    if quadrature:
        return np.sqrt(flux_out) / norm
    return flux_out
def rebin_1d_to_1d(wave_in, step_in, flux_in, wave_out, step_out,
                   rv_shift=None,
                   is_error=False,
                   quadrature=False,
                   preserve_flux=True,
                   method='cubic_interpolation',
                   reference_value=None):
    """Rebin a 1D spectrum onto a new wavelength grid.

    :param wave_in: input bin centers
    :param step_in: input bin sizes
    :param flux_in: input fluxes (or uncertainties, see ``is_error``)
    :param wave_out: output bin centers
    :param step_out: output bin sizes
    :param rv_shift: optional radial-velocity shift (km/s) applied to the
        input grid before rebinning
    :param is_error: treat the input as an uncertainty array; forces
        quadrature summation with the 'exact_flux' method
    :param quadrature: sum in quadrature (implied by ``is_error``)
    :param preserve_flux: conserve total flux per bin instead of treating
        the input as flux density
    :param method: 'cubic_interpolation' (default) or 'exact_flux'
    :param reference_value: if not None, output bins outside the input
        wavelength range (with a 0.005 wavelength-unit margin) are set to
        this value
    :return: flux rebinned on ``wave_out``
    :raises ValueError: if ``method`` is not supported
    """
    if is_error:
        # Uncertainties must be co-added in quadrature; the interpolation
        # of the cumulative flux cannot do that, so force the exact method.
        quadrature = True
        method = 'exact_flux'
    if rv_shift:
        wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
    if method == 'exact_flux':
        flux_out = rebin_exact_flux(wave_in, step_in, flux_in, wave_out, step_out,
                                    quadrature=quadrature, preserve_flux=preserve_flux)
    elif method == 'cubic_interpolation':
        flux_out = rebin_with_interpolation(wave_in, step_in, flux_in, wave_out, step_out,
                                            quadrature=quadrature, preserve_flux=preserve_flux,
                                            interp_kind='cubic')
    else:
        raise ValueError("method {0:s} not supported by rebinning subroutine".format(method))
    # BUGFIX: the previous truthiness test (`if reference_value:`) silently
    # skipped a legitimate reference_value of 0.0; compare to None instead.
    if reference_value is not None:
        wave_sel = (wave_out <= wave_in[0] + 0.005) | (wave_out >= wave_in[-1] - 0.005)
        flux_out[wave_sel] = reference_value
    return flux_out
def rebin_2d_to_1d(wave_in, step_in, flux_in, blaze_in, wave_out, step_out,
                   rv_shift=None,
                   is_error=False,
                   quadrature=False,
                   preserve_flux=True,
                   skip_blaze_correction=False,
                   method='cubic_interpolation',
                   reference_value=None):
    """
    Rebin a 2D (order-by-order) echelle spectrum onto a single 1D output grid,
    stitching overlapping orders together with a smooth weighted transition.

    :param wave_in: 2D (order, pixel) wavelength array of the input spectrum
    :param step_in: 2D (order, pixel) step-size array of the input spectrum
    :param flux_in: 2D (order, pixel) flux array of the input spectrum
    :param blaze_in: 2D blaze function; flux is divided by it unless it is
        None or skip_blaze_correction is True
    :param wave_out: 1D wavelength array of the output grid
    :param step_out: 1D step-size array of the output grid
    :param rv_shift: radial velocity shift applied to the input wavelengths
    :param is_error: treat the input as an error array (forces quadrature and
        the exact-flux method)
    :param quadrature: rebin in quadrature (work on squares, sqrt at the end)
    :param preserve_flux: preserve integrated flux instead of flux density
    :param skip_blaze_correction: if True, do not divide by the blaze
    :param method: rebinning method, passed to rebin_1d_to_1d
    :param reference_value: default value assigned to output pixels that are
        not covered by any order
    :return: flux_out
    """
    if rv_shift:
        wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
    o_axis, f_axis = np.shape(wave_in)
    n_rebin = np.size(wave_out)
    if skip_blaze_correction or blaze_in is None:
        flux_deblazed_in = flux_in
    else:
        flux_deblazed_in = flux_in / blaze_in
    if is_error:
        # errors require quadrature summation and the exact-flux method
        quadrature = True
        method = 'exact_flux'
    # Rebinning of the individual orders. We keep track of starting
    # and ending points of each order in the rebinned solution
    flux_rebin_pix = np.zeros([o_axis, n_rebin], dtype=np.double)
    counter_is = np.zeros(o_axis, dtype=np.int64) - 1
    counter_ie = np.zeros(o_axis, dtype=np.int64) - 1
    skip_order = np.ones(o_axis, dtype=bool)
    for ii in range(0, o_axis):
        # output-grid indices closest to the blue/red edges of this order
        counter_is[ii] = np.argmin(abs(wave_in[ii, 0] - wave_out))
        counter_ie[ii] = np.argmin(abs(wave_in[ii, -1] - wave_out))
        # orders entirely outside the output grid are skipped
        if wave_in[ii, 0] > np.amax(wave_out) or wave_in[ii, -1] < np.amin(wave_out):
            continue
        skip_order[ii] = False
        i = counter_is[ii]
        j = counter_ie[ii]+1
        flux_rebin_pix[ii, i:j] = rebin_1d_to_1d(wave_in[ii, :],
                                                 step_in[ii, :],
                                                 flux_deblazed_in[ii, :],
                                                 wave_out[i:j],
                                                 step_out[i:j],
                                                 quadrature=quadrature,
                                                 is_error=is_error,
                                                 preserve_flux=preserve_flux,
                                                 method=method,
                                                 reference_value=reference_value)
    flux_out = np.zeros(n_rebin, dtype=np.double)
    if reference_value:
        flux_out += reference_value
    if quadrature or is_error:
        # work in the squared domain; the sqrt is taken just before returning
        flux_rebin_pix = np.power(flux_rebin_pix, 2)
        # NOTE(review): this slice excludes pixel counter_ie[0] (no +1, unlike
        # j_ie below) and does not check skip_order[0] -- confirm intended
        flux_out[counter_is[0]:counter_ie[0]] = flux_rebin_pix[0, counter_is[0]:counter_ie[0]]
        for ii in range(1, o_axis):
            if skip_order[ii]: continue
            p_ie = counter_ie[ii - 1]
            j_is = counter_is[ii]
            j_ie = counter_ie[ii] + 1  # adding one because it is used in interval definition - Python quirks
            if p_ie > j_is:
                # overlap with the previous order: blend the two orders with a
                # weight ramping up from the previous order into the current one
                # (exponent 4 here: the non-quadrature weight squared, since we
                # are operating on squared values)
                nr_joint = float(p_ie - j_is)
                ij = np.arange(j_is, p_ie, 1, dtype=np.int64)
                ij_fraction = np.power((ij-j_is) / nr_joint, 4)
                flux_out[ij] = flux_rebin_pix[ii,ij] * ij_fraction + flux_rebin_pix[ii-1,ij] * (1. - ij_fraction)
                flux_out[p_ie:j_ie] = flux_rebin_pix[ii, p_ie:j_ie]
            else:
                flux_out[j_is:j_ie] = flux_rebin_pix[ii, j_is:j_ie]
        return np.sqrt(flux_out)
    else:
        # NOTE(review): same missing +1 / skip_order[0] caveat as above
        flux_out[counter_is[0]:counter_ie[0]] = flux_rebin_pix[0, counter_is[0]:counter_ie[0]]
        for ii in range(1, o_axis):
            if skip_order[ii]: continue
            p_ie = counter_ie[ii - 1]
            j_is = counter_is[ii]
            j_ie = counter_ie[ii] + 1
            if p_ie > j_is:
                # overlap region: linear-in-squared-distance blend (exponent 2)
                nr_joint = float(p_ie - j_is)
                ij = np.arange(j_is, p_ie, 1, dtype=np.int64)
                ij_fraction = np.power((ij-j_is) / nr_joint, 2.)
                flux_out[ij] = flux_rebin_pix[ii,ij] * ij_fraction + flux_rebin_pix[ii-1,ij] * (1. - ij_fraction)
                flux_out[p_ie:j_ie] = flux_rebin_pix[ii, p_ie:j_ie]
            else:
                flux_out[j_is:j_ie] = flux_rebin_pix[ii, j_is:j_ie]
        return flux_out
def rebin_1d_to_2d(wave_in, step_in, flux_in, wave_out, step_out,
                   rv_shift=None,
                   is_error=False,
                   quadrature=False,
                   preserve_flux=True,
                   method='cubic_interpolation',
                   reference_value=None):
    """Rebin a single 1D spectrum onto a 2D (order-by-order) wavelength grid.

    Each output order is filled independently by delegating to rebin_1d_to_1d.

    :param wave_in: 1D wavelength array of the input spectrum
    :param step_in: 1D step-size array of the input spectrum
    :param flux_in: 1D flux array of the input spectrum
    :param wave_out: 2D (order, pixel) wavelength array of the output grid
    :param step_out: 2D (order, pixel) step-size array of the output grid
    :param rv_shift: radial velocity shift applied to the input wavelengths
    :param is_error: treat the input as an error array (forces quadrature)
    :param quadrature: rebin in quadrature
    :param preserve_flux: preserve integrated flux instead of flux density
    :param method: rebinning method, passed to rebin_1d_to_1d
    :param reference_value: default value for pixels outside the input range
    :return: flux_out, 2D flux array with the same shape as wave_out
    """
    if rv_shift:
        wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
    if is_error:
        # errors require quadrature summation and the exact-flux method
        quadrature = True
        method = 'exact_flux'
    n_orders, n_pixels = np.shape(wave_out)
    flux_out = np.zeros([n_orders, n_pixels], dtype=np.double)
    if reference_value:
        flux_out += reference_value
    for order in range(n_orders):
        flux_out[order, :] = rebin_1d_to_1d(wave_in,
                                            step_in,
                                            flux_in,
                                            wave_out[order, :],
                                            step_out[order, :],
                                            quadrature=quadrature,
                                            is_error=is_error,
                                            preserve_flux=preserve_flux,
                                            method=method,
                                            reference_value=reference_value)
    return flux_out
def rebin_2d_to_2d(wave_in, step_in, flux_in, wave_out, step_out,
                   rv_shift=None,
                   is_error=False,
                   quadrature=False,
                   preserve_flux=True,
                   method='cubic_interpolation',
                   reference_value=None):
    """Rebin an order-by-order spectrum onto a new order-by-order grid.

    :param wave_in: 2D (order-by-order) wavelength array of the input spectrum
    :param step_in: 2D (order-by-order) step-size array of the input spectrum
    :param flux_in: 2D (order-by-order) flux array of the input spectrum
    :param wave_out: 2D (order-by-order) wavelength array of the rebinned spectrum
    :param step_out: 2D (order-by-order) step-size array of the rebinned spectrum
    :param rv_shift: radial velocity shift applied to the input wavelengths
    :param is_error: treat the input as an error array (forces quadrature)
    :param quadrature: rebin in quadrature
    :param preserve_flux: preserve integrated flux instead of flux density
    :param method: rebinning method, passed to rebin_1d_to_1d
    :param reference_value: default value for pixels outside the input range
    :return: flux_out, 2D (order-by-order) flux array, with same size as wave_out
    :raises ValueError: if input and output have different numbers of orders
    """
    if rv_shift:
        wave_in, step_in = shift_wavelength(wave_in, step_in, rv_shift)
    if is_error:
        # errors require quadrature summation and the exact-flux method
        quadrature = True
        method = 'exact_flux'
    n_orders_in, _ = np.shape(wave_in)
    n_orders, n_pixels = np.shape(wave_out)
    if n_orders_in != n_orders:
        raise ValueError("Mismatch between input and output number of orders in rebin_2d_to_2d")
    flux_out = np.zeros([n_orders, n_pixels], dtype=np.double)
    if reference_value:
        flux_out += reference_value
    for order in range(n_orders):
        flux_out[order, :] = rebin_1d_to_1d(wave_in[order, :],
                                            step_in[order, :],
                                            flux_in[order, :],
                                            wave_out[order, :],
                                            step_out[order, :],
                                            quadrature=quadrature,
                                            is_error=is_error,
                                            preserve_flux=preserve_flux,
                                            method=method,
                                            reference_value=reference_value)
    return flux_out
SLOPpy | SLOPpy-main/SLOPpy/subroutines/plot_subroutines.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
def make_color_array(lists, input_data):
    """ Creation of the color array, based on the BJD of the observations
    """
    bjd_values = []
    airmass_values = []
    for obs_name in lists['observations']:
        bjd_values.append(input_data[obs_name]['BJD'] - 2450000.0)
        airmass_values.append(input_data[obs_name]['AIRMASS'])
    colors = np.asarray(bjd_values)
    cmap = plt.cm.viridis
    #cmap = plt.cm.Spectral
    line_colors = cmap(np.linspace(0, 1, len(lists['observations'])))
    return colors, cmap, line_colors
def make_color_array_matplotlib3(lists, input_data):
    """ Creation of the color array, based on the BJD of the observations
    """
    # collect the time/airmass values in observation order
    bjd = [input_data[obs]['BJD'] for obs in lists['observations']]
    mbjd = [input_data[obs]['mBJD'] for obs in lists['observations']]
    am = [input_data[obs]['AIRMASS'] for obs in lists['observations']]

    colors_properties = {
        'norm': {
            'BJD': plt.Normalize(vmin=bjd[0], vmax=bjd[-1]),
            'mBJD': plt.Normalize(vmin=mbjd[0], vmax=mbjd[-1]),
            'AIRMASS': plt.Normalize(vmin=np.amin(am), vmax=np.amax(am))
        },
        'cmap': plt.cm.viridis
    }

    colors_plot = {'BJD': {}, 'mBJD': {}, 'AIRMASS': {}}
    colors_scatter = {'BJD': {}, 'mBJD': {}, 'AIRMASS': {}}

    for obs in lists['observations']:
        for key in ('BJD', 'mBJD', 'AIRMASS'):
            # RGBA color for line plots; RGB-only (alpha stripped) wrapped
            # in a list for scatter plots
            rgba = colors_properties['cmap'](
                colors_properties['norm'][key](input_data[obs][key]))
            colors_plot[key][obs] = rgba
            colors_scatter[key][obs] = [rgba[:-1]]

    return colors_properties, colors_plot, colors_scatter
def grid_1plot():
    """Build a single-panel figure with a narrow colorbar axis on the right.

    Returns (fig, gs, cbax1, ax).
    """
    fig = plt.figure(figsize=(12, 6))
    layout = GridSpec(1, 2, width_ratios=[50, 1])
    main_ax = plt.subplot(layout[0, 0])
    colorbar_ax = plt.subplot(layout[0, 1])
    fig.subplots_adjust(wspace=0.04, hspace=0.25)
    return fig, layout, colorbar_ax, main_ax
def grid_2plot(sharex=True, sharey=True):
    """Build a two-panel figure (stacked) with a shared colorbar axis.

    The second panel optionally shares the x and/or y axis with the first.
    Returns (fig, gs, cbax1, ax1, ax2).
    """
    fig = plt.figure(figsize=(12, 6))
    layout = GridSpec(2, 2, width_ratios=[50, 1])
    ax1 = plt.subplot(layout[0, 0])
    share_kwargs = {}
    if sharex:
        share_kwargs['sharex'] = ax1
    if sharey:
        share_kwargs['sharey'] = ax1
    ax2 = plt.subplot(layout[1, 0], **share_kwargs)
    colorbar_ax = plt.subplot(layout[:, 1])
    fig.subplots_adjust(wspace=0.04, hspace=0.25)
    return fig, layout, colorbar_ax, ax1, ax2
def grid_3plot_small(sharex=False, sharey=False, partial_share=True):
    """Build a three-panel figure (tall, short, tall) with a shared colorbar axis.

    Axis-sharing priority mirrors the original elif chain: sharex AND sharey,
    then sharex alone, then sharey alone, then partial_share (ax2 shares x only;
    ax3 shares both with ax1), otherwise fully independent axes.
    Returns (fig, gs, cbax1, ax1, ax2, ax3).
    """
    fig = plt.figure(figsize=(12, 9))
    layout = GridSpec(3, 2, width_ratios=[50, 1], height_ratios=[3, 1, 3])
    ax1 = plt.subplot(layout[0, 0])
    if sharex and sharey:
        kwargs_ax2 = {'sharex': ax1, 'sharey': ax1}
        kwargs_ax3 = {'sharex': ax1, 'sharey': ax1}
    elif sharex:
        kwargs_ax2 = {'sharex': ax1}
        kwargs_ax3 = {'sharex': ax1}
    elif sharey:
        kwargs_ax2 = {'sharey': ax1}
        kwargs_ax3 = {'sharey': ax1}
    elif partial_share:
        kwargs_ax2 = {'sharex': ax1}
        kwargs_ax3 = {'sharex': ax1, 'sharey': ax1}
    else:
        kwargs_ax2 = {}
        kwargs_ax3 = {}
    ax2 = plt.subplot(layout[1, 0], **kwargs_ax2)
    ax3 = plt.subplot(layout[2, 0], **kwargs_ax3)
    colorbar_ax = plt.subplot(layout[:, 1])
    fig.subplots_adjust(wspace=0.04, hspace=0.25)
    return fig, layout, colorbar_ax, ax1, ax2, ax3
SLOPpy | SLOPpy-main/SLOPpy/subroutines/constants.py |
from __future__ import division # no more "zero" integer division bugs!:P
import numpy as np # array
# radiants, degrees conversions etc.
pi = 4.*np.arctan(1.)  # pi at machine precision
dpi = 2.*pi  # 2*pi
deg2rad = pi/180.  # degrees to radians
rad2deg = 180./pi  # radians to degrees
# various
TOLERANCE = np.finfo(np.float64(1.0)).eps  # machine epsilon for float64
d2s = 86400. # seconds in a day = 24h = 86400 s
d2m = 1440. # min in a day = 1440. min
# masses conversions
Msmer = 6.0236e6 # Msun to Mmer
Mmers = 1./Msmer # Mmer to Msun
Msven = 4.08523719e5 # Msun to Mven
Mvens = 1./Msven # Mven to Msun
Msear = 332946.0487 # Msun to Mear
Mears = 1./Msear # Mear to Msun
Msmar = 3.09870359e6 # Msun to Mmar
Mmars = 1./Msmar # Mmar to Msun
Msjup = 1.047348644e3 # Msun to Mjup
Mjups = 1./Msjup # Mjup to Msun
Mssat = 3.4979018e3 # Msun to Msat
Msats = 1./Mssat # Msat to Msun
Msura = 2.290298e4 # Msun to Mura
Muras = 1./Msura # Mura to Msun
Msnep = 1.941226e4 # Msun to Mnep
Mneps = 1./Msnep # Mnep to Msun
Mejup = Mears * Msjup # Mear to Mjup
Mjear = Mjups * Msear # Mjup to Mear
# masses of Solar System objects
Msun = 1.9884e30 # Sun mass in kg
Mmer = Msun*Mmers # Mercury mass in kg
Mven = Msun*Mvens # Venus mass in kg
Mear = 5.9722e24 # Earth mass in kg
Mmar = Msun*Mmars # Mars mass in kg
Mjup = Msun*Mjups # Jupiter mass in kg
Msat = Msun*Msats # Saturn mass in kg
Mura = Msun*Muras # Uranus mass in kg
Mnep = Msun*Mneps # Neptune mass in kg
# radii of Solar System objects
Rsun = 696000. # Sun radius in km
Rmer = 2439.7 # Mercury radius in km
Rven = 6051.8 # Venus radius in km
Rear = 6378.1366 # Earth radius in km
Rmar = 3396.19 # Mars radius in km
Rjup = 71492. # Jupiter radius in km
Rsat = 60268. # Saturn radius in km
Rura = 25559. # Uranus radius in km
Rnep = 24764. # Neptune radius in km
Rplu = 1195. # Pluto radius in km
Rsjup = Rsun/Rjup # Rsun to Rjup
Rjups = Rjup/Rsun # Rjup to Rsun
Rsear = Rsun/Rear # Rsun to Rear
Rears = Rear/Rsun # Rear to Rsun
Rsnep = Rsun/Rnep # Rsun to Rnep
Rneps = Rnep/Rsun # Rnep to Rsun
Rejup = Rear/Rjup # Rearth to Rjupiter
Rjear = Rjup/Rear # Rjupiter to Rearth
# astronomical constants
AU = 149597870700. #Astronomical Unit in meters
kappa = 0.01720209895 # Gaussian gravitational constant
Giau = kappa*kappa # G [AU^3/Msun/d^2]
Gsi = 6.67428e-11 #Gravitational Constants in SI system [m^3/kg/s^2]
Gaumjd = Gsi*d2s*d2s*Mjup/(AU**3) # G in [AU,Mjup,day]
speed = 299792458. # speed of light (c) in [m/s]
speedaud = speed*d2s/AU # speed of light in [AU/d]
pc2AU = 206264.806  # parsec to AU
# others
RsunAU = (Rsun*1.e3)/AU #Sun radius in AU
RjupAU = (Rjup*1.e3)/AU #Jupiter radius in AU
MJD = 2400000.5 # MJD ref time to convert to JD
speed_of_light = speed  # alias, [m/s]
sigma2fwhm = 2. * np.sqrt(2. * np.log(2))  # Gaussian sigma -> FWHM factor
speed_of_light_km = speed / 1000. | 2,781 | 28.595745 | 73 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/fit_subroutines.py | from __future__ import print_function, division
import numpy as np
from scipy.linalg import lstsq
from scipy.optimize import curve_fit
from sklearn import linear_model, datasets
def berv_telluric_curvefit(xdata, p0, p1, p2):
    """Bilinear telluric model: p0 * airmass + p1 * BERV + p2.

    xdata is a two-element sequence: (airmass array, BERV array).
    """
    airmass_term, berv_term = xdata[0], xdata[1]
    return p0 * airmass_term + p1 * berv_term + p2
def berv_linear_curve_fit(airmass, berv, logi_array, sigi_array, n_axis):
    """Fit, pixel by pixel, the log-flux as a linear function of airmass and BERV.

    The telluric (airmass) coefficient is constrained to be non-positive
    through the upper bound on the first parameter.

    :param airmass: 1D airmass array (one value per observation)
    :param berv: 1D barycentric Earth radial velocity array
    :param logi_array: 2D array (n_observations, n_axis) of log-flux values
    :param sigi_array: 2D array of the associated uncertainties
    :param n_axis: number of pixels (columns of logi_array) to fit
    :return: (ltel, shift, zero) arrays of the fitted coefficients per pixel
    """
    starting_pams = np.zeros(3) - 0.1
    ltel = np.empty(n_axis)
    shift = np.empty(n_axis)
    zero = np.empty(n_axis)
    airmass_zero = 0.  # np.average(airmass)
    berv_zero = 0.  # np.average(berv)
    # BUGFIX: the original used Python-2-only xrange, raising NameError on
    # Python 3; the unused accumulator C was also removed
    for ii in range(0, n_axis):
        popt, _ = curve_fit(berv_telluric_curvefit,
                            [airmass - airmass_zero, berv - berv_zero],
                            logi_array[:, ii],
                            p0=starting_pams,
                            sigma=sigi_array[:, ii],
                            bounds=([-np.inf, -np.inf, -np.inf], [0.000, np.inf, np.inf]))
        ltel[ii] = popt[0]
        shift[ii] = popt[1]
        zero[ii] = popt[2]
    return ltel, shift, zero
def berv_linear_lstsq(airmass, berv, logi_array):
    """Least-squares linear fit of log-flux vs airmass and BERV.

    Returns the (airmass coefficient, BERV coefficient, constant offset).
    """
    design_matrix = np.column_stack((airmass, berv, np.ones(logi_array.shape[0])))
    coefficients, _, _, _ = lstsq(design_matrix, logi_array)
    return coefficients[0], coefficients[1], coefficients[2]
def airmass_telluric_curvefit(xdata, p0, p1):
    """Linear telluric model: p0 * airmass + p1."""
    return p1 + p0 * xdata
def airmass_linear_curve_fit_ransac(airmass, logi_array, sigi_array, n_axis):
    """RANSAC linear fit of log-flux versus airmass, pixel by pixel.

    Both axes are mean-subtracted before the fit for numerical stability; the
    mean of the flux is added back to the returned intercept. Note that
    sigi_array is accepted for signature compatibility but is not used by the
    RANSAC estimator.
    """
    ltel = np.empty(n_axis)
    zero = np.empty(n_axis)
    airmass_mean = np.average(airmass)
    airmass_column = (airmass - airmass_mean).reshape(-1, 1)
    ransac = linear_model.RANSACRegressor()
    for i_pix in range(n_axis):
        flux_mean = np.average(logi_array[:, i_pix])
        ransac.fit(airmass_column, logi_array[:, i_pix] - flux_mean)
        ltel[i_pix] = ransac.estimator_.coef_[0]
        zero[i_pix] = ransac.estimator_.intercept_ + flux_mean
    return ltel, zero
def airmass_linear_curve_fit(airmass, logi_array, sigi_array, n_axis):
    """Weighted linear fit of log-flux versus airmass, pixel by pixel.

    :param airmass: 1D airmass array (one value per observation)
    :param logi_array: 2D array (n_observations, n_axis) of log-flux values
    :param sigi_array: 2D array of the associated uncertainties
    :param n_axis: number of pixels (columns of logi_array) to fit
    :return: (ltel, zero) arrays with slope and intercept per pixel

    NOTE(review): both airmass and flux are mean-subtracted before the fit,
    but the returned intercept does NOT add the flux mean back, unlike
    airmass_linear_curve_fit_ransac -- confirm this asymmetry is intended.
    """
    # unused accumulator C removed; behavior is otherwise unchanged
    starting_pams = np.zeros(2)
    ltel = np.empty(n_axis)
    zero = np.empty(n_axis)
    airmass_zero = np.average(airmass)
    for ii in range(0, n_axis):
        y_zero = np.average(logi_array[:, ii])
        popt, _ = curve_fit(airmass_telluric_curvefit,
                            airmass - airmass_zero,
                            logi_array[:, ii] - y_zero,
                            p0=starting_pams,
                            sigma=sigi_array[:, ii],
                            bounds=([-np.inf, -np.inf], [np.inf, np.inf]))
        ltel[ii] = popt[0]
        zero[ii] = popt[1]
    return ltel, zero
def berv_linear_curve_fit_modified(airmass, berv, logi_array, sigi_array, n_axis):
    """Fit log-flux as a linear function of airmass and BERV, pixel by pixel.

    Unlike berv_linear_curve_fit, the predictors are mean-subtracted and the
    telluric (airmass) coefficient is left unconstrained.

    :param airmass: 1D airmass array (one value per observation)
    :param berv: 1D barycentric Earth radial velocity array
    :param logi_array: 2D array (n_observations, n_axis) of log-flux values
    :param sigi_array: 2D array of the associated uncertainties
    :param n_axis: number of pixels (columns of logi_array) to fit
    :return: (ltel, shift, zero) arrays of the fitted coefficients per pixel
    """
    # unused accumulator C removed; behavior is otherwise unchanged
    starting_pams = np.zeros(3)
    ltel = np.empty(n_axis)
    shift = np.empty(n_axis)
    zero = np.empty(n_axis)
    airmass_zero = np.average(airmass)
    berv_zero = np.average(berv)
    for ii in range(0, n_axis):
        popt, _ = curve_fit(berv_telluric_curvefit,
                            [airmass - airmass_zero, berv - berv_zero],
                            logi_array[:, ii],
                            p0=starting_pams,
                            sigma=sigi_array[:, ii],
                            bounds=([-np.inf, -np.inf, -np.inf], [np.inf, np.inf, np.inf]))
        ltel[ii] = popt[0]
        shift[ii] = popt[1]
        zero[ii] = popt[2]
    return ltel, shift, zero
def airmass_linear_lstsq(airmass, logi_array):
    """Least-squares linear fit of log-flux versus airmass.

    Returns the (airmass coefficient, constant offset).
    """
    design_matrix = np.column_stack((airmass, np.ones(logi_array.shape[0])))
    coefficients, _, _, _ = lstsq(design_matrix, logi_array)
    return coefficients[0], coefficients[1]
| 3,592 | 27.975806 | 94 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/smooth_subroutines.py | from __future__ import print_function, division
import numpy as np
def smooth(x, window_len=5, window='hanning'):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the begining and end part of the output signal.

    input:
        x: the input signal (1D numpy array)
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
            flat window will produce a moving average smoothing.
    output:
        the smoothed signal, with the same length as the input
    example:
        t=linspace(-2,2,0.1)
        x=sin(t)+randn(len(t))*0.1
        y=smooth(x)
    see also:
        numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
        scipy.signal.lfilter
    TODO: the window parameter could be the window itself if an array instead of a string
    # adapted from:
    http://scipy-cookbook.readthedocs.io/items/SignalSmooth.html
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        return x
    if window not in ('flat', 'hanning', 'hamming', 'bartlett', 'blackman'):
        raise ValueError("Window can be only one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # reflect-pad both ends so the convolution is well defined at the borders
    s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # BUGFIX: replaced eval('np.' + window + ...) with getattr -- same
        # behavior without dynamic code evaluation
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode='valid')
    if np.size(x) == np.size(y):
        return y
    # BUGFIX: the original trimmed with y[(window_len/2-1):-(window_len/2)],
    # but under true division window_len/2 is a float and slicing raised
    # TypeError. Trim symmetrically with integer indices so the output has
    # exactly the same length as the input (len(y) == len(x)+window_len-1).
    return y[(window_len - 1) // 2:-(window_len // 2)]
| 2,109 | 31.461538 | 121 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/bayesian_emcee.py | from SLOPpy.subroutines.common import *
import os
from SLOPpy.subroutines.mcmc_fit_functions import *
from SLOPpy.subroutines.math_functions import interpolate2d_grid_nocheck
#from SLOPpy.subroutines.interpol import interpolate1d_grid_nocheck
from multiprocessing import Pool
import emcee
import time
# define theta pams
def define_theta_array(model_case, line_iter_dict, planet_dict, n_jitter, allow_emission=False):
    """Build the free-parameter bookkeeping for the line-fitting MCMC.

    Depending on model_case, each spectral line gets its own contrast and
    (optionally) FWHM and wind parameters; shared FWHM/wind parameters, the
    planetary radius scaling factor, the planetary RV semi-amplitude and the
    jitter terms are appended afterwards.

    Returns (lines_center, pams_dict, pams_list, boundaries, theta_start).
    """
    pams_dict = {}    # parameter name -> index in the theta array
    pams_list = []    # parameter names, ordered by index
    bounds_list = []  # [lower, upper] per parameter (MCMC / nested sampling)
    start_list = []   # starting point per parameter
    centers = []      # laboratory wavelength of each spectral line

    def _register(name, lower, upper, start):
        # single registration point keeps the index bookkeeping consistent
        pams_dict[name] = len(pams_list)
        pams_list.append(name)
        bounds_list.append([lower, upper])
        start_list.append(start)

    # model-case membership flags (same sets as the original if-chains)
    per_line_fwhm = model_case in (0, 1, 2, 3, 10, 11, 14, 20, 21, 24)
    per_line_winds = model_case in (0, 2, 10, 12, 20, 22)
    shared_fwhm = model_case in (12, 13, 15, 22, 23, 25)
    shared_winds = model_case in (11, 13, 21, 23)
    fit_radius = model_case in (0, 1, 10, 11, 12, 13, 14, 15)

    contrast_lower = -0.20 if allow_emission else 0.00
    for line_name, line_wave in line_iter_dict['lines'].items():
        _register(line_name + '_contrast', contrast_lower, 0.20, 0.010)
        centers.append(line_wave)
        # per-line FWHM is skipped when a shared FWHM is used instead
        if per_line_fwhm:
            _register(line_name + '_fwhm (km/s)', 0.00, 100.00, 5.0)
        if per_line_winds:
            _register(line_name + '_winds (km/s)', -25.00, 25.00, 0.00)

    if shared_fwhm:
        _register('shared_fwhm (km/s)', 0.000, 100.00, 5.000)
    if shared_winds:
        _register('shared_winds (km/s)', -25.0, 25.0, 0.000)
    if fit_radius:
        _register('rp_factor', 0.5, 2.0, 1.0)

    # the RV semi-amplitude of the planet is always a free parameter,
    # bounded within +/- 75 km/s of the literature value
    k_literature = planet_dict['RV_semiamplitude'][0]
    _register('K_planet (km/s)', k_literature - 75., k_literature + 75.,
              k_literature / 1000.0)

    for i_j in range(n_jitter):
        _register('jitter_' + repr(i_j), 10**(-12), 0.05, 10**(-11))

    lines_center = np.asarray(centers, dtype=np.double)
    boundaries = np.asarray(bounds_list, dtype=np.double)
    theta_start = np.asarray(start_list, dtype=np.double)
    return lines_center, pams_dict, pams_list, boundaries, theta_start
def emcee_lines_fit_functions(model_case,
                              wave_meshgrid,
                              transmission_spec,
                              transmission_spec_err,
                              clv_rm_radius,
                              clv_rm_grid,
                              planet_RVsinusoid,
                              lines_center,
                              jitter_index,
                              priors_dict,
                              theta_start, boundaries, ndim, nwalkers, ngen, nsteps, nthin):
    """Run the optional PyDE global optimization followed by the emcee MCMC
    sampling of the transmission-line model selected by model_case.

    Returns (population, sampler.chain, sampler.lnprobability, point_start).
    """
    # avoid oversubscription: numpy/OpenMP threading must not compete with
    # the multiprocessing Pool used below
    os.environ["OMP_NUM_THREADS"] = "1"
    #""" Avoid starting values out of boundaries """
    #for nd in range(0, ndim):
    #    sel = (point_star[:,nd] <= boundaries[nd,0]) | (point_star[:,nd] >= boundaries[nd,1])
    #    point_star[sel,nd] = theta_start[nd]
    # the triple-quoted string below is a leftover debugging block (no-op)
    """
    print(np.shape(wave_append))
    print(np.shape(flux_append))
    print(np.shape(ferr_append))
    print(np.shape(nobs_append))
    print(np.shape(clv_rm_radius))
    print(np.shape(clv_rm_grid))
    print(np.shape(rvsys_PRF2ORF_append))
    print(np.shape(planet_RVsinusoid_append))
    print(np.shape(lines_center))
    """
    # dispatch table: model_case -> log-probability function
    model_dictionaries = {
        0: logprob_case00,
        1: logprob_case01,
        2: logprob_case02,
        3: logprob_case03,
        10: logprob_case10,
        11: logprob_case11,
        12: logprob_case12,
        13: logprob_case13,
        14: logprob_case14,
        15: logprob_case15,
        20: logprob_case20,
        21: logprob_case21,
        22: logprob_case22,
        23: logprob_case23,
        24: logprob_case24,
        25: logprob_case25,
    }
    logprob = model_dictionaries[model_case]
    # PyDE is optional: fall back to a random initialization around
    # theta_start when it is not available (or when ngen is too small)
    try:
        from pyde.de import DiffEvol
        use_pyde = True
    except ImportError:
        print(' Warnign: PyDE is not installed, random initialization point')
        use_pyde = False
    if ngen <= 1 : use_pyde = False
    """ R_p is fixed to 1.0 """
    if model_case in [2, 3, 20, 21, 22, 23, 24, 25]:
        # radius is not a free parameter: precompute the CLV model once
        clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
        args_input = (boundaries,
                      wave_meshgrid,
                      transmission_spec,
                      transmission_spec_err,
                      clv_model,
                      planet_RVsinusoid,
                      lines_center,
                      jitter_index,
                      priors_dict)
    else:
        # radius is free: the logprob interpolates the CLV grid itself
        args_input = (boundaries,
                      wave_meshgrid,
                      transmission_spec,
                      transmission_spec_err,
                      clv_rm_radius,
                      clv_rm_grid,
                      planet_RVsinusoid,
                      lines_center,
                      jitter_index,
                      priors_dict)
    if use_pyde:
        # global optimization: the final DE population doubles as the
        # starting ensemble for emcee
        start = time.time()
        with Pool() as pool:
            de = DiffEvol(
                logprob,
                boundaries,
                nwalkers,
                maximize=True,
                pool=pool,
                args=args_input)
            de.optimize(ngen)
        end = time.time()
        print("PyDE global optimization took {0:.1f} seconds".format( end - start))
        theta_start = np.median(de.population, axis=0)
        point_start = de.population
    else:
        # small random ball around theta_start; walker 0 sits exactly on it
        point_start = theta_start + 1e-4 * np.abs(np.random.randn(nwalkers, ndim))
        point_start[0, :] = theta_start
    start = time.time()
    with Pool() as pool:
        sampler = emcee.EnsembleSampler(nwalkers,
                                        ndim,
                                        logprob,
                                        args=args_input,
                                        pool=pool)
        population, prob, state = sampler.run_mcmc(point_start,
                                                   nsteps,
                                                   thin=nthin,
                                                   progress=True)
    end = time.time()
    print()
    print("emcee MCMC optimization took {0:.1f} seconds".format(end - start))
    return population, sampler.chain, sampler.lnprobability, point_start
def return_model(model_case,
                 theta,
                 wave_meshgrid,
                 clv_rm_radius,
                 clv_rm_grid,
                 planet_RVsinusoid,
                 lines_center,
                 jitter_index):
    """Evaluate the line and CLV/RM models for a given parameter vector.

    The selected logprob function is called with return_models=True on dummy
    unit-valued data, so only the model components (and derived quantities)
    are returned; the likelihood value itself is discarded.
    """
    ndim = len(theta)
    # fake boundaries centred on theta so the boundary check always passes
    boundaries = np.empty([ndim, 2])
    boundaries[:, 0] = theta - 1.
    boundaries[:, 1] = theta + 1.
    # dummy data arrays: their values do not influence the returned models
    transmission_spec = np.ones(np.shape(wave_meshgrid))
    transmission_spec_err = np.ones(np.shape(wave_meshgrid))
    # dispatch table: model_case -> log-probability function
    logprob = {
        0: logprob_case00,
        1: logprob_case01,
        2: logprob_case02,
        3: logprob_case03,
        10: logprob_case10,
        11: logprob_case11,
        12: logprob_case12,
        13: logprob_case13,
        14: logprob_case14,
        15: logprob_case15,
        20: logprob_case20,
        21: logprob_case21,
        22: logprob_case22,
        23: logprob_case23,
        24: logprob_case24,
        25: logprob_case25,
    }[model_case]
    if model_case in (2, 3, 20, 21, 22, 23, 24, 25):
        # R_p fixed to 1: the CLV model is interpolated here once and passed
        # to the logprob, which returns a placeholder in its place
        clv_model = interpolate2d_grid_nocheck(1.000, clv_rm_radius, clv_rm_grid)
        lines_model, _, lines_array, planet_K, planet_R, jitter = logprob(
            theta,
            boundaries,
            wave_meshgrid,
            transmission_spec,
            transmission_spec_err,
            clv_model,
            planet_RVsinusoid,
            lines_center,
            jitter_index,
            {},
            return_models=True)
    else:
        lines_model, clv_model, lines_array, planet_K, planet_R, jitter = logprob(
            theta,
            boundaries,
            wave_meshgrid,
            transmission_spec,
            transmission_spec_err,
            clv_rm_radius,
            clv_rm_grid,
            planet_RVsinusoid,
            lines_center,
            jitter_index,
            {},
            return_models=True)
    return lines_model, clv_model, lines_array, planet_K, planet_R, jitter
def emcee_flatten_median(population, sampler_chain, sampler_lnprobability, nburnin, nthin, nwalkers):
    """Flatten the emcee chains (burn-in removed) and extract summary values.

    Returns (flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med,
    lnprob_MAP), where the *_med values are median + asymmetric 1-sigma
    intervals and *_MAP are the maximum a posteriori estimates.
    """
    flat_chain = emcee_flatchain(sampler_chain, nburnin, nthin)
    flat_lnprob, _ = emcee_flatlnprob(
        sampler_lnprobability, nburnin, nthin, population, nwalkers)
    chain_med = compute_value_sigma(flat_chain)
    lnprob_med = compute_value_sigma(flat_lnprob)
    chain_MAP, lnprob_MAP = pick_MAP_parameters(flat_chain, flat_lnprob)
    return flat_chain, flat_lnprob, chain_med, chain_MAP, lnprob_med, lnprob_MAP
def emcee_compute_BIC_AIC(lnprob_med, lnprob_MAP, ndata, ndim):
    """Print BIC, AIC and AICc for both the median and the MAP posterior.

    :param lnprob_med: (median, +sigma, -sigma) of the log-posterior
    :param lnprob_MAP: maximum a posteriori log-probability
    :param ndata: number of data points
    :param ndim: number of free parameters
    :return: None (results are printed only)
    """
    print()
    # BUGFIX: the MAP placeholder was {0:12f}, printing the median value
    # instead of lnprob_MAP
    print(' LN posterior: {0:12f} {1:12f} {2:12f} (15-84 p) MAP: {3:12f}'.format(
        lnprob_med[0], lnprob_med[2], lnprob_med[1], lnprob_MAP))
    BIC = -2.0 * lnprob_med[0] + np.log(ndata) * ndim
    AIC = -2.0 * lnprob_med[0] + 2.0 * ndim
    AICc = AIC + (2.0 + 2.0 * ndim) * ndim / (ndata - ndim - 1.0)
    print()
    print(' Median BIC  = {}'.format(BIC))
    print(' Median AIC  = {}'.format(AIC))
    print(' Median AICc = {}'.format(AICc))
    BIC_map = -2.0 * lnprob_MAP + np.log(ndata) * ndim
    AIC_map = -2.0 * lnprob_MAP + 2.0 * ndim
    # BUGFIX: AICc_map was computed from the median AIC instead of AIC_map
    AICc_map = AIC_map + (2.0 + 2.0 * ndim) * ndim / (ndata - ndim - 1.0)
    print()
    print(' MAP BIC  = {}'.format(BIC_map))
    print(' MAP AIC  = {}'.format(AIC_map))
    print(' MAP AICc = {}'.format(AICc_map))
def emcee_burnin_check(chain, nburnin, nthin, nwalkers=False):
    """Sanity-check the burn-in length against the (thinned) chain length.

    If the requested burn-in would consume 90% or more of the chain, it is
    reduced to one quarter of the chain length and the returned flag is True.
    Without nwalkers the chain is assumed 3D (walkers, steps, params); with
    nwalkers the 2D lnprob array may have walkers on either axis.

    Returns (nburn, modified).
    """
    nburn = int(nburnin / nthin)
    if not nwalkers:
        n_steps = np.shape(chain)[1]
    else:
        dim_a, dim_b = np.shape(chain)
        n_steps = dim_b if dim_a == nwalkers else dim_a
    if nburn >= n_steps * 0.9:
        return int(n_steps / 4), True
    return nburn, False
def emcee_flatchain(chain, nburnin, nthin):
    """flattening of the emcee chains with removal of burn-in"""
    nburn, _ = emcee_burnin_check(chain, nburnin, nthin)
    trimmed = chain[:, nburn:, :]
    n_walkers, n_steps, n_pams = trimmed.shape
    return trimmed.reshape(n_walkers * n_steps, n_pams)
def emcee_flatlnprob(lnprob, nburnin, nthin, population, nwalkers):
    """Flatten the ln-probability array, removing the burn-in.

    The 2D array may be stored as (walkers, steps) or (steps, walkers); the
    second return value is always the (walkers, steps) oriented array
    transposed to (steps, walkers) in the first case.
    """
    nburn, _ = emcee_burnin_check(lnprob, nburnin, nthin, nwalkers)
    dim_a, dim_b = np.shape(lnprob)
    if dim_a == nwalkers:
        trimmed = lnprob[:, nburn:]
        return trimmed.reshape(trimmed.shape[0] * trimmed.shape[1]), lnprob.T
    trimmed = lnprob[nburn:, :]
    return trimmed.reshape(trimmed.shape[0] * trimmed.shape[1]), lnprob
def GelmanRubin_v2(sampler_chain):
    """Gelman-Rubin convergence diagnostic (potential scale reduction factor).

    :param sampler_chain: array with shape (walkers, steps, parameters)
    :return: Rhat per parameter

    adapted from http://joergdietrich.github.io/emcee-convergence.html
    """
    n_chains = sampler_chain.shape[0] * 1.0
    n_steps = sampler_chain.shape[1] * 1.0
    # within-chain variance: mean of per-walker variances
    within = np.mean(np.var(sampler_chain, axis=1, ddof=1), axis=0)
    # between-chain variance from the spread of the per-walker means
    chain_means = np.mean(sampler_chain, axis=1)
    grand_mean = np.mean(chain_means, axis=0)
    between = n_steps / (n_chains - 1) * np.sum((grand_mean - chain_means) ** 2, axis=0)
    pooled_var = (n_steps - 1) / n_steps * within + between / n_steps
    return np.sqrt(pooled_var / within)
def compute_value_sigma(samples):
    """Median and asymmetric 1-sigma intervals of posterior samples.

    For 1D input returns [median, +sigma, -sigma]; for 2D input (samples,
    parameters) returns an array of shape (parameters, 3). Non-finite
    samples are ignored. Returns None for other dimensionalities.
    """
    def _median_sigmas(values):
        finite = values[np.isfinite(values)]
        low, med, high = np.percentile(finite, [15.865, 50, 84.135], axis=0)
        return med, high - med, med - low

    n_dims = np.size(np.shape(samples))
    if n_dims == 1:
        sample_med = np.zeros(3)
        sample_med[0], sample_med[1], sample_med[2] = _median_sigmas(samples)
        return sample_med
    if n_dims == 2:
        sample_med = np.zeros((samples.shape[1], 3))
        for i_pam in range(samples.shape[1]):
            sample_med[i_pam, 0], sample_med[i_pam, 1], sample_med[i_pam, 2] = \
                _median_sigmas(samples[:, i_pam])
        return sample_med
    print('ERROR!!! ')
    return None
def pick_MAP_parameters(samples, lnprob):
    """Return the maximum a posteriori sample and its log-probability.

    Works for 1D (single parameter) and 2D (samples, parameters) arrays;
    returns None for other dimensionalities.
    """
    best = np.argmax(lnprob)
    n_dims = np.size(np.shape(samples))
    if n_dims == 1:
        return samples[best], lnprob[best]
    if n_dims == 2:
        return samples[best, :], lnprob[best]
    print('ERROR!!! ')
    return None
| 15,492 | 32.827511 | 118 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/clv_rm_subroutines.py | import numpy as np
from SLOPpy.subroutines.rebin_subroutines import *
def clv_rm_correction_factor_computation(clv_rm_modelling, wave, step, rv_shift, obs):
    """Compute the CLV+RM correction factor for one observation.

    Both the normalization spectrum and the observation-specific stellar
    spectrum are rebinned onto the requested wavelength grid (with an RV
    shift); pixels near or beyond the model edges are replaced by the
    maximum of the well-covered pixels before taking the ratio.

    :param clv_rm_modelling: dictionary with the 'common' model grid and the
        per-observation convolved stellar spectra
    :param wave: output wavelength array
    :param step: output step-size array
    :param rv_shift: radial velocity shift applied during rebinning
    :param obs: observation key within clv_rm_modelling
    :return: (correction, ancillary) where ancillary holds the intermediate arrays
    """
    ancillary = {}
    common_wave = clv_rm_modelling['common']['wave']
    common_step = clv_rm_modelling['common']['step']
    ancillary['norm_convolved_shifted'] = \
        rebin_1d_to_1d(common_wave,
                       common_step,
                       clv_rm_modelling['common']['norm_convolved'],
                       wave,
                       step,
                       rv_shift=rv_shift,
                       preserve_flux=False)
    ancillary['stellar_spectra_convolved_shifted'] = \
        rebin_1d_to_1d(common_wave,
                       common_step,
                       clv_rm_modelling[obs]['stellar_spectra_convolved'],
                       wave,
                       step,
                       rv_shift=rv_shift,
                       preserve_flux=False)
    # pixels within 1 Angstrom of the model edges are unreliable after rebinning
    wavelength_exclusion = \
        (wave <= common_wave[0] + 1) | \
        (wave >= common_wave[-1] - 1)
    # BUGFIX: the selection must be the complement of the exclusion; the
    # original used '>' on the red edge too, so the "good pixel" mask picked
    # only pixels beyond the red limit of the model
    wavelength_selection = ~wavelength_exclusion
    ancillary['norm_convolved_shifted'][wavelength_exclusion] = \
        np.amax(ancillary['norm_convolved_shifted'][wavelength_selection])
    ancillary['stellar_spectra_convolved_shifted'][wavelength_exclusion] = \
        np.amax(ancillary['stellar_spectra_convolved_shifted'][wavelength_selection])
    ancillary['correction'] = ancillary['stellar_spectra_convolved_shifted'] / ancillary['norm_convolved_shifted']
    return ancillary['correction'], ancillary
| 1,724 | 40.071429 | 114 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/mcmc_fit_functions.py | import numpy as np
from SLOPpy.subroutines.math_functions import interpolate2d_grid_nocheck
from SLOPpy.subroutines.constants import *
def compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array):
    """Gaussian absorption model for a single planetary line.

    line_array layout (same convention as the multi-line version):
        line_array[0] = laboratory wavelength
        line_array[1] = contrast
        line_array[2] = FWHM (km/s)
        line_array[3] = winds (km/s)
    """
    # spectral shift in RV induced by the planetary orbital motion
    rv_planet = planet_K * planet_RVsinusoid
    wave0, contrast, fwhm, winds = line_array[0], line_array[1], line_array[2], line_array[3]
    # Gaussian sigma in wavelength units, converted from the FWHM in km/s
    sigma = fwhm / sigma2fwhm * wave0 / speed_of_light_km
    # Doppler-shift the line center by the orbital RV plus the wind term
    wave_shifted = wave0 + (rv_planet + winds) * wave0 / speed_of_light_km
    line_model = np.ones(np.shape(wave_meshgrid), dtype=np.double)
    line_model -= contrast * np.exp(-(1. / (2 * sigma ** 2)) * (wave_meshgrid - wave_shifted) ** 2)
    return line_model
def compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines):
    """Gaussian absorption model for several planetary lines.

    lines_array has shape (4, n_lines) with rows: wavelength, contrast,
    FWHM (km/s), winds (km/s).
    """
    # spectral shift in RV induced by the planetary orbital motion
    rv_planet = planet_K * planet_RVsinusoid
    lines_model = np.ones(np.shape(wave_meshgrid), dtype=np.double)
    for i_line in range(n_lines):
        wave0 = lines_array[0, i_line]
        contrast = lines_array[1, i_line]
        fwhm = lines_array[2, i_line]
        winds = lines_array[3, i_line]
        sigma = fwhm / sigma2fwhm * wave0 / speed_of_light_km
        wave_shifted = wave0 + (rv_planet + winds) * wave0 / speed_of_light_km
        lines_model -= contrast * np.exp(-(1. / (2 * sigma ** 2)) * (wave_meshgrid - wave_shifted) ** 2)
    return lines_model
""" case 0: only one spectral line, default line parameters are contrast, FWHM, rv_shift """
def logprob_case00(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 0: one spectral line with free contrast, FWHM
    and winds; CLV+RM model interpolated at the free planetary radius.

    theta layout: [contrast, FWHM, winds, ..., planet_R, planet_K, jitter(s)]
    with the jitter term(s), when present, at the tail of theta.
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (line_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ line_array is always structured in this way:
        line_array = np.empty(n_pams, n_lines)
        line_array[0, 0] = wavelength
        line_array[1, 0] = contrast
        line_array[2, 0] = FWHM
        line_array[3, 0] = winds
    """
    line_array = [lines_center[0], theta[0], theta[1], theta[2]]

    line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)

    # residuals after removing both the CLV+RM and the line models
    flux_res = transmission_spec / clv_model / line_model - 1.
    ferr_res = transmission_spec_err / clv_model / line_model

    if return_models:
        lines_array = np.empty([4, 1])
        lines_array[:, 0] = line_array
        return line_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': line_array[2],
        'winds': line_array[3],
    }

    # Gaussian priors on the selected variables (key_vals = [mean, sigma])
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 1: only one spectral line, no winds """
def logprob_case01(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 1: one spectral line, no winds (wind fixed to
    zero); CLV+RM model interpolated at the free planetary radius.

    theta layout: [contrast, FWHM, ..., planet_R, planet_K, jitter(s)].
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (line_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ line_array is always structured in this way:
        line_array = np.empty(n_pams, n_lines)
        line_array[0, 0] = wavelength
        line_array[1, 0] = contrast
        line_array[2, 0] = FWHM
        line_array[3, 0] = winds
    """
    line_array = [lines_center[0], theta[0], theta[1], 0.000]

    line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)

    # residuals after removing both the CLV+RM and the line models
    flux_res = transmission_spec / clv_model / line_model - 1.
    ferr_res = transmission_spec_err / clv_model / line_model

    if return_models:
        lines_array = np.empty([4, 1])
        lines_array[:, 0] = line_array
        return line_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': line_array[2],
        'winds': line_array[3],
    }

    # Gaussian priors on the selected variables (key_vals = [mean, sigma])
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 2: only one spectral line, no planetary radius dependance """
def logprob_case02(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_model,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 2: one spectral line with free contrast, FWHM
    and winds; no planetary-radius dependence (clv_model is precomputed,
    planet_R is reported as 1).

    theta layout: [contrast, FWHM, winds, ..., planet_K, jitter(s)].
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (line_model, clv_model, lines_array, planet_K, 1.0, jitter_pams)
    instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet """
    planet_K = theta[-2-i_j]

    # line parameters: [wavelength, contrast, FWHM, winds]
    line_array = [lines_center[0], theta[0], theta[1], theta[2]]

    line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)

    # residuals after removing both the CLV+RM and the line models
    flux_res = transmission_spec / clv_model / line_model - 1.
    ferr_res = transmission_spec_err / clv_model / line_model

    if return_models:
        lines_array = np.empty([4, 1])
        lines_array[:, 0] = line_array
        return line_model, clv_model, lines_array, planet_K, 1.00000, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': 1.000000,
        'FWHM': line_array[2],
        'winds': line_array[3],
    }

    # Gaussian priors on the selected variables (key_vals = [mean, sigma])
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 3: only one spectral line, no winds and no planetary radius dependance """
def logprob_case03(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_model,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 3: one spectral line, no winds and no
    planetary-radius dependence (clv_model is precomputed, planet_R is
    reported as 1).

    theta layout: [contrast, FWHM, ..., planet_K, jitter(s)].
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (line_model, clv_model, lines_array, planet_K, 1.0, jitter_pams)
    instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet """
    planet_K = theta[-2-i_j]

    # line parameters: [wavelength, contrast, FWHM, winds(=0)]
    line_array = [lines_center[0], theta[0], theta[1], 0.000]

    line_model = compute_single_line(wave_meshgrid, planet_RVsinusoid, planet_K, line_array)

    # residuals after removing both the CLV+RM and the line models
    flux_res = transmission_spec / clv_model / line_model - 1.
    ferr_res = transmission_spec_err / clv_model / line_model

    if return_models:
        lines_array = np.empty([4, 1])
        lines_array[:, 0] = line_array
        return line_model, clv_model, lines_array, planet_K, 1.00000, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': 1.000000,
        'FWHM': line_array[2],
        'winds': line_array[3],
    }

    # Gaussian priors on the selected variables (key_vals = [mean, sigma])
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        log_prior += (-(var_dict[key_name] - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 10: more than one spectral lines, all line parameters are free and independent """
def logprob_case10(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 10: several spectral lines, all line
    parameters free and independent; CLV+RM model interpolated at the free
    planetary radius.

    theta layout: [contrast_0, FWHM_0, winds_0, contrast_1, FWHM_1, winds_1,
    ..., planet_R, planet_K, jitter(s)].
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # contrast, FWHM and winds are consumed sequentially from theta
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        lines_array[2, ii] = theta[i_pams]
        i_pams += 1
        lines_array[3, ii] = theta[i_pams]
        i_pams += 1

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 11: more than one spectral lines, all lines are affected by the same wind """
def logprob_case11(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 11: several spectral lines sharing a single
    wind parameter; CLV+RM model interpolated at the free planetary radius.

    theta layout: [contrast_0, FWHM_0, contrast_1, FWHM_1, ..., winds,
    planet_R, planet_K, jitter(s)] (the shared wind sits at theta[-4-i_j]).
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # contrast and FWHM are consumed sequentially from theta
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        lines_array[2, ii] = theta[i_pams]
        i_pams += 1
        # shared wind parameter for all the lines
        lines_array[3, ii] = theta[-4-i_j]

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 12: more than one spectral lines, all lines have same FWHM """
def logprob_case12(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 12: several spectral lines sharing a single
    FWHM; CLV+RM model interpolated at the free planetary radius.

    theta layout: [contrast_0, winds_0, contrast_1, winds_1, ..., FWHM,
    planet_R, planet_K, jitter(s)] (the shared FWHM sits at theta[-4-i_j]).
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # contrast and winds are consumed sequentially from theta
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        # shared FWHM for all the lines
        lines_array[2, ii] = theta[-4-i_j]
        lines_array[3, ii] = theta[i_pams]
        i_pams += 1

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 13: more than one spectral lines, all lines are affected by the same wind and have same FWHM """
def logprob_case13(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 13: several spectral lines sharing a single
    FWHM and a single wind; CLV+RM model interpolated at the free planetary
    radius.

    theta layout: [contrast_0, contrast_1, ..., FWHM, winds, planet_R,
    planet_K, jitter(s)] (shared FWHM at theta[-5-i_j], shared wind at
    theta[-4-i_j]).
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # only the contrast is line-specific
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        # shared FWHM and wind for all the lines
        lines_array[2, ii] = theta[-5-i_j]
        lines_array[3, ii] = theta[-4-i_j]

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 14: more than one spectral lines, no winds """
def logprob_case14(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 14: several spectral lines, no winds (all
    fixed to zero); CLV+RM model interpolated at the free planetary radius.

    theta layout: [contrast_0, FWHM_0, contrast_1, FWHM_1, ..., planet_R,
    planet_K, jitter(s)].
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # contrast and FWHM are consumed sequentially from theta
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        lines_array[2, ii] = theta[i_pams]
        i_pams += 1
        # winds are fixed to zero in this case
        lines_array[3, ii] = 0.000

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 15: more than one spectral lines, no winds, all lines have same FWHM """
def logprob_case15(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_rm_radius,
                   clv_rm_grid,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 15: several spectral lines, no winds, all
    lines sharing a single FWHM; CLV+RM model interpolated at the free
    planetary radius.

    theta layout: [contrast_0, contrast_1, ..., FWHM, planet_R, planet_K,
    jitter(s)] (the shared FWHM sits at theta[-4-i_j]).
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, planet_R,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet
        the third-last value after jitter is always the planetary radius, if included in the model
    """
    planet_K = theta[-2-i_j]
    planet_R = theta[-3-i_j]

    """ computing interpolated model spectrum """
    clv_model = interpolate2d_grid_nocheck(planet_R, clv_rm_radius, clv_rm_grid)

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # only the contrast is line-specific
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        # shared FWHM, winds fixed to zero
        lines_array[2, ii] = theta[-4-i_j]
        lines_array[3, ii] = 0.000

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, planet_R, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': planet_R,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 20: more than one spectral lines, no Rp dependance, all line parameters are free and independent """
def logprob_case20(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_model,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 20: several spectral lines, all line
    parameters free and independent; no planetary-radius dependence
    (clv_model is precomputed, planet_R is reported as 1).

    theta layout: [contrast_0, FWHM_0, winds_0, ..., planet_K, jitter(s)].
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, 1.0,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet """
    planet_K = theta[-2-i_j]

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # contrast, FWHM and winds are consumed sequentially from theta
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        lines_array[2, ii] = theta[i_pams]
        i_pams += 1
        lines_array[3, ii] = theta[i_pams]
        i_pams += 1

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': 1.00000,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 21: more than one spectral lines, no Rp dependance, all lines are affected by the same wind """
def logprob_case21(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_model,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 21: several spectral lines sharing a single
    wind; no planetary-radius dependence (clv_model is precomputed,
    planet_R is reported as 1).

    theta layout: [contrast_0, FWHM_0, contrast_1, FWHM_1, ..., winds,
    planet_K, jitter(s)] (the shared wind sits at theta[-3-i_j]).
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, 1.0,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet """
    planet_K = theta[-2-i_j]

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # contrast and FWHM are consumed sequentially from theta
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        lines_array[2, ii] = theta[i_pams]
        i_pams += 1
        # shared wind parameter for all the lines
        lines_array[3, ii] = theta[-3-i_j]

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': 1.000000,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 22: more than one spectral lines, no Rp dependance, all lines have same FWHM """
def logprob_case22(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_model,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 22: several spectral lines sharing a single
    FWHM; no planetary-radius dependence (clv_model is precomputed,
    planet_R is reported as 1).

    theta layout: [contrast_0, winds_0, contrast_1, winds_1, ..., FWHM,
    planet_K, jitter(s)] (the shared FWHM sits at theta[-3-i_j]).
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, 1.0,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet """
    planet_K = theta[-2-i_j]

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # contrast and winds are consumed sequentially from theta
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        # shared FWHM for all the lines
        lines_array[2, ii] = theta[-3-i_j]
        lines_array[3, ii] = theta[i_pams]
        i_pams += 1

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': 1.000000,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 23: more than one spectral lines, no Rp dependance, all lines are affected by the same wind and have same FWHM """
def logprob_case23(theta,
                   boundaries,
                   wave_meshgrid,
                   transmission_spec,
                   transmission_spec_err,
                   clv_model,
                   planet_RVsinusoid,
                   lines_center,
                   jitter_index,
                   priors_dict,
                   return_models=False
                   ):
    """ Log-probability, case 23: several spectral lines sharing a single
    FWHM and a single wind; no planetary-radius dependence (clv_model is
    precomputed, planet_R is reported as 1).

    theta layout: [contrast_0, contrast_1, ..., FWHM, winds, planet_K,
    jitter(s)] (shared FWHM at theta[-4-i_j], shared wind at theta[-3-i_j]).
    Returns -np.inf outside the boundaries. When return_models is True it
    returns (lines_model, clv_model, lines_array, planet_K, 1.0,
    jitter_pams) instead of the log-probability.
    """

    """ check the boundaries """
    if ((theta < boundaries[:, 0]) | (theta > boundaries[:, 1])).any():
        return -np.inf

    """ unfolding jitter parameters """
    if jitter_index is None:
        # no jitter term at all
        i_j = -1
        jitter_array = 0.
        jitter_pams = 0.
    elif len(jitter_index) > 0:
        # one jitter term per dataset, mapped through jitter_index
        jitter_array = jitter_index * 0.
        # np.int was removed in NumPy 1.24: use the builtin int instead
        n_jitter = int(np.amax(jitter_index) + 1)
        jitter_pams = np.empty(n_jitter)
        for i_j in range(0, n_jitter):
            sel = (jitter_index == i_j)
            jitter_array[sel] = theta[-1-i_j]
            jitter_pams[i_j] = theta[-1-i_j]
    else:
        # a single jitter term shared by every point
        i_j = 0
        jitter_array = wave_meshgrid*0. + theta[-1]
        jitter_pams = theta[-1]

    """ the second-last value after jitter is always the semi-amplitude of the planet """
    planet_K = theta[-2-i_j]

    """ lines_array rows: [0] wavelength, [1] contrast, [2] FWHM, [3] winds """
    n_lines = len(lines_center)
    lines_array = np.empty([4, n_lines])
    i_pams = 0
    for ii in range(0, n_lines):
        lines_array[0, ii] = lines_center[ii]
        # only the contrast is line-specific
        lines_array[1, ii] = theta[i_pams]
        i_pams += 1
        # shared FWHM and wind for all the lines
        lines_array[2, ii] = theta[-4-i_j]
        lines_array[3, ii] = theta[-3-i_j]

    lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)

    # residuals after removing both the CLV+RM and the lines models
    flux_res = transmission_spec / clv_model / lines_model - 1.
    ferr_res = transmission_spec_err / clv_model / lines_model

    if return_models:
        return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams

    var_dict = {
        'planet_K': planet_K,
        'planet_R': 1.000000,
        'FWHM': lines_array[2, :],
        'winds': lines_array[3, :],
    }

    # Gaussian priors, applied to every element of array-valued variables
    log_prior = 0.
    for key_name, key_vals in priors_dict.items():
        for var_value in np.atleast_1d(var_dict[key_name]):
            log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))

    """ adding the jitter to the error estimates """
    env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
    return log_prior - 0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
                              np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 24: more than one spectral lines, no Rp dependance, no winds """
def logprob_case24(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
n_jitter = np.int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[i_pams]
i_pams += 1
lines_array[3, ii] = 0.000
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
""" case 25: more than one spectral lines, no Rp dependance, no winds, all lines have same FWHM """
def logprob_case25(theta,
boundaries,
wave_meshgrid,
transmission_spec,
transmission_spec_err,
clv_model,
planet_RVsinusoid,
lines_center,
jitter_index,
priors_dict,
return_models=False
):
""" check the boundaries """
if ((theta < boundaries[:,0]) | (theta > boundaries[:,1])).any():
return -np.inf
""" unfolding jitter parameters """
if jitter_index is None:
i_j = -1
jitter_array = 0.
jitter_pams = 0.
elif len(jitter_index) > 0:
jitter_array = jitter_index * 0.
n_jitter = np.int(np.amax(jitter_index) + 1)
jitter_pams = np.empty(n_jitter)
for i_j in range(0, n_jitter):
sel = (jitter_index == i_j)
jitter_array[sel] = theta[-1-i_j]
jitter_pams[i_j] = theta[-1-i_j]
else:
i_j = 0
jitter_array = wave_meshgrid*0. + theta[-1]
jitter_pams = theta[-1]
""" the second-last value after jitter is always the semi-amplitude of the planet
the third-last value after jitter is always the planetary radius, if included in the model
"""
planet_K = theta[-2-i_j]
""" line_array is always structured in this way:
line_array = np.empty(n_pams, n_lines)
line_array[0, 0] = wavelength
line_array[1, 0] = contrast
line_array[2, 0] = FWHM
line_array[3, 0] = winds
"""
n_lines = len(lines_center)
lines_array = np.empty([4, n_lines])
i_pams = 0
for ii in range(0, n_lines):
lines_array[0, ii] = lines_center[ii]
lines_array[1, ii] = theta[i_pams]
i_pams += 1
lines_array[2, ii] = theta[-3-i_j]
lines_array[3, ii] = 0.000
lines_model = compute_multiple_lines(wave_meshgrid, planet_RVsinusoid, planet_K, lines_array, n_lines)
flux_res = transmission_spec / clv_model / lines_model - 1.
ferr_res = transmission_spec_err / clv_model / lines_model
if return_models:
return lines_model, clv_model, lines_array, planet_K, 1.0000, jitter_pams
var_dict = {
'planet_K': planet_K,
'planet_R': 1.000000,
'FWHM': lines_array[2,:],
'winds': lines_array[3,:],
}
log_prior = 0.
for key_name, key_vals in priors_dict.items():
for var_value in np.atleast_1d(var_dict[key_name]):
log_prior += (-(var_value - key_vals[0]) ** 2 / (2 * key_vals[1] ** 2) - 0.5 * np.log(2*np.pi) - np.log(key_vals[1]))
""" the last value is always the jitter """
env = 1.0 / (jitter_array ** 2.0 + ferr_res ** 2.0)
return log_prior -0.5 * (np.size(flux_res) * np.log(2 * np.pi) +
np.sum((flux_res) ** 2 * env - np.log(env)))
| 48,048 | 33.345247 | 135 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/kepler_exo.py | import numpy as np
from scipy.optimize import fsolve
import SLOPpy.subroutines.constants as constants
# +
# NAME:
# exofast_keplereq
# PURPOSE:
# Solve Kepler's Equation
# DESCRIPTION:
# Solve Kepler's Equation. Method by S. Mikkola (1987) Celestial
# Mechanics, 40 , 329-334.
# result from Mikkola then used as starting value for
# Newton-Raphson iteration to extend the applicability of this
# function to higher eccentricities
__all__ = ["kepler_K1",
"kepler_RV",
"kepler_Tc2phase_Tref",
"kepler_phase2Tc_Tref",
"get_planet_mass",
"kepler_true_anomaly_orbital_distance"]
def kepler_E(M_in, ec):
    """ Solve Kepler's equation  E - e*sin(E) = M  for the eccentric anomaly E.

    The starting value is computed with the method of S. Mikkola (1987),
    Celestial Mechanics 40, 329-334, then refined with Newton-Raphson
    iterations to extend the applicability to higher eccentricities.

    :param M_in: mean anomaly, scalar or array [rad]
    :param ec: orbital eccentricity
    :return: array of eccentric anomalies, one per input mean anomaly [rad]
    """
    M = np.atleast_1d(M_in)
    ecc = np.asarray(ec, dtype=np.double)
    eccanom = np.zeros(np.size(M), dtype=np.double)

    for ii in range(0, np.size(M)):

        # fold the mean anomaly into (-pi, pi]
        mx = M[ii]
        if mx > np.pi:
            mx = mx % (2. * np.pi)
            if mx > np.pi:
                mx = mx - (2. * np.pi)
        if mx <= -np.pi:
            mx = mx % (2. * np.pi)
            if mx < -np.pi:
                mx += (2. * np.pi)

        if ecc < 1e-10:
            # circular orbit: the eccentric anomaly equals the mean anomaly
            eccanom[ii] = mx
        else:
            # Mikkola (1987) starting guess, equation 9a
            aux = 4.0 * ecc + 0.50
            alpha = (1.0 - ecc) / aux
            beta = mx / (2.0 * aux)

            # equation 9b
            ## the actual equation 9b is much much slower, but gives the same
            ## answer (probably because more refinement necessary)
            aux = np.sqrt(beta * beta + alpha * alpha * alpha)
            z = beta + aux
            if z < 0.:
                z = beta - aux
            z = z ** (1. / 3.)

            if abs(z) < 1e-8:
                s0 = 0.
            else:
                s0 = z - alpha / z
            s1 = s0 - (0.078 * s0 ** 5) / ((1.) + ecc)
            e0 = mx + ecc * (3. * s1 - 4. * s1 ** 3.)

            se0 = np.sin(e0)
            ce0 = np.cos(e0)

            # fourth-order correction of the starting value
            f = e0 - ecc * se0 - mx
            f1 = (1.0) - ecc * ce0
            f2 = ecc * se0
            f3 = ecc * ce0
            u1 = -f / f1
            u2 = -f / (f1 + 0.5 * f2 * u1)
            u3 = -f / (f1 + 0.5 * f2 * u2 + (1. / 6.) * f3 * u2 ** 2.)
            u4 = -f / (f1 + 0.5 * f2 * u3 + (1. / 6.) * f3 * u3 ** 2 - (1. / 24.) * f2 * u3 ** 3)

            ecan_tmp = e0 + u4
            if ecan_tmp >= 2. * np.pi:
                ecan_tmp = ecan_tmp - 2. * np.pi
            if ecan_tmp < 0.:
                ecan_tmp = ecan_tmp + 2. * np.pi

            ## Now get more precise solution using Newton Raphson method
            ## for those times when the Kepler equation is not yet solved
            ## to better than 1e-10
            ## (modification J. Wilms)
            if mx < 0.:
                mx = mx + 2. * np.pi

            ## calculate the differences
            diff = abs(ecan_tmp - ecc * np.sin(ecan_tmp) - mx)
            if diff > abs(diff - 2 * np.pi):
                diff = abs(diff - 2 * np.pi)

            thresh1 = 1e-8
            thresh2 = 10000
            countt = 0

            while (diff > thresh1 and countt < thresh2):
                ## E-e sinE-M
                fe = (ecan_tmp - ecc * np.sin(ecan_tmp) - mx) % (2 * np.pi)
                ## f' = 1-e*cosE
                fs = (1. - ecc * np.cos(ecan_tmp)) % (2 * np.pi)
                oldval = ecan_tmp
                ecan_tmp = (oldval - fe / fs)
                diff = abs(oldval - ecan_tmp)
                countt += 1

            ## range reduction into [0, 2*pi).
            ## BUG FIX: the original wrote `ecan_tmp % 2. * np.pi`, which parses
            ## as `(ecan_tmp % 2.) * np.pi` and corrupted the result; moreover
            ## Python's % already returns a value in [0, 2*pi), so no +2*pi is needed.
            if ecan_tmp >= 2. * np.pi or ecan_tmp < 0.:
                ecan_tmp = ecan_tmp % (2. * np.pi)

            eccanom[ii] = ecan_tmp
    return eccanom
def kepler_K1(m_star1, m_star2, period, i, e0):
    """ Radial-velocity semi-amplitude of the primary star.

    :param m_star1: mass of the primary, in Solar mass units
    :param m_star2: mass of the secondary/planet, in Solar mass units
    :param period: orbital period of star2, in [d]
    :param i: orbital inclination of star2 wrt the observer (0=face on), in [deg]
    :param e0: orbital eccentricity of star2
    :return: k1, the observed radial velocity semi-amplitude of the primary, in [m s^-1]
    """
    # (2*pi*G*Msun / 86400)^(1/3): unit conversion so that the period can be
    # given in days and the masses in Solar units (86400 s in a day)
    unit_factor = (2. * np.pi * constants.Gsi * constants.Msun / 86400.) ** (1. / 3.)
    # geometric projection and eccentricity correction
    geometry_term = np.sin(i * np.pi / 180.0) / np.sqrt(1. - e0 ** 2.)
    mass_term = m_star2 * (m_star1 + m_star2) ** (-2. / 3.)
    return unit_factor * geometry_term * period ** (-1. / 3.) * mass_term
def kepler_RV(BJD, TPeri, Period, gamma, K, e0, omega0):
    """ Keplerian radial velocity at epoch(s) BJD.

    :param TPeri: time of periastron passage
    :param gamma: systemic velocity
    :param omega0: argument of pericenter [rad]
    """
    # mean anomaly from the time of periastron passage
    mean_anomaly = 2. * np.pi * (1. + (BJD - TPeri) / Period % 1.)

    if abs(e0) < 1e-3:
        # nearly circular orbit: eccentricity and pericenter set to zero
        true_anomaly = np.asarray(mean_anomaly, dtype=np.double)
        ecc = np.asarray(0., dtype=np.double)
        omega = np.asarray(0., dtype=np.double)
    else:
        # a negative eccentricity is folded into a pericenter shift of pi
        if e0 < 0.:
            ecc = np.asarray(-e0, dtype=np.double)
            omega = np.asarray(omega0, dtype=np.double) + np.pi
        else:
            ecc = np.asarray(e0, dtype=np.double)
            omega = np.asarray(omega0, dtype=np.double)

    # solve Kepler's equation, then convert eccentric -> true anomaly
    ecc_anomaly = kepler_E(mean_anomaly, ecc)
    true_anomaly = 2. * np.arctan(np.sqrt((1.0 + ecc) / (1.0 - ecc)) * np.tan(ecc_anomaly / 2.0))

    return K * (np.cos(true_anomaly + omega) + ecc * np.cos(omega)) + gamma
def kepler_RV_T0P(BJD0, phase, Period, K, e0, omega0):
    """ Keplerian radial velocity with time expressed as BJD - T0.

    BJD0 is given as BJD-T0, where T0 is arbitrarily defined by the user.
    TPeri is replaced by `phase`, the orbital phase such that
    BJD0 + T0 + phase*Period = TPeri.
    omega0 is the argument of pericenter [rad].
    """
    omega = np.asarray(omega0, dtype=np.double)
    ecc = np.asarray(e0, dtype=np.double)

    # mean anomaly built from the user-defined phase offset
    mean_anomaly = 2. * np.pi * (1. + ((BJD0 / Period) + (phase - omega0) / (2 * np.pi)) % 1.)

    if abs(e0) < 1e-3:
        # nearly circular orbit: zero the eccentricity (omega is kept as given)
        ecc = np.asarray(0., dtype=np.double)
    else:
        # a negative eccentricity is folded into a pericenter shift of pi
        if e0 < 0.:
            ecc = -1 * ecc
            omega += np.pi

    # solve Kepler's equation, then convert eccentric -> true anomaly
    ecc_anomaly = kepler_E(mean_anomaly, ecc)
    true_anomaly = 2. * np.arctan(np.sqrt((1.0 + ecc) / (1.0 - ecc)) * np.tan(ecc_anomaly / 2.0))

    return K * (np.cos(true_anomaly + omega) + ecc * np.cos(omega))
def kepler_true_anomaly_orbital_distance(BJD0, Tcent0, Period, e0, omega0, a_sm):
    """ True anomaly and star-planet distance at epoch(s) BJD0.

    :param BJD0: epoch, given as BJD - T0 (T0 arbitrarily defined by the user)
    :param Tcent0: central transit time, relative to the same T0
    :param Period: orbital period [d]
    :param e0: orbital eccentricity
    :param omega0: argument of pericenter [rad]
    :param a_sm: orbital semi-major axis (r_orb is returned in the same units)
    :return: (TrAn, r_orb) — true anomaly [rad] and orbital distance
    """
    # BJD0 is given as BJD-T0, where T0 is arbitrarily defined by the user
    # Tperi_ is substituted by _phase_, which is the phase of the orbit where
    # BJD0+T0+phase*Period = Tperi
    # omega = argument of pericenter
    phase = kepler_Tc2phase_Tref(Period, Tcent0, e0, omega0)
    omega = np.asarray(omega0, dtype=np.double)
    e = np.asarray(e0, dtype=np.double)
    # mean anomaly built from the transit-referenced phase offset
    MeAn = 2. * np.pi * (1. + ((BJD0 / Period) + (phase - omega0) / (2 * np.pi)) % 1.)
    if abs(e0) < 1e-3:
        # nearly circular orbit: true anomaly = mean anomaly, constant distance
        TrAn = np.asarray(MeAn, dtype=np.double)
        e = np.asarray(0., dtype=np.double)
        r_orb = a_sm
    else:
        # a negative eccentricity is folded into a pericenter shift of pi
        if e0 < 0.:
            e = -1 * e
            omega += np.pi
        # Eccentric Anomaly
        EccAn = kepler_E(MeAn, e)
        TrAn = 2. * np.arctan(np.sqrt((1.0 + e) / (1.0 - e)) * np.tan(EccAn / 2.0))
        # conic-section radius equation
        r_orb = a_sm * (1. - e ** 2) / (1. + e * np.cos(TrAn))
    return TrAn, r_orb
def kepler_phase2Tc_Tref(Period, phase, e0, omega0):
    """ Convert the orbital phase offset into the central transit time.

    The closest Tcent after the reference epoch Tref is returned.
    """
    # true anomaly at inferior conjunction (transit center)
    nu_transit = np.pi / 2 - omega0
    # eccentric and mean anomaly at transit center
    ecc_anomaly = 2. * np.arctan(np.sqrt((1.0 - e0) / (1.0 + e0)) * np.tan(nu_transit / 2.0))
    mean_anomaly = ecc_anomaly - e0 * np.sin(ecc_anomaly)
    return (mean_anomaly - phase + omega0) / (2 * np.pi) * Period % Period
def kepler_Tc2phase_Tref(Period, Tcent, e0, omega0):
    """ Convert a central transit time into the orbital phase offset.

    Inverse of kepler_phase2Tc_Tref: the phase corresponding to the closest
    Tcent after the reference epoch Tref is returned.
    """
    # true anomaly at inferior conjunction (transit center)
    nu_transit = np.pi / 2 - omega0
    # eccentric and mean anomaly at transit center
    ecc_anomaly = 2. * np.arctan(np.sqrt((1.0 - e0) / (1.0 + e0)) * np.tan(nu_transit / 2.0))
    mean_anomaly = ecc_anomaly - e0 * np.sin(ecc_anomaly)
    return (omega0 + mean_anomaly - Tcent / Period * 2 * np.pi) % (2 * np.pi)
def f_get_mass(m_star2, m_star1, period, e0, k1):
    """ Difference between the observed RV semi-amplitude of the primary and
    the value predicted for the given orbital parameters.

    Supporting function for the get_planet_mass subroutine: its root in
    m_star2 is the planetary mass reproducing k1.

    :param m_star2: mass of the secondary/planet, in Solar mass units
    :param m_star1: mass of the primary, in Solar mass units
    :param period: orbital period of star2, in [d] (converted internally)
    :param e0: orbital eccentricity of star2
    :param k1: observed RV semi-amplitude of the primary, in [m s^-1]
    :return: observed minus theoretical RV semi-amplitude, in [m s^-1]
    """
    # inclination assumed to be 90 degrees; constants.Gsi is the gravitational
    # constant in SI units, constants.Msun the Solar mass in kg, 86400 s = 1 d
    k_model = ((2. * np.pi * constants.Gsi * constants.Msun / 86400.0) ** (1. / 3.)
               * (1. / np.sqrt(1. - e0 ** 2.))
               * period ** (-1. / 3.)
               * (m_star2 * (m_star1 + m_star2) ** (-2. / 3.)))
    return k1 - k_model
def get_approximate_mass(period, k1, e0, m_star1):
    """ Approximate planetary mass, assuming M_planet << M_star.

    :param period: orbital period of star2, in [d]
    :param k1: observed RV semi-amplitude of the primary, in [m s^-1]
    :param e0: orbital eccentricity of star2
    :param m_star1: mass of the primary, in Solar mass units
    :return: mass of the planet, in Solar mass units
    """
    # RV semi-amplitude produced by a unit planetary mass (edge-on orbit)
    k_per_unit_mass = ((2. * np.pi * constants.Gsi * constants.Msun / 86400.0) ** (1. / 3.)
                       * (1. / np.sqrt(1. - e0 ** 2.))
                       * period ** (-1. / 3.)
                       * (m_star1 ** (-2. / 3.)))
    return k1 / k_per_unit_mass
def get_planet_mass(P, K, e, Mstar, approximation_limit=30.):
    """ Planetary mass from the RV semi-amplitude.

    For a single (P, K, e, Mstar) set the exact mass is always computed by
    root finding; for arrays, the exact computation is performed only when the
    average approximate mass exceeds approximation_limit [Earth masses].
    """
    if np.size(K) == 1:
        # single value: refine the approximate mass numerically,
        # capping the starting guess at two Earth masses
        starting_guess = min(get_approximate_mass(P, K, e, Mstar), 2 * constants.Msear)
        return fsolve(f_get_mass, starting_guess, args=(Mstar, P, e, K))

    masses = get_approximate_mass(P, K, e, Mstar)

    if np.average(masses) > approximation_limit / constants.Msear:
        print('Computing exact mass of the planet (average approximate mass larger than {0:3.1f} Me)'.format(approximation_limit))
        common_guess = np.average(masses)
        for idx in range(0, np.size(K)):
            masses[idx] = fsolve(f_get_mass, np.average(common_guess), args=(Mstar[idx], P[idx], e[idx], K[idx]))
    else:
        print('Computing planetary mass under the approximation M_planet << M_star (threshold at {0:3.1f} Me)'.format(approximation_limit))

    return masses
| 11,085 | 34.993506 | 139 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/common.py | import sys
import os
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from matplotlib.colors import Normalize
import numpy as np
import scipy.stats as sci_stats
import scipy.optimize as sci_optimize
import scipy.interpolate as sci_int
from astropy.io import fits
import json
import warnings
import pygtc
import re
# Use ordered dictionaries for the observations
from collections import OrderedDict
"""
List of exceptions
"""
class MissingFileException(Exception):
    """Raised when a file required by the pipeline cannot be found."""
    pass
class MissingKeywordException(Exception):
    """Raised when a required keyword is missing (e.g. from a configuration or header)."""
    pass
class OutOfRangeException(Exception):
    """Raised when a value falls outside the supported range."""
    pass
def difference_utc2tdb(jd):
    """ TDB - UTC offset, in days, at the given Julian Date.

    The tabulated offsets are TAI-UTC leap seconds + 32.184 s, expressed in
    days; each table row applies from its epoch (inclusive) until the next one.

    Fixes with respect to the original implementation:
    - at a transition epoch the NEW offset is now returned (the original
      strict `<` comparison returned the previous one);
    - dates earlier than the first tabulated epoch (1972-01-01) raise an
      explicit IndexError instead of failing on an empty fancy-indexing result.

    :param jd: Julian Date (UTC), scalar
    :return: TDB - UTC, in days
    """
    # 2441317.5 1972-01-01T00:00:00 2272060800 10 42.184
    # 2441499.5 1972-07-01T00:00:00 2287785600 11 43.184
    # 2441683.5 1973-01-01T00:00:00 2303683200 12 44.184
    # 2442048.5 1974-01-01T00:00:00 2335219200 13 45.184
    # 2442413.5 1975-01-01T00:00:00 2366755200 14 46.184
    # 2442778.5 1976-01-01T00:00:00 2398291200 15 47.184
    # 2443144.5 1977-01-01T00:00:00 2429913600 16 48.184
    # 2443509.5 1978-01-01T00:00:00 2461449600 17 49.184
    # 2443874.5 1979-01-01T00:00:00 2492985600 18 50.184
    # 2444239.5 1980-01-01T00:00:00 2524521600 19 51.184
    # 2444786.5 1981-07-01T00:00:00 2571782400 20 52.184
    # 2445151.5 1982-07-01T00:00:00 2603318400 21 53.184
    # 2445516.5 1983-07-01T00:00:00 2634854400 22 54.184
    # 2446247.5 1985-07-01T00:00:00 2698012800 23 55.184
    # 2447161.5 1988-01-01T00:00:00 2776982400 24 56.184
    # 2447892.5 1990-01-01T00:00:00 2840140800 25 57.184
    # 2448257.5 1991-01-01T00:00:00 2871676800 26 58.184
    # 2448804.5 1992-07-01T00:00:00 2918937600 27 59.184
    # 2449169.5 1993-07-01T00:00:00 2950473600 28 60.184
    # 2449534.5 1994-07-01T00:00:00 2982009600 29 61.184
    # 2450083.5 1996-01-01T00:00:00 3029443200 30 62.184
    # 2450630.5 1997-07-01T00:00:00 3076704000 31 63.184
    # 2451179.5 1999-01-01T00:00:00 3124137600 32 64.184
    # 2453736.5 2006-01-01T00:00:00 3345062400 33 65.184
    # 2454832.5 2009-01-01T00:00:00 3439756800 34 66.184
    # 2456109.5 2012-07-01T00:00:00 3550089600 35 67.184
    # 2457204.5 2015-07-01T00:00:00 3644697600 36 68.184
    # 2457754.5 2017-01-01T00:00:00 3692217600 37 69.184

    jd_table = np.asarray([2441317.5, 2441499.5, 2441683.5, 2442048.5, 2442413.5, 2442778.5, 2443144.5, 2443509.5, 2443874.5, 2444239.5,
                           2444786.5, 2445151.5, 2445516.5, 2446247.5, 2447161.5, 2447892.5, 2448257.5, 2448804.5, 2449169.5, 2449534.5,
                           2450083.5, 2450630.5, 2451179.5, 2453736.5, 2454832.5, 2456109.5, 2457204.5, 2457754.5])

    df_table = np.asarray([42.184, 43.184, 44.184, 45.184, 46.184, 47.184, 48.184, 49.184, 50.184, 51.184,
                           52.184, 53.184, 54.184, 55.184, 56.184, 57.184, 58.184, 59.184, 60.184, 61.184,
                           62.184, 63.184, 64.184, 65.184, 66.184, 67.184, 68.184, 69.184])/86400.

    # side='right' selects the last epoch <= jd, so the new offset applies
    # from the transition instant itself
    idx = np.searchsorted(jd_table, jd, side='right') - 1
    if idx < 0:
        raise IndexError('difference_utc2tdb: no UTC-TDB offset tabulated before JD {0:f}'.format(jd_table[0]))
    return df_table[idx]
SLOPpy | SLOPpy-main/SLOPpy/subroutines/math_functions.py | import numpy as np
from scipy.optimize import curve_fit
def interpolate1d_grid_nocheck(val, arr_1, arr_2):
    """ Linear interpolation of the rows of arr_2 along the grid arr_1.

    No boundary checks are performed: arr_1 must contain at least one value
    strictly greater than val, and the bracketing interval is taken around
    the first such element.
    """
    # index of the first grid point strictly above val
    idx_hi = next(jj for jj, grid_val in enumerate(arr_1) if grid_val > val)
    idx_lo = idx_hi - 1
    # fractional position of val inside the bracketing interval
    weight = (val - arr_1[idx_lo]) / (arr_1[idx_hi] - arr_1[idx_lo])
    return weight * (arr_2[idx_hi, :] - arr_2[idx_lo, :]) + arr_2[idx_lo, :]
def interpolate2d_grid_nocheck(val, arr_1, arr_2):
    """ Linear interpolation of the 2D slices of arr_2 along the grid arr_1.

    No boundary checks are performed: arr_1 must contain at least one value
    strictly greater than val, and the bracketing interval is taken around
    the first such element.
    """
    # index of the first grid point strictly above val
    idx_hi = next(jj for jj, grid_val in enumerate(arr_1) if grid_val > val)
    idx_lo = idx_hi - 1
    # fractional position of val inside the bracketing interval
    weight = (val - arr_1[idx_lo]) / (arr_1[idx_hi] - arr_1[idx_lo])
    return weight * (arr_2[idx_hi, :, :] - arr_2[idx_lo, :, :]) + arr_2[idx_lo, :, :]
def first_derivative(x_arr, y_arr):
    """ Two-point central-difference derivative of y with respect to x.

    The endpoints are copied from the nearest interior estimate, so the
    output has the same length as the input arrays.
    """
    deriv = np.zeros(len(x_arr), dtype=np.double)
    # central differences on the interior points
    deriv[1:-1] = (y_arr[2:] - y_arr[:-2]) / (x_arr[2:] - x_arr[:-2])
    # propagate the closest interior estimate to the boundaries
    deriv[0], deriv[-1] = deriv[1], deriv[-2]
    return deriv
| 1,020 | 35.464286 | 116 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/eso_skycalc_cli.py | from __future__ import print_function, division
from SLOPpy.subroutines.common import *
from SLOPpy.subroutines.io_subroutines import get_filename
def get_eso_sckycalc_harps(obs_ref, wave_range, ra, dec, night, output):
    """ Retrieve (or load from cache) an ESO SkyCalc telluric transmission
    spectrum for a HARPS observation, by driving the `skycalc_cli` tool.

    # https://www.eso.org/observing/etc/doc/skycalc/helpskycalccli.html

    :param obs_ref: observation identifier; characters [6:25] are the time tag
    :param wave_range: [min, max] wavelength range, in Angstrom
    :param ra, dec: target coordinates passed to the almanac
    :param night: night label used to build the cached file names
    :param output: output rootname used to build the cached file names
    :return: (wavelength [A], step [A], transmission, ones) arrays
    """
    time_tag = obs_ref[6:25]
    wave_range_nm = wave_range/10.
    wdelta = 0.00075

    input_filename = get_filename('skycalc_input_' + repr(wave_range_nm[0]) + '_' + repr(wave_range_nm[1]),
                                  output, night, extension=".JSON")
    alman_filename = get_filename('skycalc_alman_' + time_tag + '_' + repr(wave_range_nm[1]),
                                  output, night, extension=".JSON")
    output_filename = get_filename('skycalc_output_' + repr(wave_range_nm[0]) + '_' + repr(wave_range_nm[1]) + time_tag,
                                   output, night, extension=".fits")

    if os.path.isfile(output_filename):
        # BUG FIX: the cached result must be read from output_filename;
        # the original opened `night + '.fits'`, which is a different file
        skycalc_hdu = fits.open(output_filename)
        data = skycalc_hdu[1].data
        skycalc_hdu.close()
        # column 0: wavelength [nm] -> [A]; column 14: transmission
        return data.field(0) * 10., \
               np.ones(len(data.field(0))) * wdelta, \
               data.field(14), \
               np.ones(len(data.field(0)))

    if not os.path.isfile(input_filename):
        # NOTE(review): wmin/wmax are taken from wave_range (Angstrom) even though
        # wave_range_nm is computed above — confirm against skycalc_cli expectations
        input_pams = {
            "pwv_mode": "pwv",
            "incl_moon": "N",  # No moon contamination
            "incl_starlight": "N",  # No starlight
            "incl_zodiacal": "N",  # No zodiacal light
            "incl_loweratm": "Y",
            "incl_upperatm": "Y",
            "incl_airglow": "N",  # No airglow
            "incl_therm": "N",
            "vacair": "air",  # compute in the air
            "wmin": wave_range[0],
            "wmax": wave_range[1],
            "wgrid_mode": "fixed_wavelength_step",
            "wdelta": wdelta,
            "wres": 20000,
            "lsf_type": "Gaussian",  # Gaussian lsf
            "lsf_gauss_fwhm": 5.5,
        }

        with open(input_filename, 'w') as outfile:
            json.dump(input_pams, outfile)

    if not os.path.isfile(alman_filename):
        almanac_pams = {
            "ra": ra,
            "dec": dec,
            "date": time_tag,
            "observatory": "lasilla"
        }

        with open(alman_filename, 'w') as outfile:
            json.dump(almanac_pams, outfile)

    # run the ESO command-line tool; it writes its result into output_filename
    os.system('skycalc_cli'
              + ' --in ' + input_filename
              + ' --alm ' + alman_filename
              + ' --out ' + output_filename)

    skycalc_hdu = fits.open(output_filename)
    data = skycalc_hdu[1].data
    skycalc_hdu.close()
    # column 0: wavelength [nm] -> [A]; column 14: transmission
    return data.field(0) * 10., \
           np.ones(len(data.field(0))) * wdelta, \
           data.field(14), \
           np.ones(len(data.field(0)))
| 2,761 | 33.098765 | 120 | py |
SLOPpy | SLOPpy-main/SLOPpy/subroutines/io_subroutines.py | from __future__ import print_function, division
import numpy as np
try:
import cPickle as pickle
except:
import pickle
import os
from os import path
import oyaml as yaml
from SLOPpy.subroutines.object_parameters import StarParameters, PlanetParameters
from SLOPpy.config_default import *
__all__ = ["save_to_cpickle",
"load_from_cpickle",
"delete_cpickle",
"check_existence_cpickle",
"get_filename",
"load_yaml_file",
"pars_input",
"yaml_parser",
"get_filelists",
"from_config_get_nights",
"from_config_get_instrument",
"from_config_get_system",
"from_config_get_pipeline",
"from_config_get_planet",
"from_config_get_star",
"from_config_get_clv_rm",
"from_config_refraction",
"from_config_get_interstellar_lines",
"from_config_get_transmission_lightcurve",
"from_config_get_transmission",
"from_config_get_molecfit",
"from_config_get_transmission_mcmc",
"from_config_get_spectral_lines",
"from_config_get_interactive_plots",
"from_config_get_pca_parameters",
"from_config_get_fullspectrum_parameters"]
# Configuration-file extensions recognized when deriving the default output rootname
accepted_extensions = ['.yaml', '.yml', '.conf', '.config', '.input', ]
def save_to_cpickle(fname, dictionary, output, night='', lines='', it_string=''):
    """ Serialize `dictionary` to the pickle file addressed by the standard
    SLOPpy naming scheme (see get_filename).

    Fix: the file handle is now closed deterministically — the original
    passed an anonymous open() to pickle.dump and leaked the handle.
    """
    output_file = get_filename(fname, output, night, lines, it_string)
    with open(output_file, "wb") as file_obj:
        pickle.dump(dictionary, file_obj)
def load_from_cpickle(fname, output, night='', lines='', it_string=''):
    """ Load and return the object stored in the pickle file addressed by the
    standard SLOPpy naming scheme (see get_filename).

    Fix: the file handle is now closed deterministically — the original
    passed an anonymous open() to pickle.load and leaked the handle.
    """
    output_file = get_filename(fname, output, night, lines, it_string)
    with open(output_file, "rb") as file_obj:
        return pickle.load(file_obj)
def delete_cpickle(fname, output, night='', lines='', it_string=''):
    """ Delete the pickle file addressed by the standard SLOPpy naming scheme. """
    target_file = get_filename(fname, output, night, lines, it_string)
    os.remove(target_file)
def check_existence_cpickle(fname, output, night='', lines='', it_string=''):
    """ Return True when the pickle file addressed by the standard SLOPpy
    naming scheme already exists on disk. """
    return path.isfile(get_filename(fname, output, night, lines, it_string))
def get_filename(fname, output, night, lines='', it_string='', extension=".p"):
    """ Build the standard SLOPpy product filename.

    The name is `output[_lines][_night]_fname[_it_string]` + extension,
    where empty components are skipped.

    Fix: removed the unreachable legacy implementation that followed the
    first return statement in the original code.

    :param fname: product name
    :param output: output rootname (always present, first component)
    :param night: night label, may be empty
    :param lines: spectral-lines label, may be empty
    :param it_string: iteration label, may be empty
    :param extension: file extension, including the leading dot
    :return: the assembled filename
    """
    str_lines = output
    for str_input in [lines, night, fname, it_string]:
        if len(str_input) > 0:
            str_lines += '_' + str_input
    return str_lines + extension
def load_yaml_file(file_conf):
    """ Parse a configuration file and normalize it in a single call.

    Shortcut intended for jupyter-notebook plotting sessions.
    """
    return pars_input(yaml_parser(file_conf))
def yaml_parser(file_conf):
    """ Read a YAML configuration file and guarantee an 'output' rootname.

    The rootname defaults to the configuration filename stripped of its
    (recognized) extension.

    Fixes:
    - the input stream is now closed deterministically (with-statement);
    - when the filename carries none of the accepted extensions, the full
      filename is used as rootname (the original left `output_name`
      undefined and raised a NameError).
    """
    with open(file_conf, 'r') as stream:
        try:
            config_in = yaml.load(stream, Loader=yaml.FullLoader)
        except AttributeError:
            # old PyYAML without FullLoader; the stream is untouched because the
            # AttributeError is raised while evaluating the Loader argument
            config_in = yaml.load(stream)
            print(' Consider updating YAML')
        except:
            print(' Some error happened while reading the configuration file')
            quit()

    if 'output' not in config_in:
        # fallback when no accepted extension matches (bug fix: was undefined)
        output_name = file_conf
        for extension in accepted_extensions:
            if file_conf.find(extension) > 0:
                # keep scanning: the last (longest-listed) matching extension wins,
                # e.g. '.config' overrides the earlier '.conf' match
                output_name = file_conf.replace(extension, "")
                continue
        config_in['output'] = output_name

    return config_in
def pars_input(config_in):
    """ Normalize the parsed configuration dictionary in place.

    Missing sections/keywords are filled from `config_default`; instrument
    settings are propagated to the individual nights. The same (modified)
    dictionary is also returned.

    :param config_in: configuration dictionary as returned by yaml_parser
    :return: the normalized configuration dictionary
    """

    config_in['system'] = {}

    # fill the 'settings' section with the defaults for any missing keyword
    if 'settings' not in config_in:
        config_in['settings'] = config_default['settings'].copy()
    else:
        for key, key_val in config_default['settings'].items():
            if key not in config_in['settings']:
                config_in['settings'][key] = key_val

    for instrument in config_in['instruments']:
        for key, key_val in config_default['instruments'].items():
            if key not in config_in['instruments'][instrument]:
                config_in['instruments'][instrument][key] = key_val

        """ create the refraction dictionary if not listed under the instrument section"""
        if 'refraction' not in config_in['instruments'][instrument]:
            config_in['instruments'][instrument]['refraction'] = {}

        """ when the refractions parameters are not explicitely specified in this section, they are either inherited
            from the top level dictionary or copied from the default dictionary """
        for key, key_val in config_default['refraction'].items():
            if key not in config_in['instruments'][instrument]['refraction']:
                try:
                    config_in['instruments'][instrument]['refraction'][key] = config_in['refraction'][key]
                except:
                    # no top-level 'refraction' section: fall back to the default value
                    config_in['instruments'][instrument]['refraction'][key] = key_val

    # fill the 'master-out' section with the defaults for any missing keyword
    if 'master-out' not in config_in:
        config_in['master-out'] = config_default['master-out'].copy()
    else:
        for key, key_val in config_default['master-out'].items():
            if key not in config_in['master-out']:
                config_in['master-out'][key] = key_val

    # the 'shared' pseudo-instrument defines the common wavelength step
    if 'shared' not in config_in['instruments']:
        if 'wavelength_step' not in config_in['master-out']:
            config_in['instruments']['shared'] = {
                'wavelength_step': 0.0100
            }
        else:
            config_in['instruments']['shared'] = {
                'wavelength_step': config_in['master-out']['wavelength_step']
            }

    if 'molecfit' not in config_in:
        config_in['molecfit'] = config_default['molecfit'].copy()
    else:
        for key, key_val in config_default['molecfit'].items():
            if key not in config_in['molecfit']:
                config_in['molecfit'][key] = key_val

    # NOTE(review): the 'molecfit' block below is a verbatim duplicate of the one
    # above; the second pass is a no-op and could be removed
    if 'molecfit' not in config_in:
        config_in['molecfit'] = config_default['molecfit'].copy()
    else:
        for key, key_val in config_default['molecfit'].items():
            if key not in config_in['molecfit']:
                config_in['molecfit'][key] = key_val

    for night in config_in['nights']:
        instrument = config_in['nights'][night]['instrument']

        """ keywords are inherited from the instrument dictionary, when not explicitely specified"""
        for key in copy_from_instrument:
            if key not in config_in['nights'][night]:
                config_in['nights'][night][key] = config_in['instruments'][instrument][key]

        # per-night refraction settings inherit the missing keys from the instrument
        if 'refraction' not in config_in['nights'][night]:
            config_in['nights'][night]['refraction'] = config_in['instruments'][instrument]['refraction'].copy()
        else:
            for key, key_val in config_in['instruments'][instrument]['refraction'].items():
                if key not in config_in['nights'][night]['refraction']:
                    config_in['nights'][night]['refraction'][key] = key_val

        #if 'master_out_method' not in config_in['nights'][night]:
        #    config_in['nights'][night]['master_out_method'] = None

        # analytical RVs require the stellar RV semi-amplitude to be known
        if config_in['nights'][night]['use_analytical_rvs'] and 'RV_semiamplitude' not in config_in['star']:
            print(" Missing RV_semiamplitude keyword for the star, the value will be computed from the RVs ")
            config_in['nights'][night]['use_analytical_rvs'] = False

    """ OLD approach to compute the RV of the planet, left here because it may be useful in the future

    try:
        _dict_star = {'mass': None, 'radius': None, 'gamma': None}
        for key in config_in['star']:
            _dict_star[key] = config_in['star'][key]

        config_in['system']['star'] = StarParameters(
            mass=_dict_star['mass'],
            radius=_dict_star['radius'])

        config_in['system']['common'] = {'degree': False, 'n_planets': 0, 'planets_list': []}
        for key in config_in['planets']['common']:
            config_in['system']['common'][key] = config_in['planets']['common'][key]

        for key in config_in['planets']:
            if key not in ['common']:
                config_in['system'][key] = PlanetParameters()
                config_in['system'][key].put_reference_epoch(config_in['system']['common']['Tref'])
                config_in['system'][key].put_RVparameters(
                    P=config_in['planets'][key]['P'],
                    K=config_in['planets'][key]['K'],
                    f=config_in['planets'][key]['Tc'],
                    e=config_in['planets'][key]['e'],
                    o=config_in['planets'][key]['o'],
                    degree=config_in['system']['common']['degree'])
                config_in['system'][key].put_RVplanet(config_in['planets'][key]['K_planet'])
                config_in['system'][key].put_star(config_in['system']['star'])
                config_in['system']['common']['n_planets'] += 1
                config_in['system']['common']['planets_list'].extend([key])
    except:
        pass
    """

    return config_in
def get_filelists(night_selected):
    """ Load the lists of observation files associated to a night.

    :param night_selected: usually the night_dict[night] dictionary from the main program
    :return: (files_list, files_transit_out, files_transit_in,
              files_transit_full, files_telluric, files_star_telluric);
              each optional list is None when its file is missing
    """

    """ List files are supposed to be in the same directory of the yaml file,
        NOT on the archive directory: in this way it is possible to try different
        combinations of nights and files without making a mess in the archive """

    files_list = np.atleast_1d(np.genfromtxt(night_selected['all'], dtype=str))

    # the three transit lists come as a set: if any is missing, none is used
    try:
        files_transit_out = np.atleast_1d(np.genfromtxt(night_selected['out_transit'], dtype=str))
        files_transit_in = np.atleast_1d(np.genfromtxt(night_selected['in_transit'], dtype=str))
        files_transit_full = np.atleast_1d(np.genfromtxt(night_selected['full_transit'], dtype=str))
    except (FileNotFoundError, IOError):
        files_transit_out = None
        files_transit_in = None
        files_transit_full = None

    try:
        files_telluric = np.atleast_1d(np.genfromtxt(night_selected['telluric_list'], dtype=str))
    except (FileNotFoundError, IOError):
        files_telluric = None

    # NOTE(review): the presence check is on 'telluric' while the list is read
    # from 'star_telluric' — presumably paired keys; confirm against the callers
    if night_selected['telluric'] is not None:
        files_star_telluric = np.atleast_1d(np.genfromtxt(night_selected['star_telluric'], dtype=str))
    else:
        files_star_telluric = None

    return files_list, files_transit_out, files_transit_in, files_transit_full, files_telluric, files_star_telluric
def from_config_get_nights(config_in):
    """
    Shortcut to the 'nights' section of the configuration.

    :param config_in: full configuration dictionary
    :return: dictionary of per-night settings (raises KeyError if the section is absent)
    """
    return config_in['nights']
def from_config_get_instrument(config_in):
    """
    Shortcut to the 'instruments' section of the configuration.

    :param config_in: full configuration dictionary
    :return: dictionary of per-instrument settings (raises KeyError if absent)
    """
    return config_in['instruments']
def from_config_refraction(config_in, night):
    """
    Shortcut to the refraction settings of a specific night.

    :param config_in: full configuration dictionary
    :param night: name of the night (a key of the 'nights' section)
    :return: dictionary of refraction parameters for that night
    """
    return config_in['nights'][night]['refraction']
def from_config_get_transmission_lightcurve(config_in):
    """
    Shortcut to the 'transmission_lightcurve' section, falling back to the
    'transmission' section for backward compatibility.

    Fix: the bare `except:` has been narrowed to KeyError, so unrelated
    errors are no longer silently swallowed.

    :param config_in: full configuration dictionary
    :return: dictionary
    """
    try:
        return config_in['transmission_lightcurve']
    except KeyError:
        return config_in['transmission']
def from_config_get_transmission(config_in):
    """
    Shortcut to the 'transmission' section, falling back to the
    'transmission_lightcurve' section for backward compatibility.

    Fix: the bare `except:` has been narrowed to KeyError, so unrelated
    errors are no longer silently swallowed.

    :param config_in: full configuration dictionary
    :return: dictionary
    """
    try:
        return config_in['transmission']
    except KeyError:
        return config_in['transmission_lightcurve']
def from_config_get_system(config_in):
    """
    Shortcut to the 'system' section of the configuration.

    :param config_in: full configuration dictionary
    :return: dictionary (raises KeyError if the section is absent)
    """
    return config_in['system']
def from_config_get_pipeline(config_in):
    """
    Shortcut to the 'pipeline' section of the configuration.

    :param config_in: full configuration dictionary
    :return: dictionary (raises KeyError if the section is absent)
    """
    return config_in['pipeline']
def from_config_get_planet(config_in):
    """
    Shortcut to the 'planet' section of the configuration.

    :param config_in: full configuration dictionary
    :return: dictionary (raises KeyError if the section is absent)
    """
    return config_in['planet']
def from_config_get_star(config_in):
    """
    Shortcut to the 'star' section of the configuration.

    :param config_in: full configuration dictionary
    :return: dictionary (raises KeyError if the section is absent)
    """
    return config_in['star']
def from_config_get_clv_rm(config_in):
    """
    Shortcut to the 'CLV_RM_correction' section of the configuration.

    :param config_in: full configuration dictionary
    :return: dictionary (raises KeyError if the section is absent)
    """
    return config_in['CLV_RM_correction']
def from_config_get_interstellar_lines(config_in):
    """
    Shortcut to the 'interstellar_lines' section of the configuration.

    Fix: dict.get replaces the original try/bare-except, which could mask
    unrelated errors.

    :param config_in: full configuration dictionary
    :return: dictionary, or None when the section is missing
    """
    return config_in.get('interstellar_lines', None)
def from_config_get_molecfit(config_in):
    """
    Shortcut to the 'molecfit' section of the configuration.

    Fix: dict.get replaces the original try/bare-except, which could mask
    unrelated errors.

    :param config_in: full configuration dictionary
    :return: dictionary, or None when the section is missing
    """
    return config_in.get('molecfit', None)
def from_config_get_transmission_mcmc(config_in):
    """
    Shortcut to the 'transmission_mcmc' section of the configuration.

    Fix: dict.get replaces the original try/bare-except, which could mask
    unrelated errors.

    :param config_in: full configuration dictionary
    :return: dictionary, or None when the section is missing
    """
    return config_in.get('transmission_mcmc', None)
def from_config_get_spectral_lines(config_in):
    """
    Shortcut to the 'spectral_lines' section of the configuration.

    Fix: dict.get replaces the original try/bare-except, which could mask
    unrelated errors.

    :param config_in: full configuration dictionary
    :return: dictionary, or None when the section is missing
    """
    return config_in.get('spectral_lines', None)
def from_config_get_interactive_plots(config_in):
    """
    Shortcut to the 'interactive_plots' flag of the configuration.

    Fix: dict.get replaces the original try/bare-except, which could mask
    unrelated errors.

    :param config_in: full configuration dictionary
    :return: the configured value, or False when the keyword is missing
    """
    return config_in.get('interactive_plots', False)
def from_config_get_pca_parameters(config_in):
    """
    Shortcut to the 'pca_parameters' section of the configuration.

    Fix: dict.get replaces the original try/bare-except, which could mask
    unrelated errors.

    :param config_in: full configuration dictionary
    :return: dictionary, empty when the section is missing
    """
    return config_in.get('pca_parameters', {})
def from_config_get_fullspectrum_parameters(config_in):
    """
    This subroutine creates a shortcut to the full spectrum dictionary
    :param config_in:
    :return: dictionary (empty when the key is missing)
    """
    # dict.get with a default replaces the previous bare try/except
    return config_in.get('full_spectrum', {})
SLOPpy | SLOPpy-main/SLOPpy/subroutines/spectral_subroutines.py | from __future__ import print_function, division
import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.constants import *
""" Empty module left here for back-compatibility, as almost every module
is importing this one rather than the rebin_subroutines
Everything has been moved into the instruments folder for
easier handling of different instruments
"""
#from SLOPpy.instruments.HARPN_DRSv3 import *
#from SLOPpy.instruments.HARPS_DRSv3 import *
#from SLOPpy.instruments.PEPSI_reduced import *
#def get_calib_data(instrument, archive, file_rad, fiber='A', order_selection=None):
# if instrument =='HARPS-N':
# return HARPN_DRSv3_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
# elif instrument =='HARPS':
# return HARPS_DRSv3_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
# elif instrument =='PEPSI':
# return PEPSI_get_calib_data(archive, file_rad, fiber=fiber, order_selection=order_selection)
# else:
# raise ValueError("Instrument not supported")
#def get_input_data(instrument, archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
# if instrument =='HARPS-N':
# return HARPN_DRSv3_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
# elif instrument =='HARPS':
# return HARPS_DRSv3_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
# elif instrument =='PEPSI':
# return PEPSI_get_input_data(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d, order_selection=order_selection)
# else:
# raise ValueError("Instrument not supported")
| 1,883 | 46.1 | 151 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/PEPSI_reduced.py | import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines import constants
from SLOPpy.subroutines.common import *
from astropy.coordinates import SkyCoord
from astropy import units as u
def PEPSI_get_instrument_keywords():
    """Return the instrument properties used by SLOPpy for PEPSI.

    Unlike the HARPS/HARPS-N readers, PEPSI has no DRS keyword set here:
    only the timing conventions, the LBT site coordinates, and the default
    Molecfit input parameters (values based on Allart+2017).

    Returns:
        dict: instrument properties.
    """
    # default Molecfit inputs; cont_const differs from Allart+2017 because
    # normalized spectra are used
    molecfit_defaults = {
        'default_wstep': 0.01000,    # wavelength step of the input stellar spectra
        'molecules': ['H2O', 'O2'],
        'ftol': 1e-9,
        'xtol': 1e-9,
        'cont_const': 1.0,           # a0, continuum constant term
        'cont_n': 3,                 # degree of the continuum fit
        'wlc_n': 2,                  # polynomial degree of the refined wavelength solution
        'wlc_const': 0.0,            # b0, initial wavelength-correction constant
        'res_gauss': 4.8,            # initial FWHM of the Gaussian kernel, in pixels
        'kernfac': 15,               # kernel size in FWHM units
        'slitwidth': 1.00,           # arcseconds
        'pixelscale': 0.16,
    }

    properties = {
        # DRS-specific keywords
        'time_stamp': 'mid_exposure',
        'time_standard': 'TDB',
        # Observatory-specific keywords (LBT)
        'geoelev': 3221.0,           # meters
        'longitude': -7.325938,      # Tel geo longitude (+=East) (deg)
        'latitude': 32.7013083,      # Tel geo latitude (+=North) (deg)
        'molecfit': molecfit_defaults,
    }
    return properties
def PEPSI_get_calib_data(archive, file_rad, fiber='A', order_selection=None):
    """Build the calibration dictionary required by SLOPpy for PEPSI.

    There are no calibration files for PEPSI: the spectra are already
    blaze-corrected, so the blaze is a unitary array shaped like the
    science spectrum.

    Args:
        archive: directory containing the FITS file.
        file_rad: file name of the observation (only used to size the arrays;
            a science frame works as well).
        fiber, order_selection: kept for interface compatibility with the
            other instruments; not used by PEPSI.

    Returns:
        dict: calibration dictionary with 'n_pixels', 'n_orders', 'blaze'.
    """
    calib_dict = {}

    pepsi_fits = fits.open(archive + '/' + file_rad)
    data_fits = pepsi_fits[1].data['Fun']

    calib_dict['n_pixels'] = len(data_fits)
    calib_dict['n_orders'] = 1
    # only blaze-corrected files are available, so the blaze is set to 1
    calib_dict['blaze'] = np.ones((calib_dict['n_orders'], calib_dict['n_pixels']))

    # bug fix: the FITS file was previously left open
    pepsi_fits.close()

    return calib_dict
def PEPSI_get_input_data(archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
    """Read a PEPSI reduced spectrum into the SLOPpy input dictionaries.

    PEPSI delivers calibrated, rebinned 1D spectra in the stellar rest
    frame, while SLOPpy requires order-by-order (2D) spectra in the
    observer rest frame. The spectrum is therefore shifted back to the
    observer frame and reshaped into a single-order (1, n_pixels) array.

    Args:
        archive: directory containing the FITS file.
        file_rad: file name of the observation.
        mask: CCF mask name, stored in the output dictionary.
        fiber, skip_ccf, skip_s1d, order_selection: kept for interface
            compatibility with the other instruments; not used by PEPSI.

    Returns:
        tuple: (input_dict, input_s1d); input_s1d only carries an empty
        header, since PEPSI has no separate s1d product.
    """
    input_dict = {'mask': mask, 'header': {}}
    input_s1d = {'header': {}}

    properties = PEPSI_get_instrument_keywords()

    pepsi_fits = fits.open(archive + '/' + file_rad)
    input_dict['header']['e2ds'] = pepsi_fits[0].header

    input_dict['n_pixels'] = pepsi_fits[1].header['NAXIS2']
    input_dict['n_orders'] = 1

    input_dict['BERV'] = pepsi_fits[0].header['SSBVEL'] / 1000.  # in km/s
    # RV of the star: it must be provided but it can be bypassed
    input_dict['RVC'] = pepsi_fits[0].header['RADVEL'] / 1000.
    input_dict['EXPTIME'] = pepsi_fits[0].header['EXPTIME']
    # BJD is provided at mid-exposure, no need to check for it
    input_dict['BJD'] = pepsi_fits[0].header['JD-TDB']
    input_dict['MJD'] = pepsi_fits[0].header['JD-OBS'] - constants.MJD
    input_dict['AIRMASS'] = pepsi_fits[0].header['AIRMASS']
    input_dict['UTC'] = (input_dict['MJD'] - int(input_dict['MJD'])) * 86400.
    input_dict['HUMIDITY'] = pepsi_fits[0].header['LBTH']  # relative humidity in % for GEOELEV
    input_dict['PRESSURE'] = pepsi_fits[0].header['LBTP']
    input_dict['TEMPERATURE_EN'] = pepsi_fits[0].header['LBTT']  # ambient temperature in C for GEOELEV
    input_dict['TEMPERATURE_M1'] = pepsi_fits[0].header['LBTT']  # M1 temperature in C (for emission spectra only)
    input_dict['ELEVATION'] = np.arcsin(1. / input_dict['AIRMASS']) * (180. / np.pi)

    input_dict['GEOELEV'] = properties['geoelev']
    input_dict['GEOLONG'] = properties['longitude']
    input_dict['GEOLAT'] = properties['latitude']
    input_dict['molecfit'] = properties['molecfit']

    # fix: dropped the redundant ``skycoords = c = SkyCoord(...)`` double alias
    skycoords = SkyCoord(pepsi_fits[0].header['RA'],
                         pepsi_fits[0].header['DEC'],
                         unit=(u.hourangle, u.deg))
    input_dict['RA'] = skycoords.ra.degree
    input_dict['DEC'] = skycoords.dec.degree

    # getting data:
    # 1) shift the wavelength from the stellar to the observer reference frame
    # 2) reshape the arrays into (1, n_pixels) as expected by SLOPpy
    wave_stellar = pepsi_fits[1].data['Arg']
    rvshift = pepsi_fits[0].header['SSTVEL'] / 1000.

    input_dict['wave_size'] = pepsi_fits[1].header['NAXIS2']
    input_dict['wave'] = np.reshape(shift_wavelength_array(wave_stellar, rvshift),
                                    (1, input_dict['wave_size']))
    input_dict['e2ds'] = np.reshape(pepsi_fits[1].data['Fun'], (1, input_dict['wave_size']))
    input_dict['e2ds_err'] = np.reshape(pepsi_fits[1].data['Var'], (1, input_dict['wave_size']))

    # PEPSI spectra are normalized to unity while SLOPpy expects absolute
    # counts; since the spectra were resampled on a non-linear scale before
    # being normalized, flux must NOT be preserved during rebinning.
    # fix: this flag used to be assigned twice, once here and once at the end
    input_dict['absolute_flux'] = False

    input_dict['step'] = np.zeros_like(input_dict['wave'])
    input_dict['step'][0, 1:-1] = (input_dict['wave'][0, 2:] - input_dict['wave'][0, :-2]) / 2.
    input_dict['step'][0, 0] = input_dict['step'][0, 1]
    input_dict['step'][0, -1] = input_dict['step'][0, -2]

    # order selection always equals the first - and unique - order
    input_dict['orders'] = [0]

    pepsi_fits.close()

    return input_dict, input_s1d
| 7,474 | 39.188172 | 131 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/HARPS_DRSv3.py | from __future__ import print_function, division
from SLOPpy.instruments.common_DRSv3 import *
def HARPSv3_get_instrument_keywords():
    """Return the FITS header keywords and instrument properties for HARPS.

    These definitions apply to DRS version 3.x.

    Returns:
        tuple: (keywords, properties) dictionaries.
    """
    keywords = {
        'header_rvc': 'HIERARCH ESO DRS CCF RVC',
        'header_berv': 'HIERARCH ESO DRS BERV',
        'header_bjd': 'HIERARCH ESO DRS BJD',
        # MJD in days; required for the retrieval of GDAS data
        'header_mjd': 'MJD-OBS',
        'header_blaze': 'HIERARCH ESO DRS BLAZE FILE',
        'header_ccd': 'HIERARCH ESO DRS CCD SIGDET',
        'header_conad': 'HIERARCH ESO DRS CCD CONAD',
        'header_dpr_catg': 'HIERARCH ESO DPR CATG',
        'header_dpr_type': 'HIERARCH ESO DPR TYPE',
        'header_deg_ll': 'HIERARCH ESO DRS CAL TH DEG LL',
        'header_coeff_ll': 'HIERARCH ESO DRS CAL TH COEFF LL',
        # telescope elevation is computed from the average of these two airmasses
        'airmass_alt_start': 'HIERARCH ESO TEL AIRM START',
        'airmass_alt_end': 'HIERARCH ESO TEL AIRM END',
        'humidity': 'HIERARCH ESO TEL AMBI RHUM',        # relative humidity in % for GEOELEV
        'pressure': 'HIERARCH ESO TEL AMBI PRES END',
        'temperature_env': 'HIERARCH ESO TEL AMBI TEMP', # ambient temperature in C for GEOELEV
        'temperature_m1': 'HIERARCH ESO TEL TH M1 TEMP', # M1 temperature in C (emission spectra only)
    }

    # Order correspondence between fiber A and fiber B: fiber A order 45 has
    # no counterpart in fiber B, hence the -1 flag in that position.
    orders_BtoA = list(range(45)) + [-1] + list(range(45, 71))

    properties = {
        # DRS-specific keywords
        'time_stamp': 'mid_exposure',
        'time_standard': 'UTC',
        # Observatory-specific keywords (La Silla)
        'geoelev': 2400.0,       # meters
        'longitude': -70.7345,   # Tel geo longitude (+=East) (deg)
        'latitude': -29.2584,    # Tel geo latitude (+=North) (deg)
        # Instrument-specific keywords
        'n_orders_A': 72,
        'n_orders_B': 71,
        'orders_BtoA': orders_BtoA,
        'red_ccd': list(range(45, 72)),
        'blue_ccd': list(range(45)),
        'full_ccd': list(range(72)),
        # Molecfit input values, taken from Allart+2017; cont_const differs
        # because normalized spectra are used
        'molecfit': {
            'default_wstep': 0.01000,   # wavelength step of the input stellar spectra
            'molecules': ['H2O', 'O2'],
            'ftol': 1e-9,
            'xtol': 1e-9,
            'cont_const': 1.0,          # a0, continuum constant term
            'cont_n': 3,                # degree of the continuum fit
            'wlc_n': 2,                 # polynomial degree of the refined wavelength solution
            'wlc_const': 0.0,           # b0, initial wavelength-correction constant
            'res_gauss': 4.8,           # initial FWHM of the Gaussian kernel, in pixels
            'kernfac': 15,              # kernel size in FWHM units
            'slitwidth': 1.00,          # arcseconds
            'pixelscale': 0.16,
        }
    }
    return keywords, properties
# Shortcut from DRS-geenral to instrument-specific subroutine
def HARPS_DRSv3_get_calib_data(archive, file_rad, fiber='A', order_selection=None):
    """Instrument-specific shortcut: HARPS keywords + generic DRS v3 calib reader."""
    kw, props = HARPSv3_get_instrument_keywords()
    return DRSv3_get_calib_data(
        archive, file_rad, kw, props,
        fiber=fiber, order_selection=order_selection)
# Shortcut from DRS-geenral to instrument-specific subroutine
def HARPS_DRSv3_get_input_data(archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
    """Instrument-specific shortcut: HARPS keywords + generic DRS v3 input reader."""
    kw, props = HARPSv3_get_instrument_keywords()
    return DRSv3_get_input_data(
        archive, file_rad, kw, props, mask,
        fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d,
        order_selection=order_selection)
| 5,356 | 46.830357 | 162 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/HARPN_DRSv3.py | from __future__ import print_function, division
from SLOPpy.instruments.common_DRSv3 import *
def HARPNv3_get_instrument_keywords():
    """Return the FITS header keywords and instrument properties for HARPS-N.

    These definitions apply to DRS version 3.x.

    Returns:
        tuple: (keywords, properties) dictionaries.
    """
    keywords = {
        'header_rvc': 'HIERARCH TNG DRS CCF RVC',
        'header_berv': 'HIERARCH TNG DRS BERV',
        'header_bjd': 'HIERARCH TNG DRS BJD',
        # MJD in days; required for the retrieval of GDAS data
        'header_mjd': 'MJD-OBS',
        'header_blaze': 'HIERARCH TNG DRS BLAZE FILE',
        'header_ccd': 'HIERARCH TNG DRS CCD SIGDET',
        'header_conad': 'HIERARCH TNG DRS CCD CONAD',
        'header_dpr_catg': 'HIERARCH TNG DPR CATG',
        'header_dpr_type': 'HIERARCH TNG DPR TYPE',
        'header_deg_ll': 'HIERARCH TNG DRS CAL TH DEG LL',
        'header_coeff_ll': 'HIERARCH TNG DRS CAL TH COEFF LL',
        # telescope elevation is computed from the average of these two airmasses
        'airmass_alt_start': 'HIERARCH TNG TEL AIRM START',
        'airmass_alt_end': 'HIERARCH TNG TEL AIRM END',
        'humidity': 'HIERARCH TNG METEO HUMIDITY',     # relative humidity in % for GEOELEV
        'pressure': 'HIERARCH TNG METEO PRESSURE',
        'temperature_env': 'HIERARCH TNG METEO TEMP10M',  # ambient temperature in C for GEOELEV
        'temperature_m1': 'HIERARCH TNG M1 CH1TEMP',   # M1 temperature in C (emission spectra only)
    }

    properties = {
        # DRS-specific keywords
        'time_stamp': 'mid_exposure',
        'time_standard': 'UTC',
        # Observatory-specific keywords (TNG)
        'geoelev': 2387.2,     # meters
        'longitude': -17.889,  # Tel geo longitude (+=East) (deg)
        'latitude': 28.754,    # Tel geo latitude (+=North) (deg)
        # Instrument-specific keywords: every fiber-B order maps directly
        # onto the same fiber-A order
        'n_orders_A': 69,
        'n_orders_B': 69,
        'orders_BtoA': list(range(69)),
        'red_ccd': list(range(42, 69)),
        'blue_ccd': list(range(42)),
        'full_ccd': list(range(69)),
        # Molecfit input values, taken from Allart+2017; cont_const differs
        # because normalized spectra are used
        'molecfit': {
            'default_wstep': 0.01000,   # wavelength step of the input stellar spectra
            'molecules': ['H2O', 'O2'],
            # consistency fix: these were the strings "1e-9" while the HARPS
            # and PEPSI definitions use floats
            'ftol': 1e-9,
            'xtol': 1e-9,
            'cont_const': 1.0,          # a0, continuum constant term
            'cont_n': 3,                # degree of the continuum fit
            'wlc_n': 2,                 # polynomial degree of the refined wavelength solution
            'wlc_const': 0.0,           # b0, initial wavelength-correction constant
            'res_gauss': 4.8,           # initial FWHM of the Gaussian kernel, in pixels
            'kernfac': 15,              # kernel size in FWHM units
            'slitwidth': 1.00,          # arcseconds
            'pixelscale': 0.16,
        }
    }
    return keywords, properties
# Shortcut from DRS-geenral to instrument-specific subroutine
def HARPN_DRSv3_get_calib_data(archive, file_rad, fiber='A', order_selection=None):
    """Instrument-specific shortcut: HARPS-N keywords + generic DRS v3 calib reader."""
    kw, props = HARPNv3_get_instrument_keywords()
    return DRSv3_get_calib_data(
        archive, file_rad, kw, props,
        fiber=fiber, order_selection=order_selection)
# Shortcut from DRS-geenral to instrument-specific subroutine
def HARPN_DRSv3_get_input_data(archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
    """Instrument-specific shortcut: HARPS-N keywords + generic DRS v3 input reader."""
    kw, props = HARPNv3_get_instrument_keywords()
    return DRSv3_get_input_data(
        archive, file_rad, kw, props, mask,
        fiber=fiber, skip_ccf=skip_ccf, skip_s1d=skip_s1d,
        order_selection=order_selection)
| 5,057 | 46.716981 | 162 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/common_DRSv3.py | import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.constants import *
from SLOPpy.subroutines.common import *
def DRSv3_map_orders_AB(properties, order_selection):
    """Build the lists of matched order indexes between fiber A and fiber B.

    Fiber-A orders with no counterpart in fiber B (flagged with -1 in
    ``properties['orders_BtoA']``) are dropped from both lists.

    Args:
        properties: instrument properties dictionary.
        order_selection: 'red_ccd'/'blue_ccd'/'full_ccd', an explicit list of
            fiber-A order indexes, or None/empty for all orders.

    Returns:
        tuple: (map_orders_A, map_orders_B) lists of matched order indexes.
    """
    orders_A = np.arange(0, properties['n_orders_A'], dtype=np.int16)
    orders_BtoA = np.asarray(properties['orders_BtoA'])

    if order_selection in ['red_ccd', 'blue_ccd', 'full_ccd']:
        selection = properties[order_selection]
    elif order_selection:
        selection = order_selection
    else:
        selection = None

    if selection is not None:
        orders_A = orders_A[selection]
        orders_BtoA = orders_BtoA[selection]

    map_orders_A = [a for a, b in zip(orders_A, orders_BtoA) if b >= 0]
    map_orders_B = [b for a, b in zip(orders_A, orders_BtoA) if b >= 0]
    return map_orders_A, map_orders_B
def DRSv3_give_back_selected_orders(properties, fiber, order_selection):
    """Return the selected (matched) order indexes for the requested fiber."""
    map_orders_A, map_orders_B = DRSv3_map_orders_AB(properties, order_selection)
    return map_orders_A if fiber == 'A' else map_orders_B
def DRSv3_get_calib_data(archive, file_rad, keywords, properties, fiber='A', order_selection=None):
    """Read the calibration products (blaze and, when available, lamp) of a DRS v3 frame.

    Args:
        archive: directory containing the FITS files.
        file_rad: file name radix of the observation.
        keywords: instrument-specific FITS keyword dictionary.
        properties: instrument-specific properties dictionary.
        fiber: 'A' (science) or 'B' (sky/reference) fiber.
        order_selection: order subset ('red_ccd'/'blue_ccd'/'full_ccd', list, or None).

    Returns:
        dict: calibration dictionary, or None when the frame is not a science frame.
    """
    calib_dict = {}

    selected_orders = DRSv3_give_back_selected_orders(properties, fiber, order_selection)
    map_orders_A, map_orders_B = DRSv3_map_orders_AB(properties, order_selection)

    if fiber == 'A':
        # The first selected order could lack a match in fiber B, so renumber
        # from the first order of the input selection, not from the first
        # order that had a match
        calib_dict['fibAB_orders_match'] = map_orders_A - np.min(selected_orders)
    else:
        # Only the orders with a match in fiber A are read in the first
        # place, so we can safely rescale with respect to the number of the
        # first order in the matched list
        calib_dict['fibAB_orders_match'] = map_orders_B - np.min(map_orders_B)

    e2ds_fits = fits.open(archive + '/' + file_rad + '_e2ds_' + fiber + '.fits')

    if e2ds_fits[0].header[keywords['header_dpr_catg']] != 'SCIENCE':
        # not a science frame: close the file (previously left open) and bail out
        e2ds_fits.close()
        return

    try:
        blaze_file = e2ds_fits[0].header[keywords['header_blaze']]
        blaze_fits = fits.open(archive + '/' + blaze_file)
    except OSError:
        # some archives store the blaze file name with '-' instead of ':'
        blaze_file = e2ds_fits[0].header[keywords['header_blaze']].replace(':', '-')
        blaze_fits = fits.open(archive + '/' + blaze_file)

    # getting blaze file
    calib_dict['blaze'] = blaze_fits[0].data[selected_orders, :]

    # getting lamp file (optional)
    try:
        lamp_fits = fits.open(archive + '/' + blaze_file[:29] + '_lamp_' + fiber + '.fits')
        calib_dict['lamp'] = lamp_fits[0].data[selected_orders, :]
        lamp_fits.close()
    except OSError:
        print("lamp files not available, sky correction will not be performed")

    calib_dict['n_pixels'] = blaze_fits[0].header['NAXIS1']
    calib_dict['n_orders'] = len(selected_orders)

    blaze_fits.close()
    # bug fix: the e2ds file was previously never closed
    e2ds_fits.close()

    return calib_dict
def DRSv3_get_input_data(archive, file_rad, keywords, properties, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
    """Read a DRS v3 e2ds observation (plus optional s1d/CCF) into SLOPpy dictionaries.

    Args:
        archive: directory containing the FITS files.
        file_rad: file name radix of the observation.
        keywords: instrument-specific FITS keyword dictionary.
        properties: instrument-specific properties dictionary.
        mask: CCF mask name; when None the CCF file is always skipped.
        fiber: 'A' (science) or 'B' (sky/reference) fiber.
        skip_ccf: when True, do not read the CCF file.
        skip_s1d: when True, do not read the s1d file.
        order_selection: order subset ('red_ccd'/'blue_ccd'/'full_ccd', list, or None).

    Returns:
        tuple: (input_dict, input_s1d), or None when the frame is not a science frame.
    """
    input_dict = {'mask': mask, 'header': {}}
    input_s1d = {'header': {}}

    selected_orders = DRSv3_give_back_selected_orders(properties, fiber, order_selection)

    if mask is None:
        skip_ccf = True

    e2ds_fits = fits.open(archive + '/' + file_rad + '_e2ds_' + fiber + '.fits')
    input_dict['header']['e2ds'] = e2ds_fits[0].header

    input_dict['n_pixels'] = e2ds_fits[0].header['NAXIS1']
    input_dict['n_orders'] = len(selected_orders)

    input_dict['DPR_CATG'] = e2ds_fits[0].header[keywords['header_dpr_catg']]
    if input_dict['DPR_CATG'] != 'SCIENCE':
        # not a science frame: close the file (previously left open) and bail out
        e2ds_fits.close()
        return

    input_dict['DPR_TYPE'] = e2ds_fits[0].header[keywords['header_dpr_type']]

    if not skip_s1d:
        s1d_fits = fits.open(archive + '/' + file_rad + '_s1d_' + fiber + '.fits')
        input_dict['header']['s1d'] = s1d_fits[0].header
        input_s1d['header']['s1d'] = s1d_fits[0].header

        temp_wave, temp_step = DRSv3_get_s1d_wave(s1d_fits)
        sel_wave = (temp_wave >= 3879.99990) & (temp_wave <= 6900.0001)

        input_s1d['flux'] = s1d_fits[0].data[sel_wave]
        input_s1d['wave'] = temp_wave[sel_wave]
        input_s1d['step'] = temp_step[sel_wave]
        input_s1d['size'] = np.size(input_s1d['wave'])
        s1d_fits.close()

    if not skip_ccf:
        ccf_fits = fits.open(archive + '/' + file_rad + '_ccf_' + mask + '_' + fiber + '.fits')
        input_dict['RVC'] = ccf_fits[0].header[keywords['header_rvc']]
        input_dict['header']['ccf'] = ccf_fits[0].header

    # NOTE: this header-reading section used to appear twice, once before the
    # try block (unguarded) and once inside it: any keyword error would crash
    # with a raw traceback before the intended error message could be printed.
    # Only the guarded copy is kept.
    try:
        input_dict['BERV'] = e2ds_fits[0].header[keywords['header_berv']]
        input_dict['EXPTIME'] = e2ds_fits[0].header['EXPTIME']

        # epochs are always converted to mid-exposure
        if properties['time_stamp'] == 'start_exposure':
            input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']] + input_dict['EXPTIME'] / 86400.
            input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']] + input_dict['EXPTIME'] / 86400.
        elif properties['time_stamp'] == 'mid_exposure':
            input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']]
            input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']]
        elif properties['time_stamp'] == 'end_exposure':
            input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']] - input_dict['EXPTIME'] / 86400.
            input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']] - input_dict['EXPTIME'] / 86400.
        else:
            print('*** please specify the relationship between epoch and exposure time - assuming mid-exposure epochs')
            input_dict['BJD'] = e2ds_fits[0].header[keywords['header_bjd']]
            input_dict['MJD'] = e2ds_fits[0].header[keywords['header_mjd']]

        if properties['time_standard'] == 'UTC':
            input_dict['BJD'] += difference_utc2tdb(input_dict['MJD'] + 2400000.5)

        input_dict['LST'] = e2ds_fits[0].header['LST']

        try:
            input_dict['AIRMASS'] = e2ds_fits[0].header['AIRMASS']
        except KeyError:
            # fall back to the average of the start/end airmasses
            input_dict['AIRMASS'] = (e2ds_fits[0].header[keywords['airmass_alt_start']]
                                     + e2ds_fits[0].header[keywords['airmass_alt_end']]) / 2.

        input_dict['UTC'] = (input_dict['MJD'] - int(input_dict['MJD'])) * 86400.
        input_dict['HUMIDITY'] = e2ds_fits[0].header[keywords['humidity']]
        input_dict['PRESSURE'] = e2ds_fits[0].header[keywords['pressure']]
        input_dict['TEMPERATURE_EN'] = e2ds_fits[0].header[keywords['temperature_env']]
        input_dict['TEMPERATURE_M1'] = e2ds_fits[0].header[keywords['temperature_m1']]
        input_dict['ELEVATION'] = np.arcsin(1. / input_dict['AIRMASS']) * (180. / np.pi)

        input_dict['GEOELEV'] = properties['geoelev']
        input_dict['GEOLONG'] = properties['longitude']
        input_dict['GEOLAT'] = properties['latitude']

        input_dict['molecfit'] = properties['molecfit']

        try:
            try:
                input_dict['RA'] = e2ds_fits[0].header['RA-DEG']
                input_dict['DEC'] = e2ds_fits[0].header['DEC-DEG']
            except KeyError:
                input_dict['RA'] = e2ds_fits[0].header['RA-RAD'] * 180.00 / np.pi
                input_dict['DEC'] = e2ds_fits[0].header['DEC-RAD'] * 180.00 / np.pi  # weird choice of using DEC in hours
        except KeyError:
            input_dict['RA'] = e2ds_fits[0].header['RA']
            input_dict['DEC'] = e2ds_fits[0].header['DEC']

    except Exception:
        print('Keyword error in prepare_dataset - check the FITS header of your files')
        quit()

    input_dict['BLAZE_file'] = e2ds_fits[0].header[keywords['header_blaze']]
    input_dict['CCD_SIGDET'] = e2ds_fits[0].header[keywords['header_ccd']]
    input_dict['CCD_GAIN'] = e2ds_fits[0].header[keywords['header_conad']]

    # getting data
    input_dict['e2ds'] = e2ds_fits[0].data[selected_orders, :]
    input_dict['e2ds_err'] = np.sqrt(np.abs(input_dict['e2ds']))

    temp_wave, temp_step = DRSv3_get_e2ds_wave(e2ds_fits, keywords['header_deg_ll'], keywords['header_coeff_ll'])
    input_dict['wave'] = temp_wave[selected_orders, :]
    input_dict['step'] = temp_step[selected_orders, :]

    input_dict['orders'] = len(selected_orders)
    input_dict['wave_size'] = e2ds_fits[0].header['NAXIS1']

    e2ds_fits.close()
    if not skip_ccf:
        ccf_fits.close()

    return input_dict, input_s1d
def DRSv3_get_s1d_wave(s1d_fits):
    """Reconstruct the s1d wavelength grid and its (constant) step size.

    Args:
        s1d_fits: opened FITS HDU list of an s1d file.

    Returns:
        tuple: (wave, step) arrays of length NAXIS1.
    """
    header = s1d_fits[0].header
    n_pixels = header['NAXIS1']
    step_size = header['CDELT1']
    wave = header['CRVAL1'] + step_size * np.arange(0, n_pixels, 1.)
    step = np.ones(n_pixels) * step_size
    return wave, step
def DRSv3_get_e2ds_wave(e2ds_fits, header_deg_ll, header_coeff_ll, order=None):
    """Rebuild the e2ds wavelength solution (and step size) from header coefficients.

    The wavelength of each order is a polynomial in pixel position x:
    ``wave = sum_i a_i * x**i``, with coefficients stored sequentially in the
    header. The step size is the analytic derivative of the same polynomial.

    Args:
        e2ds_fits: opened FITS HDU list of an e2ds file.
        header_deg_ll: header keyword holding the polynomial degree.
        header_coeff_ll: header keyword prefix of the polynomial coefficients.
        order: when given, return only the arrays of that order.

    Returns:
        tuple: (wave, step) arrays, shaped (n_orders, n_pixels) or (n_pixels,).
    """
    header = e2ds_fits[0].header
    n_orders = header['NAXIS2']
    n_pixels = header['NAXIS1']
    deg = header[header_deg_ll]

    e2ds_wave = np.zeros([n_orders, n_pixels], dtype=np.double)
    e2ds_step = np.zeros([n_orders, n_pixels], dtype=np.double)
    x = np.arange(0, n_pixels, 1.)

    for n_order in range(0, n_orders):
        # coefficients of this order, from degree 0 up to degree `deg`
        coeff = [header[header_coeff_ll + repr(i + n_order * (1 + deg))]
                 for i in range(0, deg + 1)]
        # np.polyval wants the highest-degree coefficient first
        e2ds_wave[n_order, :] = np.polyval(coeff[::-1], x)
        # derivative coefficients: d/dx sum_i a_i x**i = sum_i i*a_i x**(i-1)
        deriv = [i * coeff[i] for i in range(1, deg + 1)]
        if deriv:
            e2ds_step[n_order, :] = np.polyval(deriv[::-1], x)

    if order is None:
        return e2ds_wave, e2ds_step
    return e2ds_wave[order, :], e2ds_step[order, :]
| 12,657 | 40.366013 | 135 | py |
SLOPpy | SLOPpy-main/SLOPpy/instruments/get_data.py | from __future__ import print_function, division
import numpy as np
from astropy.io import fits
from SLOPpy.subroutines.rebin_subroutines import *
from SLOPpy.subroutines.constants import *
from SLOPpy.instruments.HARPN_DRSv3 import *
from SLOPpy.instruments.HARPS_DRSv3 import *
from SLOPpy.instruments.PEPSI_reduced import *
def get_calib_data(instrument, archive, file_rad, fiber='A', order_selection=None):
    """Dispatch the calibration-data reader matching the requested instrument.

    Raises:
        ValueError: when the instrument is not supported.
    """
    if instrument == 'HARPS-N':
        reader = HARPN_DRSv3_get_calib_data
    elif instrument == 'HARPS':
        reader = HARPS_DRSv3_get_calib_data
    elif instrument in ('PEPSI', 'PEPSI_red', 'PEPSI_blue'):
        reader = PEPSI_get_calib_data
    else:
        raise ValueError("Instrument not supported")
    return reader(archive, file_rad, fiber=fiber, order_selection=order_selection)
def get_input_data(instrument, archive, file_rad, mask, fiber='A', skip_ccf=None, skip_s1d=True, order_selection=None):
    """Dispatch the input-data reader matching the requested instrument.

    Raises:
        ValueError: when the instrument is not supported.
    """
    if instrument == 'HARPS-N':
        reader = HARPN_DRSv3_get_input_data
    elif instrument == 'HARPS':
        reader = HARPS_DRSv3_get_input_data
    elif instrument in ('PEPSI', 'PEPSI_red', 'PEPSI_blue'):
        reader = PEPSI_get_input_data
    else:
        raise ValueError("Instrument not supported")
    return reader(archive, file_rad, mask, fiber=fiber, skip_ccf=skip_ccf,
                  skip_s1d=skip_s1d, order_selection=order_selection)
| 1,669 | 51.1875 | 150 | py |
BehaviorTree.CPP | BehaviorTree.CPP-master/convert_v3_to_v4.py | """Converts BehaviorTree.CPP V3 compatible tree xml files to V4 format.
"""
import argparse
import copy
import logging
import sys
import typing
import xml.etree.ElementTree as ET
logger = logging.getLogger(__name__)
def strtobool(val: typing.Union[str, int, bool]) -> bool:
    """``distutils.util.strtobool`` equivalent, since it will be deprecated.
    origin: https://stackoverflow.com/a/715468/17094594
    """
    truthy_tokens = ("yes", "true", "t", "1")
    return str(val).lower() in truthy_tokens
# see ``XMLParser::Pimpl::createNodeFromXML`` for all underscores
# Attribute names reserved by BT.CPP V4 for scripting directives; a V3 tree
# already using one of these as an ordinary port cannot be auto-converted.
SCRIPT_DIRECTIVES = [
    "_successIf",
    "_failureIf",
    "_skipIf",
    "_while",
    "_onSuccess",
    "_onFailure",
    "_onHalted",
    "_post",
]
def convert_single_node(node: ET.Element) -> None:
    """Converts a single node from V3 to V4, modifying it in place.

    Args:
        node (ET.Element): the node to convert.
    """
    if node.tag == "root":
        node.attrib["BTCPP_format"] = "4"

    def convert_no_warn(node_type: str, v3_name: str, v4_name: str):
        """Rename ``v3_name`` to ``v4_name``, given either as a tag or as an ID."""
        if node.tag == v3_name:
            node.tag = v4_name
        elif (
            (node.tag == node_type)
            and ("ID" in node.attrib)
            and (node.attrib["ID"] == v3_name)
        ):
            # bug fix: this used to assign v3_name back, leaving nodes such
            # as <Control ID="SequenceStar"> unconverted
            node.attrib["ID"] = v4_name

    original_attrib = copy.copy(node.attrib)

    convert_no_warn("Control", "SequenceStar", "SequenceWithMemory")

    if node.tag == "SubTree":
        logger.info(
            "SubTree is now deprecated, auto converting to V4 SubTree"
            " (formerly known as SubTreePlus)"
        )
        for key, val in original_attrib.items():
            if key == "__shared_blackboard" and strtobool(val):
                logger.warning(
                    "__shared_blackboard for subtree is deprecated"
                    ", using _autoremap instead."
                    " Some behavior may change!"
                )
                node.attrib.pop(key)
                node.attrib["_autoremap"] = "1"
            elif key == "ID":
                pass
            else:
                # V3 SubTree ports were implicit remaps; V4 needs them as
                # explicit blackboard pointers
                node.attrib[key] = f"{{{val}}}"
    elif node.tag == "SubTreePlus":
        node.tag = "SubTree"
        for key, val in original_attrib.items():
            if key == "__autoremap":
                node.attrib.pop(key)
                node.attrib["_autoremap"] = val

    for key in node.attrib:
        if key in SCRIPT_DIRECTIVES:
            logging.error(
                "node %s%s has port %s, this is reserved for scripts in V4."
                " Please edit the node before converting to V4.",
                node.tag,
                f" with ID {node.attrib['ID']}" if "ID" in node.attrib else "",
                key,
            )
def convert_all_nodes(root_node: ET.Element) -> None:
    """Apply the V3-to-V4 conversion to *root_node* and every descendant.

    Args:
        root_node (ET.Element): the root node to start the conversion.
    """
    # Iterative traversal; each node's conversion is independent of visit order.
    pending = [root_node]
    while pending:
        current = pending.pop()
        convert_single_node(current)
        pending.extend(current)
def convert_stream(in_stream: typing.TextIO, out_stream: typing.TextIO):
    """Converts the behavior tree V3 xml from in_file to V4, and writes to out_file.

    Args:
        in_stream (typing.TextIO): The input file stream.
        out_stream (typing.TextIO): The output file stream.
    """

    class CommentedTreeBuilder(ET.TreeBuilder):
        """Class for preserving comments in xml
        see: https://stackoverflow.com/a/34324359/17094594
        """

        def comment(self, text):
            self.start(ET.Comment, {})
            self.data(text)
            self.end(ET.Comment)

    # parse with a comment-preserving builder so round-tripping keeps comments
    parser = ET.XMLParser(target=CommentedTreeBuilder())
    tree = ET.parse(in_stream, parser)
    convert_all_nodes(tree.getroot())
    tree.write(out_stream, encoding="unicode", xml_declaration=True)
def main():
    """the main function when used in cli mode"""
    # log conversion notes/warnings to stderr at full verbosity
    logger.addHandler(logging.StreamHandler())
    logger.setLevel(logging.DEBUG)
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-i",
        "--in_file",
        type=argparse.FileType("r"),
        help="The file to convert from (v3). If absent, reads xml string from stdin.",
    )
    parser.add_argument(
        "-o",
        "--out_file",
        nargs="?",
        type=argparse.FileType("w"),
        default=sys.stdout,
        help="The file to write the converted xml (V4)."
        " Prints to stdout if not specified.",
    )

    class ArgsType(typing.NamedTuple):
        """Dummy class to provide type hinting to arguments parsed with argparse"""

        in_file: typing.Optional[typing.TextIO]
        out_file: typing.TextIO

    args: ArgsType = parser.parse_args()

    if args.in_file is None:
        # fall back to piped stdin; refuse to run interactively with no input
        if not sys.stdin.isatty():
            args.in_file = sys.stdin
        else:
            logging.error(
                "The input file was not specified, nor a stdin stream was detected."
            )
            sys.exit(1)

    convert_stream(args.in_file, args.out_file)


if __name__ == "__main__":
    main()
EPSANet | EPSANet-master/main.py | import argparse
import os
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from loss import CrossEntropyLabelSmooth
import models
# every lowercase, callable, non-dunder name exported by the local ``models``
# package is assumed to be a model constructor
model_names = sorted(name for name in models.__dict__
    if name.islower() and not name.startswith("__")
    and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--arch', '-a', metavar='ARCH', default='epsanet50',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: epsanet50)')
parser.add_argument('--data', metavar='DIR',default='/path/dataset',
                    help='path to dataset')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=120, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
                    metavar='N', help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', default=False, dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
                    help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
                    help='distributed backend')
# NOTE(review): nargs='+' makes --seed parse to a *list*, but main() passes it
# straight to random.seed/torch.manual_seed which expect a scalar — confirm.
parser.add_argument('--seed', default=None, type=int, nargs='+',
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--action', default='', type=str,
                    help='other information.')

# running best top-1/top-5 accuracy and the epoch they were achieved at;
# updated inside main()
best_prec1 = 0
best_prec5 = 0
best_epoch = 0
def main():
    """Entry point: build model/data pipeline and run the train/validate loop.

    Reads configuration from the module-level ``parser`` and updates the
    module-level best-accuracy trackers.
    """
    # fix: best_prec5 and best_epoch are assigned below; without declaring
    # them global, the final print would raise UnboundLocalError whenever the
    # first evaluated epoch is not the best one.
    global args, best_prec1, best_prec5, best_epoch
    args = parser.parse_args()

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    # choose device placement / parallelism strategy
    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    print(model)

    # get the number of models parameters
    print('Number of models parameters: {}'.format(
        sum([p.data.nelement() for p in model.parameters()])))

    # define loss function (criterion) and optimizer
    criterion = CrossEntropyLabelSmooth(num_classes=1000, epsilon=0.1)

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
            del checkpoint
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        # evaluation-only mode: time a single validation pass (hours) and exit
        m = time.time()
        _, _ = validate(val_loader, model, criterion)
        n = time.time()
        print((n - m) / 3600)
        return

    directory = "runs/%s/" % (args.arch + '_' + args.action)
    if not os.path.exists(directory):
        os.makedirs(directory)

    # per-epoch metric curves, persisted incrementally via data_save
    Loss_plot = {}
    train_prec1_plot = {}
    train_prec5_plot = {}
    val_prec1_plot = {}
    val_prec5_plot = {}

    for epoch in range(args.start_epoch, args.epochs):
        start_time = time.time()
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)

        # train for one epoch
        loss_temp, train_prec1_temp, train_prec5_temp = train(train_loader, model, criterion, optimizer, epoch)
        Loss_plot[epoch] = loss_temp
        train_prec1_plot[epoch] = train_prec1_temp
        train_prec5_plot[epoch] = train_prec5_temp

        # evaluate on validation set
        prec1, prec5 = validate(val_loader, model, criterion)
        val_prec1_plot[epoch] = prec1
        val_prec5_plot[epoch] = prec5

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best)

        if is_best:
            best_epoch = epoch + 1
            best_prec5 = prec5
        print(' * BestPrec so far@1 {top1:.3f} @5 {top5:.3f} in epoch {best_epoch}'.format(top1=best_prec1,
                                                                                           top5=best_prec5,
                                                                                           best_epoch=best_epoch))

        data_save(directory + 'Loss_plot.txt', Loss_plot)
        data_save(directory + 'train_prec1.txt', train_prec1_plot)
        data_save(directory + 'train_prec5.txt', train_prec5_plot)
        data_save(directory + 'val_prec1.txt', val_prec1_plot)
        data_save(directory + 'val_prec5.txt', val_prec5_plot)

        end_time = time.time()
        time_value = (end_time - start_time) / 3600
        print("-" * 80)
        print(time_value)
        print("-" * 80)
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch; returns (avg loss, avg top-1, avg top-5)."""
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # NOTE(review): losses_batch is never used in this function
    losses_batch = {}

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if args.gpu is not None:
            input = input.cuda(args.gpu, non_blocking=True)
        # target is moved to GPU unconditionally, assuming CUDA is available
        target = target.cuda(args.gpu, non_blocking=True)

        # compute output
        output = model(input)
        loss = criterion(output, target)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))

    return losses.avg, top1.avg, top5.avg
def validate(val_loader, model, criterion):
    """Evaluate on the validation set; returns (avg top-1, avg top-5)."""
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to evaluate mode
    model.eval()

    # no_grad: inference only, no autograd bookkeeping
    with torch.no_grad():
        end = time.time()
        for i, (input, target) in enumerate(val_loader):
            if args.gpu is not None:
                input = input.cuda(args.gpu, non_blocking=True)
            # target is moved to GPU unconditionally, assuming CUDA is available
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(input)
            loss = criterion(output, target)

            # measure accuracy and record loss
            prec1, prec5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), input.size(0))
            top1.update(prec1[0], input.size(0))
            top5.update(prec5[0], input.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       top1=top1, top5=top5))

        print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))

    return top1.avg, top5.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Write ``state`` into the run directory; mirror to ``model_best.pth.tar`` when best."""
    run_dir = "runs/%s/" % (args.arch + '_' + args.action)
    checkpoint_path = run_dir + filename
    torch.save(state, checkpoint_path)
    if is_best:
        shutil.copyfile(checkpoint_path, run_dir + 'model_best.pth.tar')
class AverageMeter(object):
    """Computes and stores the average and current value"""

    def __init__(self):
        self.reset()

    def reset(self):
        """Drop all accumulated statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    decay_steps = epoch // 30
    new_lr = args.lr * (0.1 ** decay_steps)
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k

    :param output: [batch, num_classes] prediction scores.
    :param target: [batch] ground-truth class indices.
    :param topk: tuple of k values to report.
    :return: list of 1-element tensors, precision@k in percent, one per k.
    """
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # pred: [maxk, batch] indices of the top-maxk classes per sample
        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            # fix: use reshape, not view — slicing the transposed tensor
            # yields a non-contiguous tensor and .view raises for k > 1
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
def data_save(root, file):
    """Append per-epoch values from the dict ``file`` to the text file ``root``.

    Each line has the form ``"<epoch> <value>"``.  Only entries whose epoch is
    strictly greater than the last epoch already in the file are appended, so
    repeated calls resume where the previous one stopped.

    :param root: path of the log file (created if missing).
    :param file: mapping from epoch number to the value to record.
    """
    # open in append mode to create the file if needed; the original used
    # os.mknod, which is non-portable and fails when the file already exists
    with open(root, 'a'):
        pass
    with open(root, 'r') as f:
        lines = f.readlines()
    if not lines:
        epoch = -1
    else:
        # recover the last logged epoch from the final "<epoch> <value>" line
        epoch = int(lines[-1][:lines[-1].index(' ')])
    with open(root, 'a') as f:
        for line in file:
            if line > epoch:
                f.write(str(line) + " " + str(file[line]) + '\n')


if __name__ == '__main__':
    main()
| 15,032 | 34.707838 | 114 | py |
EPSANet | EPSANet-master/loss.py | import torch
import numpy as np
from torch import nn
from torch.autograd import Variable
import torch.nn.functional as F
class CrossEntropyLabelSmooth(nn.Module):
    """Cross entropy loss with label smoothing regularizer.

    Reference:
    Szegedy et al. Rethinking the Inception Architecture for Computer Vision. CVPR 2016.
    Equation: y = (1 - epsilon) * y + epsilon / K.

    Args:
        num_classes (int): number of classes.
        epsilon (float): weight.
    """

    def __init__(self, num_classes, epsilon=0.1, use_gpu=True):
        super(CrossEntropyLabelSmooth, self).__init__()
        self.num_classes = num_classes
        self.epsilon = epsilon
        self.use_gpu = use_gpu
        self.logsoftmax = nn.LogSoftmax(dim=1)

    def forward(self, inputs, targets):
        """
        Args:
            inputs: prediction matrix (before softmax) with shape (batch_size, num_classes)
            targets: ground truth labels with shape (num_classes)
        """
        log_probs = self.logsoftmax(inputs)
        # one-hot encode the labels on CPU, then (optionally) move to GPU
        one_hot = torch.zeros(log_probs.size()).scatter_(1, targets.unsqueeze(1).cpu(), 1)
        if self.use_gpu:
            one_hot = one_hot.cuda()
        # smooth: keep (1 - epsilon) mass on the label, spread epsilon uniformly
        smoothed = (1 - self.epsilon) * one_hot + self.epsilon / self.num_classes
        loss = (- smoothed * log_probs).mean(0).sum()
        return loss
EPSANet | EPSANet-master/models/SE_weight_module.py |
import torch.nn as nn
class SEWeightModule(nn.Module):
    """Squeeze-and-Excitation gate: maps [N, C, H, W] to per-channel
    weights of shape [N, C, 1, 1] in the open interval (0, 1)."""

    def __init__(self, channels, reduction=16):
        super(SEWeightModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels//reduction, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels//reduction, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # squeeze to [N, C, 1, 1], then excite through the bottleneck MLP
        squeezed = self.avg_pool(x)
        hidden = self.relu(self.fc1(squeezed))
        weight = self.sigmoid(self.fc2(hidden))
        return weight
EPSANet | EPSANet-master/models/epsanet.py | import torch
import torch.nn as nn
import math
from .SE_weight_module import SEWeightModule
def conv(in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1):
    """standard convolution with padding (bias-free, BN expected to follow)"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=kernel_size,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        bias=False,
    )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution (bias-free, used for channel projection)"""
    layer = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
    return layer
class PSAModule(nn.Module):
    """Pyramid Split Attention (PSA).

    Produces ``planes`` output channels as four branches of ``planes // 4``
    channels each, one per kernel size in ``conv_kernels`` (with matching group
    counts in ``conv_groups``), re-weighted by a shared SE module with a
    softmax taken *across the four branches*.
    """

    def __init__(self, inplans, planes, conv_kernels=[3, 5, 7, 9], stride=1, conv_groups=[1, 4, 8, 16]):
        super(PSAModule, self).__init__()
        # NOTE: the list defaults are read-only here, so the mutable-default
        # pitfall is benign.  padding = kernel // 2 keeps spatial size (stride 1).
        self.conv_1 = conv(inplans, planes//4, kernel_size=conv_kernels[0], padding=conv_kernels[0]//2,
                            stride=stride, groups=conv_groups[0])
        self.conv_2 = conv(inplans, planes//4, kernel_size=conv_kernels[1], padding=conv_kernels[1]//2,
                            stride=stride, groups=conv_groups[1])
        self.conv_3 = conv(inplans, planes//4, kernel_size=conv_kernels[2], padding=conv_kernels[2]//2,
                            stride=stride, groups=conv_groups[2])
        self.conv_4 = conv(inplans, planes//4, kernel_size=conv_kernels[3], padding=conv_kernels[3]//2,
                            stride=stride, groups=conv_groups[3])
        self.se = SEWeightModule(planes // 4)
        self.split_channel = planes // 4
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x):
        batch_size = x.shape[0]
        x1 = self.conv_1(x)
        x2 = self.conv_2(x)
        x3 = self.conv_3(x)
        x4 = self.conv_4(x)

        # stack the branch outputs as [B, 4, C/4, H, W]
        feats = torch.cat((x1, x2, x3, x4), dim=1)
        feats = feats.view(batch_size, 4, self.split_channel, feats.shape[2], feats.shape[3])

        # the same SE module scores each branch independently
        x1_se = self.se(x1)
        x2_se = self.se(x2)
        x3_se = self.se(x3)
        x4_se = self.se(x4)

        x_se = torch.cat((x1_se, x2_se, x3_se, x4_se), dim=1)
        attention_vectors = x_se.view(batch_size, 4, self.split_channel, 1, 1)
        # softmax over the branch dimension (dim=1), not over channels
        attention_vectors = self.softmax(attention_vectors)
        feats_weight = feats * attention_vectors
        for i in range(4):
            # [B, C/4, H, W] slice of branch i (trailing dim kept implicitly)
            x_se_weight_fp = feats_weight[:, i, :, :]
            if i == 0:
                out = x_se_weight_fp
            else:
                # NOTE(review): prepending reverses branch order in the output
                # (largest kernel first); presumably matches the released
                # reference code — confirm before changing.
                out = torch.cat((x_se_weight_fp, out), 1)

        return out
class EPSABlock(nn.Module):
    """ResNet-style bottleneck in which the middle 3x3 convolution is replaced
    by a Pyramid Split Attention (PSA) module."""

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, norm_layer=None, conv_kernels=[3, 5, 7, 9],
                 conv_groups=[1, 4, 8, 16]):
        super(EPSABlock, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        # Both self.conv2 and self.downsample layers downsample the input when stride != 1
        self.conv1 = conv1x1(inplanes, planes)
        self.bn1 = norm_layer(planes)
        self.conv2 = PSAModule(planes, planes, stride=stride, conv_kernels=conv_kernels, conv_groups=conv_groups)
        self.bn2 = norm_layer(planes)
        self.conv3 = conv1x1(planes, planes * self.expansion)
        self.bn3 = norm_layer(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        shortcut = x

        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))

        # project the identity path when shape changes
        if self.downsample is not None:
            shortcut = self.downsample(x)

        out += shortcut
        return self.relu(out)
class EPSANet(nn.Module):
    """ResNet-style backbone assembled from the given block type.

    :param block: block class to stack (e.g. ``EPSABlock``).
    :param layers: number of blocks in each of the four stages.
    :param num_classes: size of the final classification layer.
    """

    def __init__(self,block, layers, num_classes=1000):
        super(EPSANet, self).__init__()
        self.inplanes = 64
        # stem: 7x7 stride-2 conv + max-pool, as in ResNet
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layers(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layers(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layers(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layers(block, 512, layers[3], stride=2)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # He (Kaiming) normal init for convs; BatchNorm starts as identity
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layers(self, block, planes, num_blocks, stride=1):
        # project the identity path when spatial size or channel width changes
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, num_blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        # returns [batch, num_classes] logits
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
def epsanet50():
    """EPSANet-50: EPSA bottlenecks in a ResNet-50 layout (3-4-6-3), 1000 classes."""
    model = EPSANet(EPSABlock, [3, 4, 6, 3], num_classes=1000)
    return model
def epsanet101():
    """EPSANet-101: EPSA bottlenecks in a ResNet-101 layout (3-4-23-3), 1000 classes."""
    model = EPSANet(EPSABlock, [3, 4, 23, 3], num_classes=1000)
    return model
| 6,122 | 35.664671 | 113 | py |
trieste-develop | trieste-develop/setup.py | from pathlib import Path
from setuptools import find_packages, setup
# the PyPI long description comes straight from the README
with open("README.md", "r") as file:
    long_description = file.read()

setup(
    name="trieste",
    # single-sourced version: read from the packaged trieste/VERSION file
    version=Path("trieste/VERSION").read_text().strip(),
    author="The Trieste contributors",
    author_email="labs@secondmind.ai",
    description="A Bayesian optimization research toolbox built on TensorFlow",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/secondmind-labs/trieste",
    packages=find_packages(include=("trieste*",)),
    # py.typed marks the package as PEP 561 type-annotated
    package_data={
        "trieste": ["py.typed", "VERSION"],
    },
    classifiers=[
        "Programming Language :: Python :: 3.7",
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
    ],
    python_requires="~=3.7",
    install_requires=[
        "absl-py",
        "dill!=0.3.6",
        "gpflow>=2.8.1",
        "gpflux>=0.4.2",
        "numpy",
        # select the macOS-arm64 TensorFlow build via environment markers
        "tensorflow>=2.5; platform_system!='Darwin' or platform_machine!='arm64'",
        "tensorflow-macos>=2.5; platform_system=='Darwin' and platform_machine=='arm64'",
        "tensorflow-probability>=0.13",
        "greenlet>=1.1.0",
    ],
    extras_require={
        "plotting": ["seaborn", "plotly"],
        "qhsri": ["pymoo", "cvxpy"],
    },
)
| 1,939 | 33.642857 | 89 | py |
trieste-develop | trieste-develop/trieste/bayesian_optimizer.py |
"""
This module contains the :class:`BayesianOptimizer` class, used to perform Bayesian optimization.
"""
from __future__ import annotations
import copy
import traceback
import warnings
from collections import Counter
from dataclasses import dataclass
from pathlib import Path
from typing import (
Any,
Callable,
ClassVar,
Dict,
Generic,
Mapping,
MutableMapping,
Optional,
TypeVar,
cast,
overload,
)
import absl
import dill
import numpy as np
import tensorflow as tf
from scipy.spatial.distance import pdist
from .acquisition.multi_objective import non_dominated
try:
import pandas as pd
import seaborn as sns
except ModuleNotFoundError:
pd = None
sns = None
from . import logging
from .acquisition.rule import TURBO, AcquisitionRule, EfficientGlobalOptimization
from .data import Dataset
from .models import SupportsCovarianceWithTopFidelity, TrainableProbabilisticModel
from .observer import OBJECTIVE, Observer
from .space import SearchSpace
from .types import State, Tag, TensorType
from .utils import Err, Ok, Result, Timer
StateType = TypeVar("StateType")
""" Unbound type variable. """

SearchSpaceType = TypeVar("SearchSpaceType", bound=SearchSpace)
""" Type variable bound to :class:`SearchSpace`. """

# contravariant: a rule written for a base model type also accepts subtypes
TrainableProbabilisticModelType = TypeVar(
    "TrainableProbabilisticModelType", bound=TrainableProbabilisticModel, contravariant=True
)
""" Contravariant type variable bound to :class:`TrainableProbabilisticModel`. """

# signature: (datasets, models, acquisition_state) -> whether to stop early
EarlyStopCallback = Callable[
    [Mapping[Tag, Dataset], Mapping[Tag, TrainableProbabilisticModelType], Optional[StateType]],
    bool,
]
""" Early stop callback type, generic in the model and state types. """
@dataclass(frozen=True)
class Record(Generic[StateType]):
    """Container to record the state of each step of the optimization process."""

    datasets: Mapping[Tag, Dataset]
    """ The known data from the observer. """

    models: Mapping[Tag, TrainableProbabilisticModel]
    """ The models over the :attr:`datasets`. """

    acquisition_state: StateType | None
    """ The acquisition state. """

    @property
    def dataset(self) -> Dataset:
        """The dataset when there is just one dataset."""
        if len(self.datasets) != 1:
            raise ValueError(f"Expected a single dataset, found {len(self.datasets)}")
        return next(iter(self.datasets.values()))

    @property
    def model(self) -> TrainableProbabilisticModel:
        """The model when there is just one dataset."""
        if len(self.models) != 1:
            raise ValueError(f"Expected a single model, found {len(self.models)}")
        return next(iter(self.models.values()))

    def save(self, path: Path | str) -> FrozenRecord[StateType]:
        """Save the record to disk. Will overwrite any existing file at the same path."""
        target = Path(path)
        target.parent.mkdir(exist_ok=True, parents=True)
        with target.open("wb") as f:
            dill.dump(self, f, dill.HIGHEST_PROTOCOL)
        return FrozenRecord(target)
@dataclass(frozen=True)
class FrozenRecord(Generic[StateType]):
    """
    A Record container saved on disk.

    Note that records are saved via pickling and are therefore neither portable nor secure.
    Only open frozen records generated on the same system.
    """

    path: Path
    """ The path to the pickled Record. """

    def load(self) -> Record[StateType]:
        """Load the record into memory."""
        with self.path.open("rb") as f:
            record = dill.load(f)
        return record

    @property
    def datasets(self) -> Mapping[Tag, Dataset]:
        """The known data from the observer."""
        record = self.load()
        return record.datasets

    @property
    def models(self) -> Mapping[Tag, TrainableProbabilisticModel]:
        """The models over the :attr:`datasets`."""
        record = self.load()
        return record.models

    @property
    def acquisition_state(self) -> StateType | None:
        """The acquisition state."""
        record = self.load()
        return record.acquisition_state

    @property
    def dataset(self) -> Dataset:
        """The dataset when there is just one dataset."""
        record = self.load()
        return record.dataset

    @property
    def model(self) -> TrainableProbabilisticModel:
        """The model when there is just one dataset."""
        record = self.load()
        return record.model
# this should be a generic NamedTuple, but mypy doesn't support them
@dataclass(frozen=True)
class OptimizationResult(Generic[StateType]):
    """The final result, and the historical data of the optimization process."""

    final_result: Result[Record[StateType]]
    """
    The final result of the optimization process. This contains either a :class:`Record` or an
    exception.
    """

    history: list[Record[StateType] | FrozenRecord[StateType]]
    r"""
    The history of the :class:`Record`\ s from each step of the optimization process. These
    :class:`Record`\ s are created at the *start* of each loop, and as such will never
    include the :attr:`final_result`. The records may be either in memory or on disk.
    """

    @staticmethod
    def step_filename(step: int, num_steps: int) -> str:
        """Default filename for saved optimization steps."""
        # zero-pad the step index so lexicographic file order matches step order
        return f"step.{step:0{len(str(num_steps - 1))}d}.pickle"

    STEP_GLOB: ClassVar[str] = "step.*.pickle"
    RESULTS_FILENAME: ClassVar[str] = "results.pickle"

    def astuple(
        self,
    ) -> tuple[Result[Record[StateType]], list[Record[StateType] | FrozenRecord[StateType]]]:
        """
        **Note:** In contrast to the standard library function :func:`dataclasses.astuple`, this
        method does *not* deepcopy instance attributes.

        :return: The :attr:`final_result` and :attr:`history` as a 2-tuple.
        """
        return self.final_result, self.history

    @property
    def is_ok(self) -> bool:
        """`True` if the final result contains a :class:`Record`."""
        return self.final_result.is_ok

    @property
    def is_err(self) -> bool:
        """`True` if the final result contains an exception."""
        return self.final_result.is_err

    def try_get_final_datasets(self) -> Mapping[Tag, Dataset]:
        """
        Convenience method to attempt to get the final data.

        :return: The final data, if the optimization completed successfully.
        :raise Exception: If an exception occurred during optimization.
        """
        return self.final_result.unwrap().datasets

    def try_get_final_dataset(self) -> Dataset:
        """
        Convenience method to attempt to get the final data for a single dataset run.

        :return: The final data, if the optimization completed successfully.
        :raise Exception: If an exception occurred during optimization.
        :raise ValueError: If the optimization was not a single dataset run.
        """
        datasets = self.try_get_final_datasets()
        if len(datasets) == 1:
            return next(iter(datasets.values()))
        else:
            raise ValueError(f"Expected a single dataset, found {len(datasets)}")

    def try_get_optimal_point(self) -> tuple[TensorType, TensorType, TensorType]:
        """
        Convenience method to attempt to get the optimal point for a single dataset,
        single objective run.

        :return: Tuple of the optimal query point, observation and its index.
        :raise ValueError: If the run was not single-objective, or used
            multi-fidelity models (for which the minimum observation is not
            meaningful).
        """
        dataset = self.try_get_final_dataset()
        if tf.rank(dataset.observations) != 2 or dataset.observations.shape[1] != 1:
            raise ValueError("Expected a single objective")
        # fix: iterate the mapping's *values* — iterating the mapping itself
        # yields tags, so the isinstance check could never trigger
        if tf.reduce_any(
            [
                isinstance(model, SupportsCovarianceWithTopFidelity)
                for model in self.try_get_final_models().values()
            ]
        ):
            raise ValueError("Expected single fidelity models")
        arg_min_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
        return dataset.query_points[arg_min_idx], dataset.observations[arg_min_idx], arg_min_idx

    def try_get_final_models(self) -> Mapping[Tag, TrainableProbabilisticModel]:
        """
        Convenience method to attempt to get the final models.

        :return: The final models, if the optimization completed successfully.
        :raise Exception: If an exception occurred during optimization.
        """
        return self.final_result.unwrap().models

    def try_get_final_model(self) -> TrainableProbabilisticModel:
        """
        Convenience method to attempt to get the final model for a single model run.

        :return: The final model, if the optimization completed successfully.
        :raise Exception: If an exception occurred during optimization.
        :raise ValueError: If the optimization was not a single model run.
        """
        models = self.try_get_final_models()
        if len(models) == 1:
            return next(iter(models.values()))
        else:
            raise ValueError(f"Expected single model, found {len(models)}")

    @property
    def loaded_history(self) -> list[Record[StateType]]:
        """The history of the optimization process loaded into memory."""
        return [record if isinstance(record, Record) else record.load() for record in self.history]

    def save_result(self, path: Path | str) -> None:
        """Save the final result to disk. Will overwrite any existing file at the same path."""
        Path(path).parent.mkdir(exist_ok=True, parents=True)
        with open(path, "wb") as f:
            dill.dump(self.final_result, f, dill.HIGHEST_PROTOCOL)

    def save(self, base_path: Path | str) -> None:
        """Save the optimization result to disk. Will overwrite existing files at the same path."""
        path = Path(base_path)
        num_steps = len(self.history)
        self.save_result(path / self.RESULTS_FILENAME)
        for i, record in enumerate(self.loaded_history):
            record_path = path / self.step_filename(i, num_steps)
            record.save(record_path)

    @classmethod
    def from_path(cls, base_path: Path | str) -> OptimizationResult[StateType]:
        """Load a previously saved OptimizationResult."""
        # a missing results file becomes an Err rather than raising, so partial
        # runs (history only) can still be inspected
        try:
            with open(Path(base_path) / cls.RESULTS_FILENAME, "rb") as f:
                result = dill.load(f)
        except FileNotFoundError as e:
            result = Err(e)

        history: list[Record[StateType] | FrozenRecord[StateType]] = [
            FrozenRecord(file) for file in sorted(Path(base_path).glob(cls.STEP_GLOB))
        ]
        return cls(result, history)
class BayesianOptimizer(Generic[SearchSpaceType]):
    """
    This class performs Bayesian optimization, the data-efficient optimization of an expensive
    black-box *objective function* over some *search space*. Since we may not have access to the
    objective function itself, we speak instead of an *observer* that observes it.
    """

    def __init__(self, observer: Observer, search_space: SearchSpaceType):
        """
        :param observer: The observer of the objective function.
        :param search_space: The space over which to search. Must be a
            :class:`~trieste.space.SearchSpace`.
        """
        self._observer = observer
        self._search_space = search_space

    def __repr__(self) -> str:
        return f"BayesianOptimizer({self._observer!r}, {self._search_space!r})"

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModel],
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModel, object]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[None]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModelType],
        acquisition_rule: AcquisitionRule[
            TensorType, SearchSpaceType, TrainableProbabilisticModelType
        ],
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, object]
        ] = None,
        start_step: int = 0,
        # this should really be OptimizationResult[None], but tf.Tensor is untyped so the type
        # checker can't differentiate between TensorType and State[S | None, TensorType], and
        # the return types clash. object is close enough to None that object will do.
    ) -> OptimizationResult[object]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModelType],
        acquisition_rule: AcquisitionRule[
            TensorType, SearchSpaceType, TrainableProbabilisticModelType
        ],
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, object]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[object]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModelType],
        acquisition_rule: AcquisitionRule[
            State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
        ],
        acquisition_state: StateType | None = None,
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, StateType]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[StateType]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModelType],
        acquisition_rule: AcquisitionRule[
            State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
        ],
        acquisition_state: StateType | None = None,
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, StateType]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[StateType]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Dataset,
        models: TrainableProbabilisticModel,
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModel, object]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[None]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Dataset,
        models: TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            TensorType, SearchSpaceType, TrainableProbabilisticModelType
        ],
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, object]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[object]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Dataset,
        models: TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            TensorType, SearchSpaceType, TrainableProbabilisticModelType
        ],
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, object]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[object]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Dataset,
        models: TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
        ],
        acquisition_state: StateType | None = None,
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, StateType]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[StateType]:
        ...

    @overload
    def optimize(
        self,
        num_steps: int,
        datasets: Dataset,
        models: TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
        ],
        acquisition_state: StateType | None = None,
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, StateType]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[StateType]:
        ...

    def optimize(
        self,
        num_steps: int,
        datasets: Mapping[Tag, Dataset] | Dataset,
        models: Mapping[Tag, TrainableProbabilisticModelType] | TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            TensorType | State[StateType | None, TensorType],
            SearchSpaceType,
            TrainableProbabilisticModelType,
        ]
        | None = None,
        acquisition_state: StateType | None = None,
        *,
        track_state: bool = True,
        track_path: Optional[Path | str] = None,
        fit_model: bool = True,
        fit_initial_model: bool = True,
        early_stop_callback: Optional[
            EarlyStopCallback[TrainableProbabilisticModelType, StateType]
        ] = None,
        start_step: int = 0,
    ) -> OptimizationResult[StateType] | OptimizationResult[None]:
        """
        Attempt to find the minimizer of the ``observer`` in the ``search_space`` (both specified at
        :meth:`__init__`). This is the central implementation of the Bayesian optimization loop.

        For each step in ``num_steps``, this method:
            - Finds the next points with which to query the ``observer`` using the
              ``acquisition_rule``'s :meth:`acquire` method, passing it the ``search_space``,
              ``datasets``, ``models``, and current acquisition state.
            - Queries the ``observer`` *once* at those points.
            - Updates the datasets and models with the data from the ``observer``.

        If any errors are raised during the optimization loop, this method will catch and return
        them instead and print a message (using `absl` at level `absl.logging.ERROR`).

        If ``track_state`` is enabled, then in addition to the final result, the history of the
        optimization process will also be returned. If ``track_path`` is also set, then
        the history and final result will be saved to disk rather than all being kept in memory.

        **Type hints:**
            - The ``acquisition_rule`` must use the same type of
              :class:`~trieste.space.SearchSpace` as specified in :meth:`__init__`.
            - The ``acquisition_state`` must be of the type expected by the ``acquisition_rule``.
              Any acquisition state in the optimization result will also be of this type.

        :param num_steps: The number of optimization steps to run.
        :param datasets: The known observer query points and observations for each tag.
        :param models: The model to use for each :class:`~trieste.data.Dataset` in
            ``datasets``.
        :param acquisition_rule: The acquisition rule, which defines how to search for a new point
            on each optimization step. Defaults to
            :class:`~trieste.acquisition.rule.EfficientGlobalOptimization` with default
            arguments. Note that if the default is used, this implies the tags must be
            `OBJECTIVE`, the search space can be any :class:`~trieste.space.SearchSpace`, and the
            acquisition state returned in the :class:`OptimizationResult` will be `None`.
        :param acquisition_state: The acquisition state to use on the first optimization step.
            This argument allows the caller to restore the optimization process from an existing
            :class:`Record`.
        :param track_state: If `True`, this method saves the optimization state at the start of each
            step. Models and acquisition state are copied using `copy.deepcopy`.
        :param track_path: If set, the optimization state is saved to disk at this path,
            rather than being copied in memory.
        :param fit_model: If `False` then we never fit the model during BO (e.g. if we
            are using a rule that doesn't rely on the models and don't want to waste computation).
        :param fit_initial_model: If `False` then we assume that the initial models have
            already been optimized on the datasets and so do not require optimization before
            the first optimization step.
        :param early_stop_callback: An optional callback that is evaluated with the current
            datasets, models and optimization state before every optimization step. If this
            returns `True` then the optimization loop is terminated early.
        :param start_step: The step number to start with. This number is removed from ``num_steps``
            and is useful for restarting previous computations.
        :return: An :class:`OptimizationResult`. The :attr:`final_result` element contains either
            the final optimization data, models and acquisition state, or, if an exception was
            raised while executing the optimization loop, it contains the exception raised. In
            either case, the :attr:`history` element is the history of the data, models and
            acquisition state at the *start* of each optimization step (up to and including any step
            that fails to complete). The history will never include the final optimization result.
        :raise ValueError: If any of the following are true:
            - ``num_steps`` is negative.
            - the keys in ``datasets`` and ``models`` do not match
            - ``datasets`` or ``models`` are empty
            - the default `acquisition_rule` is used and the tags are not `OBJECTIVE`.
        """
        # Normalise the single-dataset/single-model convenience form to the tagged form.
        if isinstance(datasets, Dataset):
            datasets = {OBJECTIVE: datasets}
            models = {OBJECTIVE: models}  # type: ignore[dict-item]

        # reassure the type checker that everything is tagged
        datasets = cast(Dict[Tag, Dataset], datasets)
        models = cast(Dict[Tag, TrainableProbabilisticModelType], models)

        if num_steps < 0:
            raise ValueError(f"num_steps must be at least 0, got {num_steps}")

        if datasets.keys() != models.keys():
            raise ValueError(
                f"datasets and models should contain the same keys. Got {datasets.keys()} and"
                f" {models.keys()} respectively."
            )

        if not datasets:
            raise ValueError("dicts of datasets and models must be populated.")

        if fit_model and isinstance(acquisition_rule, TURBO):
            warnings.warn(
                """
                Are you sure you want to keep fitting the global model even though you
                are using TURBO which has only local models? This is a waste of computation.
                Consider setting 'fit_model'='False'.
                """
            )

        if acquisition_rule is None:
            if datasets.keys() != {OBJECTIVE}:
                raise ValueError(
                    f"Default acquisition rule EfficientGlobalOptimization requires tag"
                    f" {OBJECTIVE!r}, got keys {datasets.keys()}"
                )
            acquisition_rule = EfficientGlobalOptimization[
                SearchSpaceType, TrainableProbabilisticModelType
            ]()

        history: list[FrozenRecord[StateType] | Record[StateType]] = []
        query_plot_dfs: dict[int, pd.DataFrame] = {}
        observation_plot_dfs = observation_plot_init(datasets)

        summary_writer = logging.get_tensorboard_writer()
        if summary_writer:
            with summary_writer.as_default(step=0):
                write_summary_init(
                    self._observer,
                    self._search_space,
                    acquisition_rule,
                    datasets,
                    models,
                    num_steps,
                )

        # Steps are numbered from 1; start_step skips steps already run in a previous call.
        for step in range(start_step + 1, num_steps + 1):
            logging.set_step_number(step)

            if early_stop_callback and early_stop_callback(datasets, models, acquisition_state):
                tf.print("Optimization terminated early", output_stream=absl.logging.INFO)
                break

            try:
                if track_state:
                    try:
                        if track_path is None:
                            # Keep an in-memory deep copy of the state at the start of the step.
                            datasets_copy = copy.deepcopy(datasets)
                            models_copy = copy.deepcopy(models)
                            acquisition_state_copy = copy.deepcopy(acquisition_state)
                            record = Record(datasets_copy, models_copy, acquisition_state_copy)
                            history.append(record)
                        else:
                            # Save the state to disk rather than copying it in memory.
                            track_path = Path(track_path)
                            record = Record(datasets, models, acquisition_state)
                            file_name = OptimizationResult.step_filename(step, num_steps)
                            history.append(record.save(track_path / file_name))
                    except Exception as e:
                        raise NotImplementedError(
                            "Failed to save the optimization state. Some models do not support "
                            "deepcopying or serialization and cannot be saved. "
                            "(This is particularly common for deep neural network models, though "
                            "some of the model wrappers accept a model closure as a workaround.) "
                            "For these models, the ``track_state`` argument of the "
                            ":meth:`~trieste.bayesian_optimizer.BayesianOptimizer.optimize` method "
                            "should be set to `False`. This means that only the final model "
                            "will be available."
                        ) from e

                # Fit the initial models once, before the first acquisition (skipped when
                # restarting with start_step > 0, as the models are then assumed up to date).
                if step == 1 and fit_model and fit_initial_model:
                    with Timer() as initial_model_fitting_timer:
                        for tag, model in models.items():
                            dataset = datasets[tag]
                            model.update(dataset)
                            model.optimize(dataset)
                    if summary_writer:
                        logging.set_step_number(0)
                        with summary_writer.as_default(step=0):
                            write_summary_initial_model_fit(
                                datasets, models, initial_model_fitting_timer
                            )
                        logging.set_step_number(step)

                with Timer() as total_step_wallclock_timer:
                    with Timer() as query_point_generation_timer:
                        points_or_stateful = acquisition_rule.acquire(
                            self._search_space, models, datasets=datasets
                        )
                    # Stateful rules return a callable mapping old state to (state, points).
                    if callable(points_or_stateful):
                        acquisition_state, query_points = points_or_stateful(acquisition_state)
                    else:
                        query_points = points_or_stateful

                    observer_output = self._observer(query_points)

                    tagged_output = (
                        observer_output
                        if isinstance(observer_output, Mapping)
                        else {OBJECTIVE: observer_output}
                    )

                    datasets = {tag: datasets[tag] + tagged_output[tag] for tag in tagged_output}

                    with Timer() as model_fitting_timer:
                        if fit_model:
                            for tag, model in models.items():
                                dataset = datasets[tag]
                                model.update(dataset)
                                model.optimize(dataset)

                if summary_writer:
                    with summary_writer.as_default(step=step):
                        write_summary_observations(
                            datasets,
                            models,
                            tagged_output,
                            model_fitting_timer,
                            observation_plot_dfs,
                        )
                        write_summary_query_points(
                            datasets,
                            models,
                            self._search_space,
                            query_points,
                            query_point_generation_timer,
                            query_plot_dfs,
                        )
                        logging.scalar("wallclock/step", total_step_wallclock_timer.time)

            except Exception as error:  # pylint: disable=broad-except
                tf.print(
                    f"\nOptimization failed at step {step}, encountered error with traceback:"
                    f"\n{traceback.format_exc()}"
                    f"\nTerminating optimization and returning the optimization history. You may "
                    f"be able to use the history to restart the process from a previous successful "
                    f"optimization step.\n",
                    output_stream=absl.logging.ERROR,
                )
                if isinstance(error, MemoryError):
                    tf.print(
                        "\nOne possible cause of memory errors is trying to evaluate acquisition "
                        "\nfunctions over large datasets, e.g. when initializing optimizers. "
                        "\nYou may be able to work around this by splitting up the evaluation "
                        "\nusing split_acquisition_function or split_acquisition_function_calls.",
                        output_stream=absl.logging.ERROR,
                    )
                # Wrap the failure in an Err result so the history gathered so far is returned.
                result = OptimizationResult(Err(error), history)
                if track_state and track_path is not None:
                    result.save_result(Path(track_path) / OptimizationResult.RESULTS_FILENAME)
                return result

        tf.print("Optimization completed without errors", output_stream=absl.logging.INFO)

        record = Record(datasets, models, acquisition_state)
        result = OptimizationResult(Ok(record), history)
        if track_state and track_path is not None:
            result.save_result(Path(track_path) / OptimizationResult.RESULTS_FILENAME)
        return result

    def continue_optimization(
        self,
        num_steps: int,
        optimization_result: OptimizationResult[StateType],
        *args: Any,
        **kwargs: Any,
    ) -> OptimizationResult[StateType]:
        """
        Continue a previous optimization that either failed, was terminated early, or which
        you simply wish to run for more steps.

        :param num_steps: The total number of optimization steps, including any that have already
            been run.
        :param optimization_result: The optimization result from which to extract the datasets,
            models and acquisition state. If the result was successful then the final result is
            used; otherwise the last record in the history is used. The size of the history
            is used to determine how many more steps are required.
        :param args: Any more positional arguments to pass on to optimize.
        :param kwargs: Any more keyword arguments to pass on to optimize.
        :return: An :class:`OptimizationResult`. The history will contain both the history from
            `optimization_result` (including the `final_result` if that was successful) and
            any new records.
        """
        history: list[Record[StateType] | FrozenRecord[StateType]] = []
        history.extend(optimization_result.history)
        if optimization_result.final_result.is_ok:
            history.append(optimization_result.final_result.unwrap())
        if not history:
            raise ValueError("Cannot continue from empty optimization result")

        result = self.optimize(  # type: ignore[call-overload]
            num_steps,
            history[-1].datasets,
            history[-1].models,
            *args,
            acquisition_state=history[-1].acquisition_state,
            **kwargs,
            start_step=len(history) - 1,
        )
        # The first new record duplicates the state we restarted from, so replace it
        # with the full previous history.
        result.history[:1] = history
        return result
def write_summary_init(
    observer: Observer,
    search_space: SearchSpace,
    acquisition_rule: AcquisitionRule[
        TensorType | State[StateType | None, TensorType],
        SearchSpaceType,
        TrainableProbabilisticModelType,
    ],
    datasets: Mapping[Tag, Dataset],
    models: Mapping[Tag, TrainableProbabilisticModel],
    num_steps: int,
) -> None:
    """Write initial BO loop TensorBoard summary."""
    initial_points = {k: len(v) for k, v in datasets.items()}
    device_counts = dict(Counter(d.device_type for d in tf.config.list_logical_devices()))
    # One metadata text entry describing the whole optimization setup.
    sections = [
        f"Observer: `{observer}`",
        f"Number of steps: `{num_steps}`",
        f"Number of initial points: `{initial_points}`",
        f"Search Space: `{search_space}`",
        f"Acquisition rule:\n\n {acquisition_rule}",
        f"Models:\n\n {models}",
        f"Available devices: `{device_counts}`",
    ]
    logging.text("metadata", "\n\n".join(sections))
def write_summary_initial_model_fit(
    datasets: Mapping[Tag, Dataset],
    models: Mapping[Tag, TrainableProbabilisticModel],
    model_fitting_timer: Timer,
) -> None:
    """Write TensorBoard summary for the model fitting to the initial data."""
    # Log each model under its own name scope so the summaries are grouped per tag.
    for tag in models:
        with tf.name_scope(f"{tag}.model"):
            models[tag].log(datasets[tag])
    logging.scalar("wallclock/model_fitting", model_fitting_timer.time)
def observation_plot_init(
    datasets: Mapping[Tag, Dataset],
) -> dict[Tag, pd.DataFrame]:
    """Initialise query point pairplot dataframes with initial observations.
    Also logs warnings if pairplot dependencies are not installed."""
    plot_dfs: dict[Tag, pd.DataFrame] = {}
    if not logging.get_tensorboard_writer():
        # Nothing to plot without a TensorBoard writer.
        return plot_dfs
    # Track whether any requested pairplot is missing its pandas/seaborn dependencies.
    missing_seaborn = logging.include_summary("query_points/_pairplot") and not (pd and sns)
    for tag, dataset in datasets.items():
        if not logging.include_summary(f"{tag}.observations/_pairplot"):
            continue
        output_dim = tf.shape(dataset.observations)[-1]
        if output_dim < 2:
            # Pairplots only make sense for multi-output observations.
            continue
        if not (pd and sns):
            missing_seaborn = True
            continue
        frame = pd.DataFrame(
            dataset.observations, columns=[f"x{i}" for i in range(output_dim)]
        ).applymap(float)
        frame["observations"] = "initial"
        plot_dfs[tag] = frame
    if missing_seaborn:
        tf.print(
            "\nPairplot TensorBoard summaries require seaborn to be installed."
            "\nOne way to do this is to install 'trieste[plotting]'.",
            output_stream=absl.logging.INFO,
        )
    return plot_dfs
def write_summary_observations(
    datasets: Mapping[Tag, Dataset],
    models: Mapping[Tag, TrainableProbabilisticModel],
    tagged_output: Mapping[Tag, TensorType],
    model_fitting_timer: Timer,
    observation_plot_dfs: MutableMapping[Tag, pd.DataFrame],
) -> None:
    """Write TensorBoard summary for the current step observations.

    :param datasets: All observations gathered so far (including this step's), per tag.
    :param models: The current models, logged under a per-tag name scope.
    :param tagged_output: Only this step's new observations, per tag.
    :param model_fitting_timer: Timer that wrapped this step's model fitting.
    :param observation_plot_dfs: Per-tag dataframe cache for the pairplot, updated in
        place so points can be styled as initial/old/new across steps.
    """
    for tag in datasets:
        with tf.name_scope(f"{tag}.model"):
            models[tag].log(datasets[tag])
        # Per output dimension: histogram of new observations plus best-value scalars.
        output_dim = tf.shape(tagged_output[tag].observations)[-1]
        for i in tf.range(output_dim):
            suffix = f"[{i}]" if output_dim > 1 else ""
            if tf.size(tagged_output[tag].observations) > 0:
                logging.histogram(
                    f"{tag}.observation{suffix}/new_observations",
                    tagged_output[tag].observations[..., i],
                )
                logging.scalar(
                    f"{tag}.observation{suffix}/best_new_observation",
                    np.min(tagged_output[tag].observations[..., i]),
                )
            if tf.size(datasets[tag].observations) > 0:
                logging.scalar(
                    f"{tag}.observation{suffix}/best_overall",
                    np.min(datasets[tag].observations[..., i]),
                )
        # Pairplot summary requires pandas + seaborn and at least two output dimensions.
        if logging.include_summary(f"{tag}.observations/_pairplot") and (
            pd and sns and output_dim >= 2
        ):
            columns = [f"x{i}" for i in range(output_dim)]
            observation_new_df = pd.DataFrame(
                tagged_output[tag].observations, columns=columns
            ).applymap(float)
            observation_new_df["observations"] = "new"
            # Append this step's points to the cached dataframe of earlier points.
            observation_plot_df = pd.concat(
                (observation_plot_dfs.get(tag), observation_new_df),
                copy=False,
                ignore_index=True,
            )
            hue_order = ["initial", "old", "new"]
            palette = {"initial": "tab:green", "old": "tab:green", "new": "tab:orange"}
            markers = {"initial": "X", "old": "o", "new": "o"}
            # assume that any OBJECTIVE- or single-tagged multi-output dataset => multi-objective
            # more complex scenarios (e.g. constrained data) need to be plotted by the acq function
            if len(datasets) > 1 and tag != OBJECTIVE:
                observation_plot_df["observation type"] = observation_plot_df.apply(
                    lambda x: x["observations"],
                    axis=1,
                )
            else:
                # Mark non-dominated points so they get their own hue/marker styling.
                observation_plot_df["pareto"] = non_dominated(datasets[tag].observations)[1]
                observation_plot_df["observation type"] = observation_plot_df.apply(
                    lambda x: x["observations"] + x["pareto"] * " (non-dominated)",
                    axis=1,
                )
                hue_order += [hue + " (non-dominated)" for hue in hue_order]
                palette.update(
                    {
                        "initial (non-dominated)": "tab:purple",
                        "old (non-dominated)": "tab:purple",
                        "new (non-dominated)": "tab:red",
                    }
                )
                markers.update(
                    {
                        "initial (non-dominated)": "X",
                        "old (non-dominated)": "o",
                        "new (non-dominated)": "o",
                    }
                )
            pairplot = sns.pairplot(
                observation_plot_df,
                vars=columns,
                hue="observation type",
                hue_order=hue_order,
                palette=palette,
                markers=markers,
            )
            logging.pyplot(f"{tag}.observations/_pairplot", pairplot.fig)
            # Demote this step's points to "old" before caching for the next step.
            observation_plot_df.loc[
                observation_plot_df["observations"] == "new", "observations"
            ] = "old"
            observation_plot_dfs[tag] = observation_plot_df
    logging.scalar(
        "wallclock/model_fitting",
        model_fitting_timer.time,
    )
def write_summary_query_points(
    datasets: Mapping[Tag, Dataset],
    models: Mapping[Tag, TrainableProbabilisticModel],
    search_space: SearchSpace,
    query_points: TensorType,
    query_point_generation_timer: Timer,
    query_plot_dfs: MutableMapping[int, pd.DataFrame],
) -> None:
    """Write TensorBoard summary for the current step query points.

    :param query_points: The points selected this step. Summaries are only written
        when these have rank 2 (a batch of points).
    :param query_point_generation_timer: Timer that wrapped this step's acquisition.
    :param query_plot_dfs: Dataframe cache for the pairplot (keyed by 0), updated in
        place so points can be styled as old/new across steps.
    """
    if tf.rank(query_points) == 2:
        for i in tf.range(tf.shape(query_points)[1]):
            # A single query point is logged per-dimension as scalars, a batch as histograms.
            if len(query_points) == 1:
                logging.scalar(f"query_point/[{i}]", float(query_points[0, i]))
            else:
                logging.histogram(f"query_points/[{i}]", query_points[:, i])
        logging.histogram("query_points/euclidean_distances", lambda: pdist(query_points))
        if pd and sns and logging.include_summary("query_points/_pairplot"):
            columns = [f"x{i}" for i in range(tf.shape(query_points)[1])]
            # Augment the query points with each model's prediction (first output of
            # `predict`) at those points, adding one "predicted" column per output dim.
            qp_preds = query_points
            for tag in datasets:
                pred = models[tag].predict(query_points)[0]
                qp_preds = tf.concat([qp_preds, tf.cast(pred, query_points.dtype)], 1)
                output_dim = tf.shape(pred)[-1]
                for i in range(output_dim):
                    columns.append(f"{tag}{i if (output_dim > 1) else ''} predicted")
            query_new_df = pd.DataFrame(qp_preds, columns=columns).applymap(float)
            query_new_df["query points"] = "new"
            # Append this step's points to the cached dataframe of earlier points.
            query_plot_df = pd.concat(
                (query_plot_dfs.get(0), query_new_df), copy=False, ignore_index=True
            )
            pairplot = sns.pairplot(
                query_plot_df, hue="query points", hue_order=["old", "new"], height=2.25
            )
            # Fix axis limits to the search space bounds, slightly padded for visibility.
            padding = 0.025 * (search_space.upper - search_space.lower)
            upper_limits = search_space.upper + padding
            lower_limits = search_space.lower - padding
            for i in range(search_space.dimension):
                pairplot.axes[0, i].set_xlim((lower_limits[i], upper_limits[i]))
                pairplot.axes[i, 0].set_ylim((lower_limits[i], upper_limits[i]))
            logging.pyplot("query_points/_pairplot", pairplot.fig)
            # Demote this step's points to "old" before caching for the next step.
            query_plot_df["query points"] = "old"
            query_plot_dfs[0] = query_plot_df
    logging.scalar(
        "wallclock/query_point_generation",
        query_point_generation_timer.time,
    )
def stop_at_minimum(
    minimum: Optional[tf.Tensor] = None,
    minimizers: Optional[tf.Tensor] = None,
    minimum_atol: float = 0,
    minimum_rtol: float = 0.05,
    minimizers_atol: float = 0,
    minimizers_rtol: float = 0.05,
    objective_tag: Tag = OBJECTIVE,
    minimum_step_number: Optional[int] = None,
) -> EarlyStopCallback[TrainableProbabilisticModel, object]:
    """
    Generate an early stop function that terminates a BO loop when it gets close enough to the
    given objective minimum and/or minimizer points.

    :param minimum: Optional minimum to stop at, with shape [1].
    :param minimizers: Optional minimizer points to stop at, with shape [N, D].
    :param minimum_atol: Absolute tolerance for minimum.
    :param minimum_rtol: Relative tolerance for minimum.
    :param minimizers_atol: Absolute tolerance for minimizer point.
    :param minimizers_rtol: Relative tolerance for minimizer point.
    :param objective_tag: The tag for the objective data.
    :param minimum_step_number: Minimum step number to stop at.
    :return: An early stop function that terminates if we get close enough to both the minimum
        and any of the minimizer points.
    """

    def early_stop_callback(
        datasets: Mapping[Tag, Dataset],
        _models: Mapping[Tag, TrainableProbabilisticModel],
        _acquisition_state: object,
    ) -> bool:
        # Never stop before the requested minimum step number.
        if minimum_step_number is not None and logging.get_step_number() < minimum_step_number:
            return False
        dataset = datasets[objective_tag]
        best_idx = tf.squeeze(tf.argmin(dataset.observations, axis=0))
        if minimum is not None:
            # Best observed value must be within tolerance of the target minimum.
            observed_best = dataset.observations[best_idx]
            y_close = np.isclose(observed_best, minimum, atol=minimum_atol, rtol=minimum_rtol)
            if not tf.reduce_all(y_close):
                return False
        if minimizers is not None:
            # Best observed point must be within tolerance of at least one minimizer.
            best_point = dataset.query_points[best_idx]
            x_close = np.isclose(best_point, minimizers, atol=minimizers_atol, rtol=minimizers_rtol)
            if not tf.reduce_any(tf.reduce_all(x_close, axis=-1), axis=0):
                return False
        return True

    return early_stop_callback
| 46,324 | 40.39857 | 100 | py |
trieste-develop | trieste-develop/trieste/ask_tell_optimization.py |
"""
This module contains the Ask/Tell API for users of Trieste who would like to
perform Bayesian Optimization with external control of the optimization loop.
"""
from __future__ import annotations
from copy import deepcopy
from typing import Dict, Generic, Mapping, TypeVar, cast, overload
try:
import pandas as pd
except ModuleNotFoundError:
pd = None
import warnings
from . import logging
from .acquisition.rule import TURBO, AcquisitionRule, EfficientGlobalOptimization
from .bayesian_optimizer import (
FrozenRecord,
OptimizationResult,
Record,
observation_plot_init,
write_summary_initial_model_fit,
write_summary_observations,
write_summary_query_points,
)
from .data import Dataset
from .models import TrainableProbabilisticModel
from .observer import OBJECTIVE
from .space import SearchSpace
from .types import State, Tag, TensorType
from .utils import Ok, Timer
# Generic type variables used by the Ask/Tell optimization interface below.
StateType = TypeVar("StateType")
""" Unbound type variable. """
SearchSpaceType = TypeVar("SearchSpaceType", bound=SearchSpace)
""" Type variable bound to :class:`SearchSpace`. """
TrainableProbabilisticModelType = TypeVar(
    "TrainableProbabilisticModelType", bound=TrainableProbabilisticModel, contravariant=True
)
""" Contravariant type variable bound to :class:`TrainableProbabilisticModel`. """
class AskTellOptimizer(Generic[SearchSpaceType, TrainableProbabilisticModelType]):
    """
    This class provides Ask/Tell optimization interface. It is designed for those use cases
    when control of the optimization loop by Trieste is impossible or not desirable.
    For more details about the Bayesian Optimization routine, refer to :class:`BayesianOptimizer`.
    """

    @overload
    def __init__(
        self,
        search_space: SearchSpaceType,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModelType],
        *,
        fit_model: bool = True,
    ):
        ...

    @overload
    def __init__(
        self,
        search_space: SearchSpaceType,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModelType],
        acquisition_rule: AcquisitionRule[
            TensorType, SearchSpaceType, TrainableProbabilisticModelType
        ],
        *,
        fit_model: bool = True,
    ):
        ...

    @overload
    def __init__(
        self,
        search_space: SearchSpaceType,
        datasets: Mapping[Tag, Dataset],
        models: Mapping[Tag, TrainableProbabilisticModelType],
        acquisition_rule: AcquisitionRule[
            State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
        ],
        acquisition_state: StateType | None,
        *,
        fit_model: bool = True,
    ):
        ...

    @overload
    def __init__(
        self,
        search_space: SearchSpaceType,
        datasets: Dataset,
        models: TrainableProbabilisticModelType,
        *,
        fit_model: bool = True,
    ):
        ...

    @overload
    def __init__(
        self,
        search_space: SearchSpaceType,
        datasets: Dataset,
        models: TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            TensorType, SearchSpaceType, TrainableProbabilisticModelType
        ],
        *,
        fit_model: bool = True,
    ):
        ...

    @overload
    def __init__(
        self,
        search_space: SearchSpaceType,
        datasets: Dataset,
        models: TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            State[StateType | None, TensorType], SearchSpaceType, TrainableProbabilisticModelType
        ],
        acquisition_state: StateType | None = None,
        *,
        fit_model: bool = True,
    ):
        ...

    def __init__(
        self,
        search_space: SearchSpaceType,
        datasets: Mapping[Tag, Dataset] | Dataset,
        models: Mapping[Tag, TrainableProbabilisticModelType] | TrainableProbabilisticModelType,
        acquisition_rule: AcquisitionRule[
            TensorType | State[StateType | None, TensorType],
            SearchSpaceType,
            TrainableProbabilisticModelType,
        ]
        | None = None,
        acquisition_state: StateType | None = None,
        *,
        fit_model: bool = True,
    ):
        """
        :param search_space: The space over which to search for the next query point.
        :param datasets: Already observed input-output pairs for each tag.
        :param models: The model to use for each :class:`~trieste.data.Dataset` in
            ``datasets``.
        :param acquisition_rule: The acquisition rule, which defines how to search for a new point
            on each optimization step. Defaults to
            :class:`~trieste.acquisition.rule.EfficientGlobalOptimization` with default
            arguments. Note that if the default is used, this implies the tags must be
            `OBJECTIVE` and the search space can be any :class:`~trieste.space.SearchSpace`.
        :param acquisition_state: The optional acquisition state for stateful acquisitions.
        :param fit_model: If `True` (default), models passed in will be optimized on the given data.
            If `False`, the models are assumed to be optimized already.
        :raise ValueError: If any of the following are true:
            - the keys in ``datasets`` and ``models`` do not match
            - ``datasets`` or ``models`` are empty
            - default acquisition is used but incompatible with other inputs
        """
        self._search_space = search_space
        self._acquisition_state = acquisition_state

        if not datasets or not models:
            raise ValueError("dicts of datasets and models must be populated.")

        if isinstance(datasets, Dataset):
            # A single (untagged) dataset/model pair: normalize to the tagged form.
            datasets = {OBJECTIVE: datasets}
            models = {OBJECTIVE: models}  # type: ignore[dict-item]

        # reassure the type checker that everything is tagged
        datasets = cast(Dict[Tag, Dataset], datasets)
        models = cast(Dict[Tag, TrainableProbabilisticModelType], models)

        if datasets.keys() != models.keys():
            raise ValueError(
                f"datasets and models should contain the same keys. Got {datasets.keys()} and"
                f" {models.keys()} respectively."
            )

        self._datasets = datasets
        self._models = models

        self._query_plot_dfs: dict[int, pd.DataFrame] = {}
        self._observation_plot_dfs = observation_plot_init(self._datasets)

        if acquisition_rule is None:
            # The default rule only makes sense for the single-objective tagging convention.
            if self._datasets.keys() != {OBJECTIVE}:
                raise ValueError(
                    f"Default acquisition rule EfficientGlobalOptimization requires tag"
                    f" {OBJECTIVE!r}, got keys {self._datasets.keys()}"
                )
            self._acquisition_rule = cast(
                AcquisitionRule[TensorType, SearchSpaceType, TrainableProbabilisticModelType],
                EfficientGlobalOptimization(),
            )
        else:
            self._acquisition_rule = acquisition_rule
            if fit_model and isinstance(acquisition_rule, TURBO):
                warnings.warn(
                    """
                    Are you sure you want to keep fitting the global model even though you
                    are using TURBO which uses local models? This is a waste of computation.
                    """
                )

        if fit_model:
            with Timer() as initial_model_fitting_timer:
                for tag, model in self._models.items():
                    dataset = datasets[tag]
                    model.update(dataset)
                    model.optimize(dataset)
            summary_writer = logging.get_tensorboard_writer()
            if summary_writer:
                with summary_writer.as_default(step=logging.get_step_number()):
                    write_summary_initial_model_fit(
                        self._datasets, self._models, initial_model_fitting_timer
                    )

    def __repr__(self) -> str:
        """Print-friendly string representation"""
        # Fixed: the previous implementation embedded a stray quote character and an
        # unbalanced parenthesis in the formatted output.
        return (
            f"AskTellOptimizer({self._search_space!r}, {self._datasets!r}, "
            f"{self._models!r}, {self._acquisition_rule!r}, {self._acquisition_state!r})"
        )

    @property
    def datasets(self) -> Mapping[Tag, Dataset]:
        """The current datasets."""
        return self._datasets

    @property
    def dataset(self) -> Dataset:
        """The current dataset when there is just one dataset."""
        if len(self.datasets) == 1:
            return next(iter(self.datasets.values()))
        else:
            raise ValueError(f"Expected a single dataset, found {len(self.datasets)}")

    @property
    def models(self) -> Mapping[Tag, TrainableProbabilisticModelType]:
        """The current models."""
        return self._models

    @models.setter
    def models(self, models: Mapping[Tag, TrainableProbabilisticModelType]) -> None:
        """Update the current models."""
        if models.keys() != self.models.keys():
            raise ValueError(
                f"New models contain incorrect keys. Expected {self.models.keys()}, "
                f"received {models.keys()}."
            )
        self._models = dict(models)

    @property
    def model(self) -> TrainableProbabilisticModel:
        """The current model when there is just one model."""
        if len(self.models) == 1:
            return next(iter(self.models.values()))
        else:
            raise ValueError(f"Expected a single model, found {len(self.models)}")

    @model.setter
    def model(self, model: TrainableProbabilisticModelType) -> None:
        """Update the current model, using the OBJECTIVE tag."""
        if len(self.models) != 1:
            raise ValueError(f"Expected a single model, found {len(self.models)}")
        elif self.models.keys() != {OBJECTIVE}:
            raise ValueError(
                f"Expected a single model tagged OBJECTIVE, found {self.models.keys()}. "
                "To update this, pass in a dictionary to the models property instead."
            )
        self._models = {OBJECTIVE: model}

    @property
    def acquisition_state(self) -> StateType | None:
        """The current acquisition state."""
        return self._acquisition_state

    @classmethod
    def from_record(
        cls,
        record: Record[StateType] | FrozenRecord[StateType],
        search_space: SearchSpaceType,
        acquisition_rule: AcquisitionRule[
            TensorType | State[StateType | None, TensorType],
            SearchSpaceType,
            TrainableProbabilisticModelType,
        ]
        | None = None,
    ) -> AskTellOptimizer[SearchSpaceType, TrainableProbabilisticModelType]:
        """Creates new :class:`~AskTellOptimizer` instance from provided optimization state.
        Model training isn't triggered upon creation of the instance.
        :param record: Optimization state record.
        :param search_space: The space over which to search for the next query point.
        :param acquisition_rule: The acquisition rule, which defines how to search for a new point
            on each optimization step. Defaults to
            :class:`~trieste.acquisition.rule.EfficientGlobalOptimization` with default
            arguments.
        :return: New instance of :class:`~AskTellOptimizer`.
        """
        # we are recovering previously saved optimization state
        # so the model was already trained
        # thus there is no need to train it again
        # type ignore below is due to the fact that overloads don't allow
        # optional acquisition_rule along with acquisition_state
        return cls(
            search_space,
            record.datasets,
            cast(Mapping[Tag, TrainableProbabilisticModelType], record.models),
            acquisition_rule=acquisition_rule,  # type: ignore
            acquisition_state=record.acquisition_state,
            fit_model=False,
        )

    def to_record(self, copy: bool = True) -> Record[StateType]:
        """Collects the current state of the optimization, which includes datasets,
        models and acquisition state (if applicable).
        :param copy: Whether to return a copy of the current state or the original. Copying
            is not supported for all model types. However, continuing the optimization will
            modify the original state.
        :return: An optimization state record.
        """
        try:
            datasets_copy = deepcopy(self._datasets) if copy else self._datasets
            models_copy = deepcopy(self._models) if copy else self._models
            state_copy = deepcopy(self._acquisition_state) if copy else self._acquisition_state
        except Exception as e:
            raise NotImplementedError(
                "Failed to copy the optimization state. Some models do not support "
                "deepcopying (this is particularly common for deep neural network models). "
                "For these models, the `copy` argument of the `to_record` or `to_result` "
                "methods should be set to `False`. This means that the returned state may be "
                "modified by subsequent optimization."
            ) from e
        return Record(datasets=datasets_copy, models=models_copy, acquisition_state=state_copy)

    def to_result(self, copy: bool = True) -> OptimizationResult[StateType]:
        """Converts current state of the optimization
        into a :class:`~trieste.data.OptimizationResult` object.
        :param copy: Whether to return a copy of the current state or the original. Copying
            is not supported for all model types. However, continuing the optimization will
            modify the original state.
        :return: A :class:`~trieste.data.OptimizationResult` object.
        """
        record: Record[StateType] = self.to_record(copy=copy)
        return OptimizationResult(Ok(record), [])

    def ask(self) -> TensorType:
        """Suggests a point (or points in batch mode) to observe by optimizing the acquisition
        function. If the acquisition is stateful, its state is saved.
        :return: A :class:`TensorType` instance representing suggested point(s).
        """
        # This trick deserves a comment to explain what's going on
        # acquisition_rule.acquire can return different things:
        # - when acquisition has no state attached, it returns just points
        # - when acquisition has state, it returns a Callable
        #   which, when called, returns state and points
        # so code below is needed to cater for both cases
        with Timer() as query_point_generation_timer:
            points_or_stateful = self._acquisition_rule.acquire(
                self._search_space, self._models, datasets=self._datasets
            )
        if callable(points_or_stateful):
            self._acquisition_state, query_points = points_or_stateful(self._acquisition_state)
        else:
            query_points = points_or_stateful
        summary_writer = logging.get_tensorboard_writer()
        if summary_writer:
            with summary_writer.as_default(step=logging.get_step_number()):
                write_summary_query_points(
                    self._datasets,
                    self._models,
                    self._search_space,
                    query_points,
                    query_point_generation_timer,
                    self._query_plot_dfs,
                )
        return query_points

    def tell(self, new_data: Mapping[Tag, Dataset] | Dataset) -> None:
        """Updates optimizer state with new data.
        :param new_data: New observed data.
        :raise ValueError: If keys in ``new_data`` do not match those in already built dataset.
        """
        if isinstance(new_data, Dataset):
            new_data = {OBJECTIVE: new_data}
        if self._datasets.keys() != new_data.keys():
            raise ValueError(
                f"new_data keys {new_data.keys()} doesn't "
                f"match dataset keys {self._datasets.keys()}"
            )
        for tag in self._datasets:
            self._datasets[tag] += new_data[tag]
        with Timer() as model_fitting_timer:
            for tag, model in self._models.items():
                dataset = self._datasets[tag]
                model.update(dataset)
                model.optimize(dataset)
        summary_writer = logging.get_tensorboard_writer()
        if summary_writer:
            with summary_writer.as_default(step=logging.get_step_number()):
                write_summary_observations(
                    self._datasets,
                    self._models,
                    new_data,
                    model_fitting_timer,
                    self._observation_plot_dfs,
                )
| 17,459 | 37.886414 | 100 | py |
trieste-develop | trieste-develop/trieste/logging.py | """ This module contains logging utilities. """
from __future__ import annotations
import io
from contextlib import contextmanager
from typing import TYPE_CHECKING, Any, Callable, Iterator, Optional, TypeVar, Union
import absl
import tensorflow as tf
from tensorflow.python.eager import context
from trieste.types import TensorType
if TYPE_CHECKING:
import matplotlib
SummaryFilter = Callable[[str], bool]
def default_summary_filter(name: str) -> bool:
    """Default summary filter: omits any names that start with _."""
    # A summary is treated as private when its own name, or any path component
    # within it, starts with an underscore.
    is_private = name.startswith("_") or "/_" in name
    return not is_private
# Module-level logging state, manipulated via the setter/getter functions below:
# the active TensorBoard writer (None disables logging), the current optimization
# step number, and the filter deciding which summary names are written.
_TENSORBOARD_WRITER: Optional[tf.summary.SummaryWriter] = None
_STEP_NUMBER: int = 0
_SUMMARY_FILTER: SummaryFilter = default_summary_filter
def set_tensorboard_writer(summary_writer: Optional[tf.summary.SummaryWriter]) -> None:
    """
    Install a :class:`~tf.summary.SummaryWriter` instance for TensorBoard logging,
    or pass `None` to disable logging.
    :param summary_writer: optional summary writer instance.
    """
    global _TENSORBOARD_WRITER
    _TENSORBOARD_WRITER = summary_writer
def get_tensorboard_writer() -> Optional[tf.summary.SummaryWriter]:
    """
    Return the :class:`~tf.summary.SummaryWriter` currently used for TensorBoard
    logging, or `None` when logging is disabled.
    :return: optional summary writer instance.
    """
    return _TENSORBOARD_WRITER
@contextmanager
def tensorboard_writer(summary_writer: Optional[tf.summary.SummaryWriter]) -> Iterator[None]:
    """
    A context manager for setting or overriding a TensorBoard summary writer inside a code block.
    The previous writer is restored on exit, even if the managed block raises.
    :param summary_writer: optional summary writer instance.
    """
    old_writer = get_tensorboard_writer()
    set_tensorboard_writer(summary_writer)
    try:
        yield
    finally:
        # Fixed: without try/finally an exception raised inside the with-block
        # would leave the overridden writer permanently installed.
        set_tensorboard_writer(old_writer)
def set_step_number(step_number: int) -> None:
    """
    Set an optimization step number to use for logging purposes.
    :param step_number: current step number
    :raise ValueError: if step_number < 0
    """
    global _STEP_NUMBER
    if step_number < 0:
        # The docstring has always promised this check; previously it was missing.
        raise ValueError(f"step_number must be non-negative, got {step_number}")
    _STEP_NUMBER = step_number
def get_step_number() -> int:
    """
    Return the optimization step number currently used for logging purposes.
    :return: current step number.
    """
    return _STEP_NUMBER
@contextmanager
def step_number(step_number: int) -> Iterator[None]:
    """
    A context manager for setting or overriding the optimization step number inside a code block.
    The previous step number is restored on exit, even if the managed block raises.
    :param step_number: current step number
    """
    old_step_number = get_step_number()
    set_step_number(step_number)
    try:
        yield
    finally:
        # Fixed: without try/finally an exception raised inside the with-block
        # would leave the overridden step number permanently installed.
        set_step_number(old_step_number)
def set_summary_filter(summary_filter: SummaryFilter) -> None:
    """
    Install a new filter on summary names. The default filter omits only names
    that start with _.
    :param summary_filter: new summary filter
    """
    global _SUMMARY_FILTER
    _SUMMARY_FILTER = summary_filter
def get_summary_filter() -> SummaryFilter:
    """
    Return the filter currently applied to summary names. The default filter
    omits only names that start with _.
    :return: current summary filter.
    """
    return _SUMMARY_FILTER
def get_current_name_scope() -> str:
    """Returns the full name scope. Copied from TF 2.5."""
    eager_context = context.context()
    if not eager_context.executing_eagerly():
        # Graph mode: the active scope lives on the default graph.
        return tf.compat.v1.get_default_graph().get_name_scope()
    return eager_context.scope_name.rstrip("/")
def include_summary(name: str) -> bool:
    """
    Whether a summary name should be included.
    :param: full summary name (including name spaces)
    :return: whether the summary should be included.
    """
    # Prefix the current name scope so the filter sees the fully-qualified name.
    scoped_name = "/".join([get_current_name_scope(), name])
    return _SUMMARY_FILTER(scoped_name)
# Generic payload type: data that may be passed directly or wrapped in a closure.
T = TypeVar("T")
def evaluate_data(data: T | Callable[[], T]) -> T:
    """Return the passed in data, evaluating it if it's inside a closure."""
    if callable(data):
        return data()
    return data
def histogram(name: str, data: TensorType | Callable[[], TensorType], **kwargs: Any) -> bool:
    """
    Wrapper for tf.summary.histogram that first filters out unwanted summaries by name.
    Accepts either data or closures that only get evaluated when logged.
    """
    # Guard clause: filtered-out names are never evaluated or written.
    if not include_summary(name):
        return False
    try:
        return tf.summary.histogram(name, evaluate_data(data), **kwargs)
    except Exception as e:
        # Logging must never crash the optimization; report and carry on.
        tf.print(
            f"Failed to write tensorboard histogram summary '{name}':\n\n{e}",
            output_stream=absl.logging.INFO,
        )
    return False
def scalar(name: str, data: float | Callable[[], float], **kwargs: Any) -> bool:
    """
    Wrapper for tf.summary.scalar that first filters out unwanted summaries by name.
    Accepts either data or closures that only get evaluated when logged.
    """
    # Guard clause: filtered-out names are never evaluated or written.
    if not include_summary(name):
        return False
    try:
        return tf.summary.scalar(name, evaluate_data(data), **kwargs)
    except Exception as e:
        # Logging must never crash the optimization; report and carry on.
        tf.print(
            f"Failed to write tensorboard scalar summary '{name}':\n\n{e}",
            output_stream=absl.logging.INFO,
        )
    return False
def text(name: str, data: str | Callable[[], str], **kwargs: Any) -> bool:
    """
    Wrapper for tf.summary.text that first filters out unwanted summaries by name.
    Accepts either data or closures that only get evaluated when logged.
    """
    # Guard clause: filtered-out names are never evaluated or written.
    if not include_summary(name):
        return False
    try:
        return tf.summary.text(name, evaluate_data(data), **kwargs)
    except Exception as e:
        # Logging must never crash the optimization; report and carry on.
        tf.print(
            f"Failed to write tensorboard text summary '{name}':\n\n{e}",
            output_stream=absl.logging.INFO,
        )
    return False
def pyplot(
    name: str, figure: Union["matplotlib.figure.Figure", Callable[[], "matplotlib.figure.Figure"]]
) -> bool:
    """
    Utility function for passing a matplotlib figure to tf.summary.image.
    Accepts either data or closures that only get evaluated when logged.
    """
    # Guard clause: filtered-out names are never evaluated or written.
    if not include_summary(name):
        return False
    try:
        fig = evaluate_data(figure)
        # Rasterize the figure to PNG bytes, then hand them to TensorFlow.
        with io.BytesIO() as buffer:
            fig.savefig(buffer, dpi=150.0, format="png")
            buffer.seek(0)
            png_bytes = buffer.getvalue()
        image = tf.expand_dims(tf.image.decode_png(png_bytes, channels=4), 0)
        return tf.summary.image(name, image)
    except Exception as e:
        # Logging must never crash the optimization; report and carry on.
        tf.print(
            f"Failed to write tensorboard image summary '{name}':\n\n{e}",
            output_stream=absl.logging.INFO,
        )
    return False
| 7,102 | 30.153509 | 98 | py |
trieste-develop | trieste-develop/trieste/space.py | """ This module contains implementations of various types of search space. """
from __future__ import annotations
import operator
from abc import ABC, abstractmethod
from functools import reduce
from typing import Callable, Optional, Sequence, Tuple, TypeVar, Union, overload
import numpy as np
import scipy.optimize as spo
import tensorflow as tf
import tensorflow_probability as tfp
from .types import TensorType
# Public module constants shared by all search-space implementations below.
SearchSpaceType = TypeVar("SearchSpaceType", bound="SearchSpace")
""" A type variable bound to :class:`SearchSpace`. """
DEFAULT_DTYPE: tf.DType = tf.float64
""" Default dtype to use when none is provided. """
# Distinct exception type so callers can catch sampling timeouts specifically.
class SampleTimeoutError(Exception):
    """Raised when sampling from a search space has timed out."""
class NonlinearConstraint(spo.NonlinearConstraint):  # type: ignore[misc]
    """
    A wrapper class for nonlinear constraints on variables. The constraints expression is of the
    form::
        lb <= fun(x) <= ub
    :param fun: The function defining the nonlinear constraints; with input shape [..., D] and
        output shape [..., 1], returning a scalar value for each input point.
    :param lb: The lower bound of the constraint. Should be a scalar or of shape [1].
    :param ub: The upper bound of the constraint. Should be a scalar or of shape [1].
    :param keep_feasible: Keep the constraints feasible throughout optimization iterations if this
        is `True`.
    """

    def __init__(
        self,
        fun: Callable[[TensorType], TensorType],
        lb: Sequence[float] | TensorType,
        ub: Sequence[float] | TensorType,
        keep_feasible: bool = False,
    ):
        # Implement caching to avoid calling the constraint function multiple times to get value
        # and gradient.
        def _constraint_value_and_gradient(x: TensorType) -> Tuple[TensorType, TensorType]:
            val, grad = tfp.math.value_and_gradient(fun, x)
            tf.debugging.assert_shapes(
                [(val, [..., 1])],
                message="Nonlinear constraint only supports single output function.",
            )
            return tf.cast(val, dtype=x.dtype), tf.cast(grad, dtype=x.dtype)

        # Most recent evaluation point with its value/gradient; the optimizer
        # typically requests the value and the Jacobian at the same x back-to-back.
        cache_x: TensorType = tf.constant([])
        cache_f: TensorType = tf.constant([])
        cache_df_dx: TensorType = tf.constant([])

        def val_fun(x: TensorType) -> TensorType:
            nonlocal cache_x, cache_f, cache_df_dx
            if not np.array_equal(x, cache_x):
                cache_f, cache_df_dx = _constraint_value_and_gradient(x)
                cache_x = x
            return cache_f

        def jac_fun(x: TensorType) -> TensorType:
            nonlocal cache_x, cache_f, cache_df_dx
            if not np.array_equal(x, cache_x):
                cache_f, cache_df_dx = _constraint_value_and_gradient(x)
                cache_x = x
            return cache_df_dx

        self._orig_fun = fun  # Used for constraints equality check.
        super().__init__(val_fun, lb, ub, jac=jac_fun, keep_feasible=keep_feasible)

    def residual(self, points: TensorType) -> TensorType:
        """
        Calculate the residuals between the constraint function and its lower/upper limits.
        :param points: The points to calculate the residuals for, with shape [..., D].
        :return: A tensor containing the lower and upper residual values with shape [..., 2].
        """
        tf.debugging.assert_rank_at_least(points, 2)
        non_d_axes = np.ones_like(points.shape)[:-1]  # Avoid adding axes shape to static graph.
        lb = tf.cast(tf.reshape(self.lb, (*non_d_axes, -1)), dtype=points.dtype)
        ub = tf.cast(tf.reshape(self.ub, (*non_d_axes, -1)), dtype=points.dtype)
        fval = self.fun(points)
        fval = tf.reshape(fval, (*points.shape[:-1], -1))  # Atleast 2D.
        fval = tf.cast(fval, dtype=points.dtype)
        values = [fval - lb, ub - fval]
        values = tf.concat(values, axis=-1)
        return values

    def __repr__(self) -> str:
        # Fixed: the previous triple-quoted f-string embedded a stray quote and
        # surrounding newlines/indentation in the repr output.
        return (
            f"NonlinearConstraint({self.fun!r}, {self.lb!r}, "
            f"{self.ub!r}, {self.keep_feasible!r})"
        )

    def __eq__(self, other: object) -> bool:
        """
        :param other: A constraint.
        :return: Whether the constraint is identical to this one.
        """
        if not isinstance(other, NonlinearConstraint):
            return False
        return bool(
            self._orig_fun == other._orig_fun
            and tf.reduce_all(self.lb == other.lb)
            and tf.reduce_all(self.ub == other.ub)
            and self.keep_feasible == other.keep_feasible
        )
class LinearConstraint(spo.LinearConstraint):  # type: ignore[misc]
    """
    A wrapper class for linear constraints on variables. The constraints expression is of the form::
        lb <= A @ x <= ub
    :param A: The matrix defining the linear constraints with shape [M, D], where M is the
        number of constraints.
    :param lb: The lower bound of the constraint. Should be a scalar or of shape [M].
    :param ub: The upper bound of the constraint. Should be a scalar or of shape [M].
    :param keep_feasible: Keep the constraints feasible throughout optimization iterations if this
        is `True`.
    """

    def __init__(
        self,
        A: TensorType,
        lb: Sequence[float] | TensorType,
        ub: Sequence[float] | TensorType,
        keep_feasible: bool = False,
    ):
        super().__init__(A, lb, ub, keep_feasible=keep_feasible)

    def residual(self, points: TensorType) -> TensorType:
        """
        Calculate the residuals between the constraint function and its lower/upper limits.
        :param points: The points to calculate the residuals for, with shape [..., D].
        :return: A tensor containing the lower and upper residual values with shape [..., M*2].
        """
        tf.debugging.assert_rank_at_least(points, 2)
        non_d_axes = np.ones_like(points.shape)[:-1]  # Avoid adding axes shape to static graph.
        lb = tf.cast(tf.reshape(self.lb, (*non_d_axes, -1)), dtype=points.dtype)
        ub = tf.cast(tf.reshape(self.ub, (*non_d_axes, -1)), dtype=points.dtype)
        A = tf.cast(self.A, dtype=points.dtype)
        fval = tf.linalg.matmul(points, A, transpose_b=True)
        fval = tf.reshape(fval, (*points.shape[:-1], -1))  # Atleast 2D.
        values = [fval - lb, ub - fval]
        values = tf.concat(values, axis=-1)
        return values

    def __repr__(self) -> str:
        # Fixed: the previous triple-quoted f-string embedded a stray quote and
        # surrounding newlines/indentation in the repr output.
        return (
            f"LinearConstraint({self.A!r}, {self.lb!r}, "
            f"{self.ub!r}, {self.keep_feasible!r})"
        )

    def __eq__(self, other: object) -> bool:
        """
        :param other: A constraint.
        :return: Whether the constraint is identical to this one.
        """
        if not isinstance(other, LinearConstraint):
            return False
        return bool(
            tf.reduce_all(self.A == other.A)
            and tf.reduce_all(self.lb == other.lb)
            and tf.reduce_all(self.ub == other.ub)
            and tf.reduce_all(self.keep_feasible == other.keep_feasible)
        )
# Union of the explicit constraint types accepted by search spaces.
Constraint = Union[LinearConstraint, NonlinearConstraint]
""" Type alias for constraints. """
class SearchSpace(ABC):
    """
    A :class:`SearchSpace` represents the domain over which an objective function is optimized.
    """

    @abstractmethod
    def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
        """
        :param num_samples: The number of points to sample from this search space.
        :param seed: Random seed for reproducibility.
        :return: ``num_samples`` i.i.d. random points, sampled uniformly from this search space.
        """

    def contains(self, value: TensorType) -> TensorType:
        """Method for checking membership.
        :param value: A point or points to check for membership of this :class:`SearchSpace`.
        :return: A boolean array showing membership for each point in value.
        :raise ValueError (or tf.errors.InvalidArgumentError): If ``value`` has a different
            dimensionality points from this :class:`SearchSpace`.
        """
        # Shared shape validation; the space-specific membership test is delegated
        # to the _contains hook.
        tf.debugging.assert_equal(
            tf.rank(value) > 0 and tf.shape(value)[-1] == self.dimension,
            True,
            message=f"""
                Dimensionality mismatch: space is {self.dimension}, value is {tf.shape(value)[-1]}
                """,
        )
        return self._contains(value)

    @abstractmethod
    def _contains(self, value: TensorType) -> TensorType:
        """Space-specific implementation of membership. Can assume valid input shape.
        :param value: A point or points to check for membership of this :class:`SearchSpace`.
        :return: A boolean array showing membership for each point in value.
        """

    def __contains__(self, value: TensorType) -> bool:
        """Method called by `in` operator. Doesn't support broadcasting as Python insists
        on converting the result to a boolean.
        :param value: A single point to check for membership of this :class:`SearchSpace`.
        :return: `True` if ``value`` is a member of this search space, else `False`.
        :raise ValueError (or tf.errors.InvalidArgumentError): If ``value`` has a different
            dimensionality from this :class:`SearchSpace`.
        """
        tf.debugging.assert_equal(
            tf.rank(value) == 1,
            True,
            message=f"""
                Rank mismatch: expected 1, got {tf.rank(value)}. To get a tensor of boolean
                membership values from a tensor of points, use `space.contains(value)`
                rather than `value in space`.
                """,
        )
        return self.contains(value)

    @property
    @abstractmethod
    def dimension(self) -> TensorType:
        """The number of inputs in this search space."""

    @property
    @abstractmethod
    def lower(self) -> TensorType:
        """The lowest value taken by each search space dimension."""

    @property
    @abstractmethod
    def upper(self) -> TensorType:
        """The highest value taken by each search space dimension."""

    @abstractmethod
    def product(self: SearchSpaceType, other: SearchSpaceType) -> SearchSpaceType:
        """
        :param other: A search space of the same type as this search space.
        :return: The Cartesian product of this search space with the ``other``.
        """

    @overload
    def __mul__(self: SearchSpaceType, other: SearchSpaceType) -> SearchSpaceType:
        ...

    @overload
    def __mul__(self: SearchSpaceType, other: SearchSpace) -> SearchSpace:  # type: ignore[misc]
        # mypy complains that this is superfluous, but it seems to use it fine to infer
        # that Box * Box = Box, while Box * Discrete = SearchSpace.
        ...

    def __mul__(self, other: SearchSpace) -> SearchSpace:
        """
        :param other: A search space.
        :return: The Cartesian product of this search space with the ``other``.
            If both spaces are of the same type then this calls the :meth:`product` method.
            Otherwise, it generates a :class:`TaggedProductSearchSpace`.
        """
        # If the search space has any constraints, always return a tagged product search space.
        if not self.has_constraints and not other.has_constraints and isinstance(other, type(self)):
            return self.product(other)
        return TaggedProductSearchSpace((self, other))

    def __pow__(self: SearchSpaceType, other: int) -> SearchSpaceType:
        """
        Return the Cartesian product of ``other`` instances of this search space. For example, for
        an exponent of `3`, and search space `s`, this is `s ** 3`, which is equivalent to
        `s * s * s`.
        :param other: The exponent, or number of instances of this search space to multiply
            together. Must be strictly positive.
        :return: The Cartesian product of ``other`` instances of this search space.
        :raise tf.errors.InvalidArgumentError: If the exponent ``other`` is less than 1.
        """
        tf.debugging.assert_positive(other, message="Exponent must be strictly positive")
        # Repeated application of __mul__ / product.
        return reduce(operator.mul, [self] * other)

    def discretize(self, num_samples: int) -> DiscreteSearchSpace:
        """
        :param num_samples: The number of points in the :class:`DiscreteSearchSpace`.
        :return: A discrete search space consisting of ``num_samples`` points sampled uniformly from
            this search space.
        :raise NotImplementedError: If this :class:`SearchSpace` has constraints.
        """
        if self.has_constraints:  # Constraints are not supported.
            raise NotImplementedError(
                "Discretization is currently not supported in the presence of constraints."
            )
        return DiscreteSearchSpace(points=self.sample(num_samples))

    @abstractmethod
    def __eq__(self, other: object) -> bool:
        """
        :param other: A search space.
        :return: Whether the search space is identical to this one.
        """

    @property
    def constraints(self) -> Sequence[Constraint]:
        """The sequence of explicit constraints specified in this search space."""
        # Unconstrained by default; constrained subclasses override this.
        return []

    def constraints_residuals(self, points: TensorType) -> TensorType:
        """
        Return residuals for all the constraints in this :class:`SearchSpace`.
        :param points: The points to get the residuals for, with shape [..., D].
        :return: A tensor of all the residuals with shape [..., C], where C is the total number of
            constraints.
        :raise NotImplementedError: If this :class:`SearchSpace` does not support constraints.
        """
        raise NotImplementedError("Constraints are currently not supported for this search space.")

    def is_feasible(self, points: TensorType) -> TensorType:
        """
        Checks if points satisfy the explicit constraints of this :class:`SearchSpace`.
        Note membership of the search space is not checked.
        :param points: The points to check constraints feasibility for, with shape [..., D].
        :return: A tensor of booleans. Returns `True` for each point if it is feasible in this
            search space, else `False`.
        :raise NotImplementedError: If this :class:`SearchSpace` has constraints.
        """
        # Everything is feasible in the absence of constraints. Must be overriden if there are
        # constraints.
        if self.has_constraints:
            raise NotImplementedError("Feasibility check is not implemented for this search space.")
        return tf.cast(tf.ones(points.shape[:-1]), dtype=bool)

    @property
    def has_constraints(self) -> bool:
        """Returns `True` if this search space has any explicit constraints specified."""
        # By default assume there are no constraints; can be overridden by a subclass.
        return False
class DiscreteSearchSpace(SearchSpace):
r"""
A discrete :class:`SearchSpace` representing a finite set of :math:`D`-dimensional points in
:math:`\mathbb{R}^D`.
For example:
>>> points = tf.constant([[-1.0, 0.4], [-1.0, 0.6], [0.0, 0.4]])
>>> search_space = DiscreteSearchSpace(points)
>>> assert tf.constant([0.0, 0.4]) in search_space
>>> assert tf.constant([1.0, 0.5]) not in search_space
"""
def __init__(self, points: TensorType):
"""
:param points: The points that define the discrete space, with shape ('N', 'D').
:raise ValueError (or tf.errors.InvalidArgumentError): If ``points`` has an invalid shape.
"""
tf.debugging.assert_shapes([(points, ("N", "D"))])
self._points = points
self._dimension = tf.shape(self._points)[-1]
def __repr__(self) -> str:
return f"DiscreteSearchSpace({self._points!r})"
@property
def lower(self) -> TensorType:
"""The lowest value taken across all points by each search space dimension."""
return tf.reduce_min(self.points, -2)
@property
def upper(self) -> TensorType:
"""The highest value taken across all points by each search space dimension."""
return tf.reduce_max(self.points, -2)
@property
def points(self) -> TensorType:
"""All the points in this space."""
return self._points
@property
def dimension(self) -> TensorType:
"""The number of inputs in this search space."""
return self._dimension
def _contains(self, value: TensorType) -> TensorType:
comparison = tf.math.equal(self._points, tf.expand_dims(value, -2)) # [..., N, D]
return tf.reduce_any(tf.reduce_all(comparison, axis=-1), axis=-1) # [...]
def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
"""
:param num_samples: The number of points to sample from this search space.
:param seed: Random seed for reproducibility.
:return: ``num_samples`` i.i.d. random points, sampled uniformly,
from this search space.
"""
if seed is not None: # ensure reproducibility
tf.random.set_seed(seed)
if num_samples == 0:
return self.points[:0, :]
else:
sampled_indices = tf.random.categorical(
tf.ones((1, tf.shape(self.points)[0])), num_samples, seed=seed
)
return tf.gather(self.points, sampled_indices)[0, :, :] # [num_samples, D]
def product(self, other: DiscreteSearchSpace) -> DiscreteSearchSpace:
r"""
Return the Cartesian product of the two :class:`DiscreteSearchSpace`\ s. For example:
>>> sa = DiscreteSearchSpace(tf.constant([[0, 1], [2, 3]]))
>>> sb = DiscreteSearchSpace(tf.constant([[4, 5, 6], [7, 8, 9]]))
>>> (sa * sb).points.numpy()
array([[0, 1, 4, 5, 6],
[0, 1, 7, 8, 9],
[2, 3, 4, 5, 6],
[2, 3, 7, 8, 9]], dtype=int32)
:param other: A :class:`DiscreteSearchSpace` with :attr:`points` of the same dtype as this
search space.
:return: The Cartesian product of the two :class:`DiscreteSearchSpace`\ s.
:raise TypeError: If one :class:`DiscreteSearchSpace` has :attr:`points` of a different
dtype to the other.
"""
if self.points.dtype is not other.points.dtype:
return NotImplemented
tile_self = tf.tile(self.points[:, None], [1, len(other.points), 1])
tile_other = tf.tile(other.points[None], [len(self.points), 1, 1])
cartesian_product = tf.concat([tile_self, tile_other], axis=2)
product_space_dimension = self.points.shape[-1] + other.points.shape[-1]
return DiscreteSearchSpace(tf.reshape(cartesian_product, [-1, product_space_dimension]))
def __eq__(self, other: object) -> bool:
"""
:param other: A search space.
:return: Whether the search space is identical to this one.
"""
if not isinstance(other, DiscreteSearchSpace):
return NotImplemented
return bool(tf.reduce_all(tf.sort(self.points, 0) == tf.sort(other.points, 0)))
    def __deepcopy__(self, memo: dict[int, object]) -> DiscreteSearchSpace:
        # The space is treated as immutable, so deep copies can safely share this instance.
        return self
class Box(SearchSpace):
    r"""
    Continuous :class:`SearchSpace` representing a :math:`D`-dimensional box in
    :math:`\mathbb{R}^D`. Mathematically it is equivalent to the Cartesian product of :math:`D`
    closed bounded intervals in :math:`\mathbb{R}`.
    """
    @overload
    def __init__(
        self,
        lower: Sequence[float],
        upper: Sequence[float],
        constraints: Optional[Sequence[Constraint]] = None,
        ctol: float | TensorType = 1e-7,
    ):
        ...
    @overload
    def __init__(
        self,
        lower: TensorType,
        upper: TensorType,
        constraints: Optional[Sequence[Constraint]] = None,
        ctol: float | TensorType = 1e-7,
    ):
        ...
    def __init__(
        self,
        lower: Sequence[float] | TensorType,
        upper: Sequence[float] | TensorType,
        constraints: Optional[Sequence[Constraint]] = None,
        ctol: float | TensorType = 1e-7,
    ):
        r"""
        If ``lower`` and ``upper`` are `Sequence`\ s of floats (such as lists or tuples),
        they will be converted to tensors of dtype `DEFAULT_DTYPE`.
        :param lower: The lower (inclusive) bounds of the box. Must have shape [D] for positive D,
            and if a tensor, must have float type.
        :param upper: The upper (inclusive) bounds of the box. Must have shape [D] for positive D,
            and if a tensor, must have float type.
        :param constraints: Sequence of explicit input constraints for this search space.
        :param ctol: Tolerance to use to check constraints satisfaction.
        :raise ValueError (or tf.errors.InvalidArgumentError): If any of the following are true:
            - ``lower`` and ``upper`` have invalid shapes.
            - ``lower`` and ``upper`` do not have the same floating point type.
            - ``upper`` is not greater than ``lower`` across all dimensions.
        """
        tf.debugging.assert_shapes([(lower, ["D"]), (upper, ["D"])])
        tf.assert_rank(lower, 1)
        tf.assert_rank(upper, 1)
        tf.debugging.assert_non_negative(ctol, message="Tolerance must be non-negative")
        # Sequences of Python floats are converted with the library default dtype;
        # tensor inputs keep their own dtype, which is validated as float just below.
        if isinstance(lower, Sequence):
            self._lower = tf.constant(lower, dtype=DEFAULT_DTYPE)
            self._upper = tf.constant(upper, dtype=DEFAULT_DTYPE)
        else:
            self._lower = tf.convert_to_tensor(lower)
            self._upper = tf.convert_to_tensor(upper)
        tf.debugging.assert_same_float_dtype([self._lower, self._upper])
        tf.debugging.assert_less(self._lower, self._upper)
        self._dimension = tf.shape(self._upper)[-1]
        if constraints is None:
            self._constraints: Sequence[Constraint] = []
        else:
            self._constraints = constraints
        self._ctol = ctol
    def __repr__(self) -> str:
        return f"Box({self._lower!r}, {self._upper!r}, {self._constraints!r}, {self._ctol!r})"
    @property
    def lower(self) -> tf.Tensor:
        """The lower bounds of the box."""
        return self._lower
    @property
    def upper(self) -> tf.Tensor:
        """The upper bounds of the box."""
        return self._upper
    @property
    def dimension(self) -> TensorType:
        """The number of inputs in this search space."""
        return self._dimension
    @property
    def constraints(self) -> Sequence[Constraint]:
        """The sequence of explicit constraints specified in this search space."""
        return self._constraints
    def _contains(self, value: TensorType) -> TensorType:
        """
        For each point in ``value``, return `True` if the point is a member of this search space,
        else `False`. A point is a member if all of its coordinates lie in the closed intervals
        bounded by the lower and upper bounds.
        :param value: A point or points to check for membership of this :class:`SearchSpace`.
        :return: A boolean array showing membership for each point in value.
        """
        return tf.reduce_all(value >= self._lower, axis=-1) & tf.reduce_all(
            value <= self._upper, axis=-1
        )
    def _sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
        # Internal common method to sample randomly from the space.
        dim = tf.shape(self._lower)[-1]
        return tf.random.uniform(
            (num_samples, dim),
            minval=self._lower,
            maxval=self._upper,
            dtype=self._lower.dtype,
            seed=seed,
        )
    def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
        """
        Sample randomly from the space.
        :param num_samples: The number of points to sample from this search space.
        :param seed: Random seed for reproducibility.
        :return: ``num_samples`` i.i.d. random points, sampled uniformly,
            from this search space with shape '[num_samples, D]' , where D is the search space
            dimension.
        """
        tf.debugging.assert_non_negative(num_samples)
        if seed is not None:  # ensure reproducibility
            tf.random.set_seed(seed)
        return self._sample(num_samples, seed)
    def _sample_halton(
        self,
        start: int,
        num_samples: int,
        seed: Optional[int] = None,
    ) -> TensorType:
        # Internal common method to sample from the space using a Halton sequence,
        # starting at offset ``start`` within the sequence.
        tf.debugging.assert_non_negative(num_samples)
        if num_samples == 0:
            # NOTE(review): this empty result is a float32 tensor of shape [0], unlike the
            # [0, D] shape/dtype of non-empty samples -- confirm callers tolerate this.
            return tf.constant([])
        if seed is not None:  # ensure reproducibility
            tf.random.set_seed(seed)
        dim = tf.shape(self._lower)[-1]
        sequence_indices = tf.range(start=start, limit=start + num_samples, dtype=tf.int32)
        # Halton points are generated in the unit cube, then rescaled into the box.
        return (self._upper - self._lower) * tfp.mcmc.sample_halton_sequence(
            dim=dim, sequence_indices=sequence_indices, dtype=self._lower.dtype, seed=seed
        ) + self._lower
    def sample_halton(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
        """
        Sample from the space using a Halton sequence. The resulting samples are guaranteed to be
        diverse and are reproducible by using the same choice of ``seed``.
        :param num_samples: The number of points to sample from this search space.
        :param seed: Random seed for the halton sequence
        :return: ``num_samples`` of points, using halton sequence with shape '[num_samples, D]' ,
            where D is the search space dimension.
        """
        return self._sample_halton(0, num_samples, seed)
    def sample_sobol(self, num_samples: int, skip: Optional[int] = None) -> TensorType:
        """
        Sample a diverse set from the space using a Sobol sequence.
        If ``skip`` is specified, then the resulting samples are reproducible.
        :param num_samples: The number of points to sample from this search space.
        :param skip: The number of initial points of the Sobol sequence to skip
        :return: ``num_samples`` of points, using sobol sequence with shape '[num_samples, D]' ,
            where D is the search space dimension.
        """
        tf.debugging.assert_non_negative(num_samples)
        if num_samples == 0:
            return tf.constant([])
        if skip is None:  # generate random skip
            skip = tf.random.uniform([1], maxval=2**16, dtype=tf.int32)[0]
        dim = tf.shape(self._lower)[-1]
        # Sobol points are generated in the unit cube, then rescaled into the box.
        return (self._upper - self._lower) * tf.math.sobol_sample(
            dim=dim, num_results=num_samples, dtype=self._lower.dtype, skip=skip
        ) + self._lower
    def _sample_feasible_loop(
        self,
        num_samples: int,
        sampler: Callable[[], TensorType],
        max_tries: int = 100,
    ) -> TensorType:
        """
        Rejection sampling using provided callable. Try ``max_tries`` number of times to find
        ``num_samples`` feasible points.
        :param num_samples: The number of feasible points to sample from this search space.
        :param sampler: Callable to return samples. Called potentially multiple times.
        :param max_tries: Maximum attempts to sample the requested number of points.
        :return: ``num_samples`` feasible points sampled using ``sampler``.
        :raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
            sampled.
        """
        xs = []
        count = 0
        tries = 0
        while count < num_samples and tries < max_tries:
            tries += 1
            xi = sampler()
            # Keep only the feasible subset of this batch of candidates.
            mask = self.is_feasible(xi)
            xo = tf.boolean_mask(xi, mask)
            xs.append(xo)
            count += xo.shape[0]
        if count < num_samples:
            raise SampleTimeoutError(
                f"""Failed to sample {num_samples} feasible point(s), even after {tries} attempts.
                Sampled only {count} feasible point(s)."""
            )
        # We may have overshot on the final batch; truncate to exactly num_samples.
        xs = tf.concat(xs, axis=0)[:num_samples]
        return xs
    def sample_feasible(
        self, num_samples: int, seed: Optional[int] = None, max_tries: int = 100
    ) -> TensorType:
        """
        Sample feasible points randomly from the space.
        :param num_samples: The number of feasible points to sample from this search space.
        :param seed: Random seed for reproducibility.
        :param max_tries: Maximum attempts to sample the requested number of points.
        :return: ``num_samples`` i.i.d. random points, sampled uniformly,
            from this search space with shape '[num_samples, D]' , where D is the search space
            dimension.
        :raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
            sampled.
        """
        tf.debugging.assert_non_negative(num_samples)
        # Without constraints or zero-num-samples use the normal sample method directly.
        if not self.has_constraints or num_samples == 0:
            return self.sample(num_samples, seed)
        if seed is not None:  # ensure reproducibility
            tf.random.set_seed(seed)
        def _sampler() -> TensorType:
            return self._sample(num_samples, seed)
        return self._sample_feasible_loop(num_samples, _sampler, max_tries)
    def sample_halton_feasible(
        self, num_samples: int, seed: Optional[int] = None, max_tries: int = 100
    ) -> TensorType:
        """
        Sample feasible points from the space using a Halton sequence. The resulting samples are
        guaranteed to be diverse and are reproducible by using the same choice of ``seed``.
        :param num_samples: The number of feasible points to sample from this search space.
        :param seed: Random seed for the halton sequence
        :param max_tries: Maximum attempts to sample the requested number of points.
        :return: ``num_samples`` of points, using halton sequence with shape '[num_samples, D]' ,
            where D is the search space dimension.
        :raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
            sampled.
        """
        tf.debugging.assert_non_negative(num_samples)
        # Without constraints or zero-num-samples use the normal sample method directly.
        if not self.has_constraints or num_samples == 0:
            return self.sample_halton(num_samples, seed)
        start = 0
        def _sampler() -> TensorType:
            nonlocal start
            # Global seed is set on every call in _sample_halton() so that we always sample from
            # the same (randomised) sequence, and skip the relevant number of beginning samples.
            samples = self._sample_halton(start, num_samples, seed)
            start += num_samples
            return samples
        return self._sample_feasible_loop(num_samples, _sampler, max_tries)
    def sample_sobol_feasible(
        self, num_samples: int, skip: Optional[int] = None, max_tries: int = 100
    ) -> TensorType:
        """
        Sample a diverse set of feasible points from the space using a Sobol sequence.
        If ``skip`` is specified, then the resulting samples are reproducible.
        :param num_samples: The number of feasible points to sample from this search space.
        :param skip: The number of initial points of the Sobol sequence to skip
        :param max_tries: Maximum attempts to sample the requested number of points.
        :return: ``num_samples`` of points, using sobol sequence with shape '[num_samples, D]' ,
            where D is the search space dimension.
        :raise SampleTimeoutError: If ``max_tries`` are exhausted before ``num_samples`` are
            sampled.
        """
        tf.debugging.assert_non_negative(num_samples)
        # Without constraints or zero-num-samples use the normal sample method directly.
        if not self.has_constraints or num_samples == 0:
            return self.sample_sobol(num_samples, skip)
        if skip is None:  # generate random skip
            skip = tf.random.uniform([1], maxval=2**16, dtype=tf.int32)[0]
        _skip: TensorType = skip  # To keep mypy happy.
        def _sampler() -> TensorType:
            nonlocal _skip
            samples = self.sample_sobol(num_samples, skip=_skip)
            # Skip the relevant number of beginning samples from previous iterations.
            _skip += num_samples
            return samples
        return self._sample_feasible_loop(num_samples, _sampler, max_tries)
    def product(self, other: Box) -> Box:
        r"""
        Return the Cartesian product of the two :class:`Box`\ es (concatenating their respective
        lower and upper bounds). For example:
        >>> unit_interval = Box([0.0], [1.0])
        >>> square_at_origin = Box([-2.0, -2.0], [2.0, 2.0])
        >>> new_box = unit_interval * square_at_origin
        >>> new_box.lower.numpy()
        array([ 0., -2., -2.])
        >>> new_box.upper.numpy()
        array([1., 2., 2.])
        :param other: A :class:`Box` with bounds of the same type as this :class:`Box`.
        :return: The Cartesian product of the two :class:`Box`\ es.
        :raise TypeError: If the bounds of one :class:`Box` have different dtypes to those of
            the other :class:`Box`.
        """
        if self.lower.dtype is not other.lower.dtype:
            # Returning NotImplemented lets the ``*`` operator machinery raise TypeError.
            return NotImplemented
        product_lower_bound = tf.concat([self._lower, other.lower], axis=-1)
        product_upper_bound = tf.concat([self._upper, other.upper], axis=-1)
        # Note: any explicit constraints on the operand boxes are not carried over
        # to the product space.
        return Box(product_lower_bound, product_upper_bound)
    def __eq__(self, other: object) -> bool:
        """
        :param other: A search space.
        :return: Whether the search space is identical to this one.
        """
        if not isinstance(other, Box):
            return NotImplemented
        return bool(
            tf.reduce_all(self.lower == other.lower)
            and tf.reduce_all(self.upper == other.upper)
            # Constraints match only if they are exactly the same (in the same order).
            and self._constraints == other._constraints
        )
    def __deepcopy__(self, memo: dict[int, object]) -> Box:
        # The space is treated as immutable, so deep copies can safely share this instance.
        return self
    def constraints_residuals(self, points: TensorType) -> TensorType:
        """
        Return residuals for all the constraints in this :class:`SearchSpace`.
        :param points: The points to get the residuals for, with shape [..., D].
        :return: A tensor of all the residuals with shape [..., C], where C is the total number of
            constraints.
        """
        residuals = [constraint.residual(points) for constraint in self._constraints]
        residuals = tf.concat(residuals, axis=-1)
        return residuals
    def is_feasible(self, points: TensorType) -> TensorType:
        """
        Checks if points satisfy the explicit constraints of this :class:`SearchSpace`.
        Note membership of the search space is not checked.
        :param points: The points to check constraints feasibility for, with shape [..., D].
        :return: A tensor of booleans. Returns `True` for each point if it is feasible in this
            search space, else `False`.
        """
        # A residual >= -ctol counts as satisfied; every constraint must be satisfied.
        return tf.math.reduce_all(self.constraints_residuals(points) >= -self._ctol, axis=-1)
    @property
    def has_constraints(self) -> bool:
        """Returns `True` if this search space has any explicit constraints specified."""
        return len(self._constraints) > 0
class TaggedProductSearchSpace(SearchSpace):
    r"""
    Product :class:`SearchSpace` consisting of a product of
    multiple :class:`SearchSpace`. This class provides functionality for
    accessing either the resulting combined search space or each individual space.
    Note that this class assumes that individual points in product spaces are
    represented with their inputs in the same order as specified when initializing
    the space.
    """
    def __init__(self, spaces: Sequence[SearchSpace], tags: Optional[Sequence[str]] = None):
        r"""
        Build a :class:`TaggedProductSearchSpace` from a list ``spaces`` of other spaces. If
        ``tags`` are provided then they form the identifiers of the subspaces, otherwise the
        subspaces are labelled numerically.
        :param spaces: A sequence of :class:`SearchSpace` objects representing the space's subspaces
        :param tags: An optional list of tags giving the unique identifiers of
            the space's subspaces.
        :raise ValueError (or tf.errors.InvalidArgumentError): If ``spaces`` has a different
            length to ``tags`` when ``tags`` is provided or if ``tags`` contains duplicates.
        """
        number_of_subspaces = len(spaces)
        if tags is None:
            # Default tags are the subspace positions as strings: "0", "1", ...
            tags = [str(index) for index in range(number_of_subspaces)]
        else:
            number_of_tags = len(tags)
            tf.debugging.assert_equal(
                number_of_tags,
                number_of_subspaces,
                message=f"""
                    Number of tags must match number of subspaces but
                    received {number_of_tags} tags and {number_of_subspaces} subspaces.
                    """,
            )
            number_of_unique_tags = len(set(tags))
            tf.debugging.assert_equal(
                number_of_tags,
                number_of_unique_tags,
                message=f"Subspace names must be unique but received {tags}.",
            )
        self._spaces = dict(zip(tags, spaces))
        subspace_sizes = [space.dimension for space in spaces]
        self._subspace_sizes_by_tag = {
            tag: subspace_size for tag, subspace_size in zip(tags, subspace_sizes)
        }
        # Starting index of each subspace's slice within a concatenated product point.
        self._subspace_starting_indices = dict(zip(tags, tf.cumsum(subspace_sizes, exclusive=True)))
        self._dimension = tf.cast(tf.reduce_sum(subspace_sizes), dtype=tf.int32)
        self._tags = tuple(tags)  # avoid accidental modification by users
    def __repr__(self) -> str:
        return f"""TaggedProductSearchSpace(spaces =
                {[self.get_subspace(tag) for tag in self.subspace_tags]},
                tags = {self.subspace_tags})
                """
    @property
    def lower(self) -> TensorType:
        """The lowest values taken by each space dimension, concatenated across subspaces."""
        lower_for_each_subspace = [self.get_subspace(tag).lower for tag in self.subspace_tags]
        return (
            tf.concat(lower_for_each_subspace, axis=-1)
            if lower_for_each_subspace
            else tf.constant([], dtype=DEFAULT_DTYPE)
        )
    @property
    def upper(self) -> TensorType:
        """The highest values taken by each space dimension, concatenated across subspaces."""
        upper_for_each_subspace = [self.get_subspace(tag).upper for tag in self.subspace_tags]
        return (
            tf.concat(upper_for_each_subspace, axis=-1)
            if upper_for_each_subspace
            else tf.constant([], dtype=DEFAULT_DTYPE)
        )
    @property
    def subspace_tags(self) -> tuple[str, ...]:
        """Return the names of the subspaces contained in this product space."""
        return self._tags
    @property
    def dimension(self) -> TensorType:
        """The number of inputs in this product search space."""
        return self._dimension
    def get_subspace(self, tag: str) -> SearchSpace:
        """
        Return the domain of a particular subspace.
        :param tag: The tag specifying the target subspace.
        :return: Target subspace.
        """
        tf.debugging.assert_equal(
            tag in self.subspace_tags,
            True,
            message=f"""
                Attempted to access a subspace that does not exist. This space only contains
                subspaces with the tags {self.subspace_tags} but received {tag}.
                """,
        )
        return self._spaces[tag]
    def fix_subspace(self, tag: str, values: TensorType) -> TaggedProductSearchSpace:
        """
        Return a new :class:`TaggedProductSearchSpace` with the specified subspace replaced with
        a :class:`DiscreteSearchSpace` containing ``values`` as its points. This is useful if you
        wish to restrict subspaces to sets of representative points.
        :param tag: The tag specifying the target subspace.
        :param values: The values used to populate the new discrete subspace.
        :return: New :class:`TaggedProductSearchSpace` with the specified subspace replaced with
            a :class:`DiscreteSearchSpace` containing ``values`` as its points.
        """
        new_spaces = [
            self.get_subspace(t) if t != tag else DiscreteSearchSpace(points=values)
            for t in self.subspace_tags
        ]
        return TaggedProductSearchSpace(spaces=new_spaces, tags=self.subspace_tags)
    def get_subspace_component(self, tag: str, values: TensorType) -> TensorType:
        """
        Returns the components of ``values`` lying in a particular subspace.
        :param tag: Subspace tag.
        :param values: Points from the :class:`TaggedProductSearchSpace` of shape [N,Dprod].
        :return: The sub-components of ``values`` lying in the specified subspace, of shape
            [N, Dsub], where Dsub is the dimensionality of the specified subspace.
        """
        starting_index_of_subspace = self._subspace_starting_indices[tag]
        ending_index_of_subspace = starting_index_of_subspace + self._subspace_sizes_by_tag[tag]
        return values[..., starting_index_of_subspace:ending_index_of_subspace]
    def _contains(self, value: TensorType) -> TensorType:
        """
        Return `True` if ``value`` is a member of this search space, else `False`. A point is a
        member if each of its subspace components lie in each subspace.
        Recall that individual points in product spaces are represented with their inputs in the
        same order as specified when initializing the space.
        :param value: A point to check for membership of this :class:`SearchSpace`.
        :return: `True` if ``value`` is a member of this search space, else `False`. May return a
            scalar boolean `TensorType` instead of the `bool` itself.
        :raise ValueError (or tf.errors.InvalidArgumentError): If ``value`` has a different
            dimensionality from the search space.
        """
        in_each_subspace = [
            self._spaces[tag].contains(self.get_subspace_component(tag, value))
            for tag in self._tags
        ]
        return tf.reduce_all(in_each_subspace, axis=0)
    def sample(self, num_samples: int, seed: Optional[int] = None) -> TensorType:
        """
        Sample randomly from the space by sampling from each subspace
        and concatenating the resulting samples.
        :param num_samples: The number of points to sample from this search space.
        :param seed: Optional tf.random seed.
        :return: ``num_samples`` i.i.d. random points, sampled uniformly,
            from this search space with shape '[num_samples, D]' , where D is the search space
            dimension.
        """
        tf.debugging.assert_non_negative(num_samples)
        if seed is not None:  # ensure reproducibility
            tf.random.set_seed(seed)
        # The same seed is forwarded to every subspace sampler.
        subspace_samples = [self._spaces[tag].sample(num_samples, seed=seed) for tag in self._tags]
        return tf.concat(subspace_samples, -1)
    def product(self, other: TaggedProductSearchSpace) -> TaggedProductSearchSpace:
        r"""
        Return the Cartesian product of the two :class:`TaggedProductSearchSpace`\ s,
        building a tree of :class:`TaggedProductSearchSpace`\ s.
        :param other: A search space of the same type as this search space.
        :return: The Cartesian product of this search space with the ``other``.
        """
        return TaggedProductSearchSpace(spaces=[self, other])
    def __eq__(self, other: object) -> bool:
        """
        :param other: A search space.
        :return: Whether the search space is identical to this one.
        """
        if not isinstance(other, TaggedProductSearchSpace):
            return NotImplemented
        return self._tags == other._tags and self._spaces == other._spaces
    def __deepcopy__(self, memo: dict[int, object]) -> TaggedProductSearchSpace:
        # The space is treated as immutable, so deep copies can safely share this instance.
        return self
| 45,403 | 41.040741 | 100 | py |
trieste-develop | trieste-develop/trieste/data.py | """ This module contains utilities for :class:`~trieste.observer.Observer` data. """
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional, Sequence
import tensorflow as tf
from trieste.types import TensorType
@dataclass(frozen=True)
class Dataset:
    """
    Container for the query points and corresponding observations from an
    :class:`~trieste.observer.Observer`.
    """
    query_points: TensorType
    """ The points at which the :class:`~trieste.observer.Observer` was queried. """
    observations: TensorType
    """ The observed output of the :class:`~trieste.observer.Observer` for each query point. """
    def __post_init__(self) -> None:
        """
        :raise ValueError (or InvalidArgumentError): If ``query_points`` or ``observations`` have \
            rank less than two, or they have unequal shape in any but their last dimension.
        """
        tf.debugging.assert_rank_at_least(self.query_points, 2)
        tf.debugging.assert_rank_at_least(self.observations, 2)
        # Reject degenerate zero-width final dimensions with a clear message.
        if 0 in (self.query_points.shape[-1], self.observations.shape[-1]):
            raise ValueError(
                f"query_points and observations cannot have dimension 0, got shapes"
                f" {self.query_points.shape} and {self.observations.shape}."
            )
        if (
            self.query_points.shape[:-1] != self.observations.shape[:-1]
            # can't check dynamic shapes, so trust that they're ok (if not, they'll fail later)
            and None not in self.query_points.shape[:-1]
        ):
            raise ValueError(
                f"Leading shapes of query_points and observations must match. Got shapes"
                f" {self.query_points.shape}, {self.observations.shape}."
            )
    def __add__(self, rhs: Dataset) -> Dataset:
        r"""
        Return the :class:`Dataset` whose query points are the result of concatenating the
        `query_points` in each :class:`Dataset` along the zeroth axis, and the same for the
        `observations`. For example:
        >>> d1 = Dataset(
        ...     tf.constant([[0.1, 0.2], [0.3, 0.4]]),
        ...     tf.constant([[0.5, 0.6], [0.7, 0.8]])
        ... )
        >>> d2 = Dataset(tf.constant([[0.9, 1.0]]), tf.constant([[1.1, 1.2]]))
        >>> (d1 + d2).query_points
        <tf.Tensor: shape=(3, 2), dtype=float32, numpy=
        array([[0.1, 0.2],
               [0.3, 0.4],
               [0.9, 1. ]], dtype=float32)>
        >>> (d1 + d2).observations
        <tf.Tensor: shape=(3, 2), dtype=float32, numpy=
        array([[0.5, 0.6],
               [0.7, 0.8],
               [1.1, 1.2]], dtype=float32)>
        :param rhs: A :class:`Dataset` with the same shapes as this one, except in the zeroth
            dimension, which can have any size.
        :return: The result of concatenating the :class:`Dataset`\ s.
        :raise InvalidArgumentError: If the shapes of the `query_points` in each :class:`Dataset`
            differ in any but the zeroth dimension. The same applies for `observations`.
        """
        return Dataset(
            tf.concat([self.query_points, rhs.query_points], axis=0),
            tf.concat([self.observations, rhs.observations], axis=0),
        )
    def __len__(self) -> tf.Tensor:
        """
        :return: The number of query points, or equivalently the number of observations.
        """
        return tf.shape(self.observations)[0]
    def __deepcopy__(self, memo: dict[int, object]) -> Dataset:
        # The dataclass is frozen, so deep copies can safely share this instance.
        return self
    def astuple(self) -> tuple[TensorType, TensorType]:
        """
        **Note:** Unlike the standard library function `dataclasses.astuple`, this method does
        **not** deepcopy the attributes.
        :return: A 2-tuple of the :attr:`query_points` and :attr:`observations`.
        """
        return self.query_points, self.observations
def check_and_extract_fidelity_query_points(
    query_points: TensorType, max_fidelity: Optional[int] = None
) -> tuple[TensorType, TensorType]:
    """Check whether the final column of a tensor is close enough to ints
    to be reasonably considered to represent fidelities.
    The final input column of multi-fidelity data should be a reference to
    the fidelity of the query point. We cannot have mixed type tensors, but
    we can check that the final column values are suitably close to integers.
    :param query_points: Data to check final column of.
    :param max_fidelity: If given, the maximum fidelity value the final column may contain.
    :raise ValueError: If there are not enough columns to be multifidelity data
    :raise InvalidArgumentError: If any value in the final column is far from an integer,
        is negative, or exceeds ``max_fidelity`` (when provided)
    :return: Query points without fidelity column
        and the fidelities of each of the query points
    """
    # Check we have sufficient columns
    if query_points.shape[-1] < 2:
        raise ValueError(
            "Query points do not have enough columns to be multifidelity,"
            # Report shape[-1]: it is the dimension actually checked, and unlike shape[1]
            # it cannot itself raise an IndexError for rank-1 inputs.
            f" need at least 2, got {query_points.shape[-1]}"
        )
    input_points = query_points[..., :-1]
    fidelity_col = query_points[..., -1:]
    # Check fidelity column values are close to ints
    tf.debugging.assert_equal(
        tf.round(fidelity_col),
        fidelity_col,
        message="Fidelity column should be float(int), but got a float that"
        " was not close to an int",
    )
    # Check fidelity column values are non-negative
    tf.debugging.assert_non_negative(fidelity_col, message="Fidelity must be non-negative")
    if max_fidelity is not None:
        max_input_fid = tf.reduce_max(fidelity_col)
        max_fidelity_float = tf.cast(max_fidelity, dtype=query_points.dtype)
        tf.debugging.assert_less_equal(
            max_input_fid,
            max_fidelity_float,
            message=(
                f"Model only supports fidelities up to {max_fidelity},"
                f" but {max_input_fid} was passed"
            ),
        )
    return input_points, fidelity_col
def split_dataset_by_fidelity(dataset: Dataset, num_fidelities: int) -> Sequence[Dataset]:
    """Split dataset into individual datasets without fidelity information
    :param dataset: Dataset for which to split fidelities
    :param num_fidelities: Number of fidelities in the problem (not just dataset)
    :return: Ordered list of datasets with lowest fidelity at index 0 and highest at -1
    """
    if num_fidelities < 1:
        raise ValueError(f"Data must have 1 or more fidelities, got {num_fidelities}")
    # Note: fidelities absent from the data still get an entry (an empty dataset slice).
    datasets = [get_dataset_for_fidelity(dataset, fidelity) for fidelity in range(num_fidelities)]
    return datasets
def get_dataset_for_fidelity(dataset: Dataset, fidelity: int) -> Dataset:
    """Get a dataset with only the specified fidelity of data in
    :param dataset: The dataset from which to extract the single fidelity data
    :param fidelity: The fidelity to extract the data for
    :return: Dataset with a single fidelity and no fidelity column
    """
    # Strip the fidelity column, then keep only the rows whose fidelity matches.
    points_without_fidelity, fidelities = check_and_extract_fidelity_query_points(
        dataset.query_points
    )  # [..., D], [..., 1]
    matching_rows = tf.where(fidelities == fidelity)[..., 0]  # [..., ]
    selected_points = tf.gather(points_without_fidelity, matching_rows, axis=0)  # [..., D]
    selected_observations = tf.gather(dataset.observations, matching_rows, axis=0)  # [..., 1]
    return Dataset(query_points=selected_points, observations=selected_observations)
def add_fidelity_column(query_points: TensorType, fidelity: int) -> TensorType:
    """Add fidelity column to query_points without fidelity data
    :param query_points: query points without fidelity to add fidelity column to
    :param fidelity: fidelity to populate fidelity column with
    :return: TensorType of query points with fidelity column added
    """
    num_rows = tf.shape(query_points)[-2]
    # A constant column holding the requested fidelity, matching the input dtype.
    fidelities = fidelity * tf.ones((num_rows, 1), dtype=query_points.dtype)
    return tf.concat([query_points, fidelities], axis=-1)
| 8,597 | 41.147059 | 99 | py |
trieste-develop | trieste-develop/trieste/types.py | """This module contains type aliases."""
from typing import Callable, Hashable, Tuple, TypeVar, Union
import tensorflow as tf
# Public type aliases used throughout trieste; the bare strings following each
# alias serve as its documentation.
TensorType = Union[tf.Tensor, tf.Variable]
"""Type alias for tensor-like types."""
S = TypeVar("S")
"""Unbound type variable."""
T = TypeVar("T")
"""Unbound type variable."""
State = Callable[[S], Tuple[S, T]]
"""
A `State` produces a value of type `T`, given a state of type `S`, and in doing so can update the
state. If the state is updated, it is not updated in-place. Instead, a new state is created. This
is a referentially transparent alternative to mutable state.
"""
Tag = Hashable
"""Type alias for a tag used to label datasets and models."""
| 1,272 | 33.405405 | 97 | py |
trieste-develop | trieste-develop/trieste/version.py | """This module exposes the trieste version number."""
from pathlib import Path
# Locate the VERSION file that sits alongside this module.
BASE_PATH = Path(__file__).parent
VERSION = BASE_PATH / "VERSION"
# VERSION is already a Path, so it can be read directly (the previous
# Path(VERSION) wrapper was a redundant conversion).
__version__ = VERSION.read_text().strip()
| 787 | 36.52381 | 74 | py |
trieste-develop | trieste-develop/trieste/observer.py | """ Definitions and utilities for observers of objective functions. """
from __future__ import annotations
from typing import Callable, Mapping, Union
import tensorflow as tf
from typing_extensions import Final
from .data import Dataset
from .types import Tag, TensorType
# Observer type aliases: both take a tensor of query points; they differ only in
# whether the result is a single dataset or a tag-keyed mapping of datasets.
SingleObserver = Callable[[TensorType], Dataset]
"""
Type alias for an observer of the objective function (that takes query points and returns an
unlabelled dataset).
"""
MultiObserver = Callable[[TensorType], Mapping[Tag, Dataset]]
"""
Type alias for an observer of the objective function (that takes query points and returns labelled
datasets).
"""
Observer = Union[SingleObserver, MultiObserver]
"""
Type alias for an observer, returning either labelled datasets or a single unlabelled dataset.
"""
OBJECTIVE: Final[Tag] = "OBJECTIVE"
"""
A tag typically used by acquisition rules to denote the data sets and models corresponding to the
optimization objective.
"""
def _is_finite(t: TensorType) -> TensorType:
    # Elementwise: finite and not NaN. (``&``/``~`` are TensorFlow's overloaded
    # logical_and/logical_not for boolean tensors.)
    return tf.math.is_finite(t) & ~tf.math.is_nan(t)
def filter_finite(query_points: TensorType, observations: TensorType) -> Dataset:
    """
    :param query_points: A tensor of shape (N x M).
    :param observations: A tensor of shape (N x 1).
    :return: A :class:`~trieste.data.Dataset` containing all the rows in ``query_points`` and
        ``observations`` where the ``observations`` are finite numbers.
    :raise ValueError or InvalidArgumentError: If ``query_points`` or ``observations`` have invalid
        shapes.
    """
    tf.debugging.assert_shapes([(observations, ("N", 1))])
    # Flatten the [N, 1] finite-mask to [N] so it can select rows from both tensors.
    keep_rows = tf.reshape(_is_finite(observations), [-1])
    filtered_queries = tf.boolean_mask(query_points, keep_rows)
    filtered_observations = tf.boolean_mask(observations, keep_rows)
    return Dataset(filtered_queries, filtered_observations)
def map_is_finite(query_points: TensorType, observations: TensorType) -> Dataset:
    """
    :param query_points: A tensor.
    :param observations: A tensor.
    :return: A :class:`~trieste.data.Dataset` containing all the rows in ``query_points``,
        along with the tensor result of mapping the elements of ``observations`` to: `1` if they are
        a finite number, else `0`, with dtype `tf.uint8`.
    :raise ValueError or InvalidArgumentError: If ``query_points`` and ``observations`` do not
        satisfy the shape constraints of :class:`~trieste.data.Dataset`.
    """
    # Encode per-element finiteness as 0/1 flags rather than filtering rows out.
    finite_flags = tf.cast(_is_finite(observations), tf.uint8)
    return Dataset(query_points, finite_flags)
| 3,024 | 37.291139 | 100 | py |
trieste-develop | trieste-develop/trieste/objectives/single_objectives.py |
"""
This module contains toy objective functions, useful for experimentation. A number of them have been
taken from `this Virtual Library of Simulation Experiments
<https://web.archive.org/web/20211015101644/https://www.sfu.ca/~ssurjano/> (:cite:`ssurjano2021`)`_.
"""
from __future__ import annotations
import math
from dataclasses import dataclass
from math import pi
from typing import Callable, Sequence
import tensorflow as tf
from ..space import Box, Constraint, LinearConstraint, NonlinearConstraint
from ..types import TensorType
@dataclass(frozen=True)
class ObjectiveTestProblem:
    """
    Convenience container class for synthetic objective test functions, bundling the
    function itself with its (continuous) search space.
    """
    name: str
    """The test function name"""
    objective: Callable[[TensorType], TensorType]
    """The synthetic test function"""
    search_space: Box
    """The (continuous) search space of the test function"""
    @property
    def dim(self) -> int:
        """The input dimensionality of the test function"""
        return self.search_space.dimension
    @property
    def bounds(self) -> list[list[float]]:
        """The input space bounds of the test function, as a [lower, upper] pair"""
        return [self.search_space.lower, self.search_space.upper]
@dataclass(frozen=True)
class SingleObjectiveTestProblem(ObjectiveTestProblem):
    """
    Convenience container class for synthetic single-objective test functions,
    extending :class:`ObjectiveTestProblem` with the known global minimizers and minimum.
    """
    minimizers: TensorType
    """The global minimizers of the test function."""
    minimum: TensorType
    """The global minimum of the test function."""
def _branin_internals(x: TensorType, scale: TensorType, translate: TensorType) -> TensorType:
    """Evaluate an affinely transformed Branin-Hoo surface on the unit square."""
    # Map the unit square onto the original Branin domain [-5, 10] x [0, 15].
    u = x[..., :1] * 15.0 - 5.0
    v = x[..., 1:] * 15.0
    quad_coeff = 5.1 / (4 * math.pi**2)
    lin_coeff = 5 / math.pi
    offset = 6
    amplitude = 10
    cos_coeff = 1 / (8 * math.pi)
    parabola = (v - quad_coeff * u**2 + lin_coeff * u - offset) ** 2
    periodic = amplitude * (1 - cos_coeff) * tf.cos(u)
    return scale * (parabola + periodic + translate)
def branin(x: TensorType) -> TensorType:
    """
    The Branin-Hoo function over :math:`[0, 1]^2`. See
    :cite:`Picheny2013` for details.
    :param x: The points at which to evaluate the function, with shape [..., 2].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 2))])
    # Unscaled surface: unit scale, translated by +10.
    return _branin_internals(x, scale=1, translate=10)
def scaled_branin(x: TensorType) -> TensorType:
    """
    The Branin-Hoo function, rescaled to have zero mean and unit variance over :math:`[0, 1]^2`.
    See :cite:`Picheny2013` for details.
    :param x: The points at which to evaluate the function, with shape [..., 2].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 2))])
    # Standardisation constants from Picheny et al. (2013).
    return _branin_internals(x, scale=1 / 51.95, translate=-44.81)
# Minimizers of the Branin-Hoo function on its original domain [-5, 10] x [0, 15];
# rescaled below onto the unit square used by :func:`branin`.
_ORIGINAL_BRANIN_MINIMIZERS = tf.constant(
    [[-math.pi, 12.275], [math.pi, 2.275], [9.42478, 2.475]], tf.float64
)
# Problem specification for the unscaled Branin-Hoo function.
Branin = SingleObjectiveTestProblem(
    name="Branin",
    objective=branin,
    search_space=Box([0.0], [1.0]) ** 2,
    minimizers=(_ORIGINAL_BRANIN_MINIMIZERS + [5.0, 0.0]) / 15.0,
    minimum=tf.constant([0.397887], tf.float64),
)
"""The Branin-Hoo function over :math:`[0, 1]^2`. See :cite:`Picheny2013` for details."""
# Same minimizers as Branin; only the function values are standardised.
ScaledBranin = SingleObjectiveTestProblem(
    name="Scaled Branin",
    objective=scaled_branin,
    search_space=Branin.search_space,
    minimizers=Branin.minimizers,
    minimum=tf.constant([-1.047393], tf.float64),
)
"""The Branin-Hoo function, rescaled to have zero mean and unit variance over :math:`[0, 1]^2`. See
:cite:`Picheny2013` for details."""
def _scaled_branin_constraints() -> Sequence[Constraint]:
    """Build the linear and nonlinear constraints used by :const:`ConstrainedScaledBranin`."""

    def _sine_residual(x: TensorType) -> TensorType:
        # x0 - 0.2 - sin(x1), kept with a trailing singleton dimension.
        return tf.expand_dims(x[..., 0] - 0.2 - tf.sin(x[..., 1]), axis=-1)

    def _cosine_residual(x: TensorType) -> TensorType:
        # x0 - cos(x1), kept with a trailing singleton dimension.
        return tf.expand_dims(x[..., 0] - tf.cos(x[..., 1]), axis=-1)

    return [
        LinearConstraint(
            A=tf.constant([[-1.0, 1.0], [1.0, 0.0], [0.0, 1.0]]),
            lb=tf.constant([-0.4, 0.15, 0.2]),
            ub=tf.constant([0.5, 0.9, 0.9]),
        ),
        NonlinearConstraint(_sine_residual, tf.constant(-1.0), tf.constant(0.0)),
        NonlinearConstraint(_cosine_residual, tf.constant(-0.8), tf.constant(0.0)),
    ]
# The scaled Branin objective restricted to a constrained subset of the unit square.
ConstrainedScaledBranin = SingleObjectiveTestProblem(
    name="Constrained Scaled Branin",
    objective=scaled_branin,
    search_space=Box(
        Branin.search_space.lower,
        Branin.search_space.upper,
        constraints=_scaled_branin_constraints(),
    ),
    minimizers=tf.constant([[0.16518, 0.66518]], tf.float64),
    minimum=tf.constant([-0.99888], tf.float64),
)
"""The rescaled Branin-Hoo function with a combination of linear and nonlinear constraints on the
search space."""
def simple_quadratic(x: TensorType) -> TensorType:
    """
    A trivial quadratic function over :math:`[0, 1]^2`. Useful for quick testing.
    :param x: The points at which to evaluate the function, with shape [..., 2].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 2))])
    coordinate_sum = tf.math.reduce_sum(x, axis=-1, keepdims=True)
    # Negative square of the coordinate sum, so the minimum is at (1, 1).
    return -(coordinate_sum**2)
# Problem specification for the trivial testing quadratic.
SimpleQuadratic = SingleObjectiveTestProblem(
    name="Simple Quadratic",
    objective=simple_quadratic,
    search_space=Branin.search_space,
    minimizers=tf.constant([[1.0, 1.0]], tf.float64),
    minimum=tf.constant([-4.0], tf.float64),
)
"""A trivial quadratic function over :math:`[0, 1]^2`. Useful for quick testing."""
def gramacy_lee(x: TensorType) -> TensorType:
    """
    The Gramacy & Lee function, typically evaluated over :math:`[0.5, 2.5]`. See
    :cite:`gramacy2012cases` for details.
    :param x: Where to evaluate the function, with shape [..., 1].
    :return: The function values, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 1))])
    oscillation = tf.sin(10 * math.pi * x) / (2 * x)
    quartic = (x - 1) ** 4
    return oscillation + quartic
# Problem specification for the 1-d Gramacy & Lee function.
GramacyLee = SingleObjectiveTestProblem(
    name="Gramacy & Lee",
    objective=gramacy_lee,
    search_space=Box([0.5], [2.5]),
    minimizers=tf.constant([[0.548562]], tf.float64),
    minimum=tf.constant([-0.869011], tf.float64),
)
"""The Gramacy & Lee function, typically evaluated over :math:`[0.5, 2.5]`. See
:cite:`gramacy2012cases` for details."""
def logarithmic_goldstein_price(x: TensorType) -> TensorType:
    """
    A logarithmic form of the Goldstein-Price function, with zero mean and unit variance over
    :math:`[0, 1]^2`. See :cite:`Picheny2013` for details.
    :param x: The points at which to evaluate the function, with shape [..., 2].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 2))])
    # Map the unit square to the original [-2, 2]^2 Goldstein-Price domain.
    u, v = tf.split(4 * x - 2, 2, axis=-1)
    term_a = (u + v + 1) ** 2
    term_b = 19 - 14 * u + 3 * u**2 - 14 * v + 6 * u * v + 3 * v**2
    term_c = (2 * u - 3 * v) ** 2
    term_d = 18 - 32 * u + 12 * u**2 + 48 * v - 36 * u * v + 27 * v**2
    log_gp = tf.math.log((1 + term_a * term_b) * (30 + term_c * term_d))
    # Standardisation constants from Picheny et al. (2013).
    return (1 / 2.427) * (log_gp - 8.693)
# Problem specification for the standardised logarithmic Goldstein-Price function.
LogarithmicGoldsteinPrice = SingleObjectiveTestProblem(
    name="Logarithmic Goldstein-Price",
    objective=logarithmic_goldstein_price,
    search_space=Box([0.0], [1.0]) ** 2,
    minimizers=tf.constant([[0.5, 0.25]], tf.float64),
    minimum=tf.constant([-3.12913], tf.float64),
)
"""A logarithmic form of the Goldstein-Price function, with zero mean and unit variance over
:math:`[0, 1]^2`. See :cite:`Picheny2013` for details."""
def hartmann_3(x: TensorType) -> TensorType:
    """
    The Hartmann 3 test function over :math:`[0, 1]^3`. This function has 3 local
    and one global minima. See https://www.sfu.ca/~ssurjano/hart3.html for details.
    :param x: The points at which to evaluate the function, with shape [..., 3].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 3))])
    # Coefficients from https://www.sfu.ca/~ssurjano/hart3.html.
    a = [1.0, 1.2, 3.0, 3.2]
    A = [[3.0, 10.0, 30.0], [0.1, 10.0, 35.0], [3.0, 10.0, 30.0], [0.1, 10.0, 35.0]]
    P = [
        [0.3689, 0.1170, 0.2673],
        [0.4699, 0.4387, 0.7470],
        [0.1091, 0.8732, 0.5547],
        [0.0381, 0.5743, 0.8828],
    ]
    # Insert the 4-term axis at -2 (rather than the previous hard-coded axis 1) so that
    # arbitrary leading batch dimensions are supported, per the [..., 3] shape contract.
    # For rank-2 input this is identical to expand_dims(x, 1).
    inner_sum = -tf.reduce_sum(A * (tf.expand_dims(x, -2) - P) ** 2, -1)
    return -tf.reduce_sum(a * tf.math.exp(inner_sum), -1, keepdims=True)
# Problem specification for the 3-d Hartmann function.
Hartmann3 = SingleObjectiveTestProblem(
    name="Hartmann 3",
    objective=hartmann_3,
    search_space=Box([0.0], [1.0]) ** 3,
    minimizers=tf.constant([[0.114614, 0.555649, 0.852547]], tf.float64),
    minimum=tf.constant([-3.86278], tf.float64),
)
"""The Hartmann 3 test function over :math:`[0, 1]^3`. This function has 3 local
and one global minima. See https://www.sfu.ca/~ssurjano/hart3.html for details."""
def shekel_4(x: TensorType) -> TensorType:
    """
    The Shekel test function over :math:`[0, 1]^4`. This function has ten local
    minima and a single global minimum. See https://www.sfu.ca/~ssurjano/shekel.html for details.
    Note that we rescale the original problem, which is typically defined
    over `[0, 10]^4`.
    :param x: The points at which to evaluate the function, with shape [..., 4].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 4))])
    # Rescale from the unit hypercube to the original [0, 10]^4 domain.
    y: TensorType = x * 10.0
    beta = [0.1, 0.2, 0.2, 0.4, 0.4, 0.6, 0.3, 0.7, 0.5, 0.5]
    C = [
        [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
        [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6],
        [4.0, 1.0, 8.0, 6.0, 3.0, 2.0, 5.0, 8.0, 6.0, 7.0],
        [4.0, 1.0, 8.0, 6.0, 7.0, 9.0, 3.0, 1.0, 2.0, 3.6],
    ]
    # Reduce over the input dimension at axis -2 (rather than a hard-coded axis 1) so
    # that leading batch dimensions are supported, per the [..., 4] shape contract.
    inner_sum = tf.reduce_sum((tf.expand_dims(y, -1) - C) ** 2, -2)
    # beta is a 1-d list, so the original tf.transpose(beta) was a no-op; a plain cast
    # broadcasts identically over the 10 mixture terms.
    inner_sum += tf.cast(beta, dtype=inner_sum.dtype)
    return -tf.reduce_sum(inner_sum ** (-1), -1, keepdims=True)
# Problem specification for the rescaled 4-d Shekel function.
Shekel4 = SingleObjectiveTestProblem(
    name="Shekel 4",
    objective=shekel_4,
    search_space=Box([0.0], [1.0]) ** 4,
    minimizers=tf.constant([[0.4, 0.4, 0.4, 0.4]], tf.float64),
    minimum=tf.constant([-10.5363], tf.float64),
)
"""The Shekel test function over :math:`[0, 1]^4`. This function has ten local
minima and a single global minimum. See https://www.sfu.ca/~ssurjano/shekel.html for details.
Note that we rescale the original problem, which is typically defined
over `[0, 10]^4`."""
def levy(x: TensorType, d: int) -> TensorType:
    """
    The Levy test function over :math:`[0, 1]^d`. This function has many local
    minima and a single global minimum. See https://www.sfu.ca/~ssurjano/levy.html for details.
    Note that we rescale the original problem, which is typically defined
    over `[-10, 10]^d`, to be defined over a unit hypercube :math:`[0, 1]^d`.
    :param x: The points at which to evaluate the function, with shape [..., d].
    :param d: The dimension of the function.
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_greater_equal(d, 1)
    tf.debugging.assert_shapes([(x, (..., d))])
    # Standard Levy change of variables: w_i = 1 + (z_i - 1) / 4, where z rescales x
    # from the unit hypercube back to [-10, 10]^d.
    w: TensorType = 1 + ((x * 20.0 - 10) - 1) / 4
    # First term: sin^2(pi * w_1).
    term1 = tf.pow(tf.sin(pi * w[..., 0:1]), 2)
    # Last term: (w_d - 1)^2 * (1 + sin^2(2 * pi * w_d)).
    term3 = (w[..., -1:] - 1) ** 2 * (1 + tf.pow(tf.sin(2 * pi * w[..., -1:]), 2))
    # Middle sum runs over i = 1, ..., d-1 (all but the final coordinate).
    wi = w[..., 0:-1]
    wi_sum = tf.reduce_sum(
        (wi - 1) ** 2 * (1 + 10 * tf.pow(tf.sin(pi * wi + 1), 2)), axis=-1, keepdims=True
    )
    return term1 + wi_sum + term3
def levy_8(x: TensorType) -> TensorType:
    """
    Convenience function for the 8-dimensional :func:`levy` function, with output
    normalised to unit interval
    :param x: The points at which to evaluate the function, with shape [..., 8].
    :return: The function values at ``x``, with shape [..., 1].
    """
    # Divide by 450 to bring the 8-d Levy values to roughly the unit interval.
    normaliser = 450.0
    return levy(x, d=8) / normaliser
# Problem specification for the normalised 8-d Levy function.
Levy8 = SingleObjectiveTestProblem(
    name="Levy 8",
    objective=levy_8,
    search_space=Box([0.0], [1.0]) ** 8,
    minimizers=tf.constant([[11 / 20] * 8], tf.float64),
    minimum=tf.constant([0], tf.float64),
)
"""Convenience function for the 8-dimensional :func:`levy` function.
Taken from https://www.sfu.ca/~ssurjano/levy.html"""
def rosenbrock(x: TensorType, d: int) -> TensorType:
    """
    The Rosenbrock function, also known as the Banana function, is a unimodal function,
    however the minima lies in a narrow valley. Even though this valley is
    easy to find, convergence to the minimum is difficult. See
    https://www.sfu.ca/~ssurjano/rosen.html for details. Inputs are rescaled to
    be defined over a unit hypercube :math:`[0, 1]^d`.
    :param x: The points at which to evaluate the function, with shape [..., d].
    :param d: The dimension of the function.
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_greater_equal(d, 1)
    tf.debugging.assert_shapes([(x, (..., d))])
    # Rescale from the unit hypercube to the conventional [-5, 10]^d domain.
    y: TensorType = x * 15.0 - 5
    # Rosenbrock: sum_i [100 * (y_{i+1} - y_i^2)^2 + (1 - y_i)^2]. The squared y_i
    # inside the first term is what creates the characteristic banana-shaped valley;
    # the previous code omitted it, which is not the cited Rosenbrock function.
    unscaled_function = tf.reduce_sum(
        (100.0 * (y[..., 1:] - y[..., :-1] ** 2) ** 2 + (1 - y[..., :-1]) ** 2),
        axis=-1,
        keepdims=True,
    )
    return unscaled_function
def rosenbrock_4(x: TensorType) -> TensorType:
    """
    Convenience function for the 4-dimensional :func:`rosenbrock` function with steepness 10.
    It is rescaled to have zero mean and unit variance over :math:`[0, 1]^4. See
    :cite:`Picheny2013` for details.
    :param x: The points at which to evaluate the function, with shape [..., 4].
    :return: The function values at ``x``, with shape [..., 1].
    """
    # Standardise using the moments reported in Picheny et al. (2013).
    raw_values = rosenbrock(x, d=4)
    return (raw_values - 3.827 * 1e5) / (3.755 * 1e5)
# Problem specification for the standardised 4-d Rosenbrock function.
Rosenbrock4 = SingleObjectiveTestProblem(
    name="Rosenbrock 4",
    objective=rosenbrock_4,
    search_space=Box([0.0], [1.0]) ** 4,
    minimizers=tf.constant([[0.4] * 4], tf.float64),
    minimum=tf.constant([-1.01917], tf.float64),
)
"""The Rosenbrock function, rescaled to have zero mean and unit variance over :math:`[0, 1]^4. See
:cite:`Picheny2013` for details.
This function (also known as the Banana function) is unimodal, however the minima
lies in a narrow valley."""
def ackley_5(x: TensorType) -> TensorType:
    """
    The Ackley test function over :math:`[0, 1]^5`. This function has
    many local minima and a global minima. See https://www.sfu.ca/~ssurjano/ackley.html
    for details.
    Note that we rescale the original problem, which is typically defined
    over `[-32.768, 32.768]`.
    :param x: The points at which to evaluate the function, with shape [..., 5].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 5))])
    # Rescale from the unit hypercube to the original [-32.768, 32.768]^5 domain.
    x = (x - 0.5) * (32.768 * 2.0)
    norm_exponent = -0.2 * tf.math.sqrt((1 / 5.0) * tf.reduce_sum(x**2, -1))
    cosine_exponent = (1 / 5.0) * tf.reduce_sum(tf.math.cos(2.0 * math.pi * x), -1)
    values = (
        -20.0 * tf.math.exp(norm_exponent)
        - tf.math.exp(cosine_exponent)
        + 20.0
        + tf.cast(tf.math.exp(1.0), dtype=x.dtype)
    )
    return tf.expand_dims(values, -1)
# Problem specification for the rescaled 5-d Ackley function.
Ackley5 = SingleObjectiveTestProblem(
    name="Ackley 5",
    objective=ackley_5,
    search_space=Box([0.0], [1.0]) ** 5,
    minimizers=tf.constant([[0.5, 0.5, 0.5, 0.5, 0.5]], tf.float64),
    minimum=tf.constant([0.0], tf.float64),
)
"""The Ackley test function over :math:`[0, 1]^5`. This function has
many local minima and a global minima. See https://www.sfu.ca/~ssurjano/ackley.html
for details.
Note that we rescale the original problem, which is typically defined
over `[-32.768, 32.768]`."""
def hartmann_6(x: TensorType) -> TensorType:
    """
    The Hartmann 6 test function over :math:`[0, 1]^6`. This function has
    6 local and one global minima. See https://www.sfu.ca/~ssurjano/hart6.html
    for details.
    :param x: The points at which to evaluate the function, with shape [..., 6].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes([(x, (..., 6))])
    # Coefficients from https://www.sfu.ca/~ssurjano/hart6.html.
    a = [1.0, 1.2, 3.0, 3.2]
    A = [
        [10.0, 3.0, 17.0, 3.5, 1.7, 8.0],
        [0.05, 10.0, 17.0, 0.1, 8.0, 14.0],
        [3.0, 3.5, 1.7, 10.0, 17.0, 8.0],
        [17.0, 8.0, 0.05, 10.0, 0.1, 14.0],
    ]
    P = [
        [0.1312, 0.1696, 0.5569, 0.0124, 0.8283, 0.5886],
        [0.2329, 0.4135, 0.8307, 0.3736, 0.1004, 0.9991],
        [0.2348, 0.1451, 0.3522, 0.2883, 0.3047, 0.6650],
        [0.4047, 0.8828, 0.8732, 0.5743, 0.1091, 0.0381],
    ]
    # Insert the 4-term axis at -2 (rather than the previous hard-coded axis 1) so that
    # arbitrary leading batch dimensions are supported, per the [..., 6] shape contract.
    # For rank-2 input this is identical to expand_dims(x, 1).
    inner_sum = -tf.reduce_sum(A * (tf.expand_dims(x, -2) - P) ** 2, -1)
    return -tf.reduce_sum(a * tf.math.exp(inner_sum), -1, keepdims=True)
# Problem specification for the 6-d Hartmann function.
Hartmann6 = SingleObjectiveTestProblem(
    name="Hartmann 6",
    objective=hartmann_6,
    search_space=Box([0.0], [1.0]) ** 6,
    minimizers=tf.constant([[0.20169, 0.150011, 0.476874, 0.275332, 0.311652, 0.6573]], tf.float64),
    minimum=tf.constant([-3.32237], tf.float64),
)
"""The Hartmann 6 test function over :math:`[0, 1]^6`. This function has
6 local and one global minima. See https://www.sfu.ca/~ssurjano/hart6.html
for details."""
def michalewicz(x: TensorType, d: int = 2, m: int = 10) -> TensorType:
    """
    The Michalewicz function over :math:`[0, \\pi]` for all i=1,...,d. Dimensionality is determined
    by the parameter ``d`` and it features steep ridges and drops. It has :math:`d!` local minima,
    and it is multimodal. The parameter ``m`` defines the steepness of they valleys and ridges; a
    larger ``m`` leads to a more difficult search. The recommended value of ``m`` is 10. See
    https://www.sfu.ca/~ssurjano/michal.html for details.
    :param x: The points at which to evaluate the function, with shape [..., d].
    :param d: The dimension of the function.
    :param m: The steepness of the valleys/ridges.
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_greater_equal(d, 1)
    tf.debugging.assert_shapes([(x, (..., d))])
    # i * x_i^2 for i = 1, ..., d, broadcast across any leading batch dimensions.
    xi = tf.range(1, (d + 1), delta=1, dtype=x.dtype) * tf.pow(x, 2)
    # Reduce over the input dimension at axis -1 (rather than the previous hard-coded
    # axis 1) so that leading batch dimensions are supported, per the [..., d] contract.
    result = tf.reduce_sum(tf.sin(x) * tf.pow(tf.sin(xi / math.pi), 2 * m), axis=-1, keepdims=True)
    return -result
def michalewicz_2(x: TensorType) -> TensorType:
    """
    Convenience function for the 2-dimensional :func:`michalewicz` function with steepness 10.
    :param x: The points at which to evaluate the function, with shape [..., 2].
    :return: The function values at ``x``, with shape [..., 1].
    """
    # Default steepness m=10 is used.
    dimension = 2
    return michalewicz(x, d=dimension)
def michalewicz_5(x: TensorType) -> TensorType:
    """
    Convenience function for the 5-dimensional :func:`michalewicz` function with steepness 10.
    :param x: The points at which to evaluate the function, with shape [..., 5].
    :return: The function values at ``x``, with shape [..., 1].
    """
    # Default steepness m=10 is used.
    dimension = 5
    return michalewicz(x, d=dimension)
def michalewicz_10(x: TensorType) -> TensorType:
    """
    Convenience function for the 10-dimensional :func:`michalewicz` function with steepness 10.
    :param x: The points at which to evaluate the function, with shape [..., 10].
    :return: The function values at ``x``, with shape [..., 1].
    """
    # Default steepness m=10 is used.
    dimension = 10
    return michalewicz(x, d=dimension)
# Problem specification for the 2-d Michalewicz function.
Michalewicz2 = SingleObjectiveTestProblem(
    name="Michalewicz 2",
    objective=michalewicz_2,
    search_space=Box([0.0], [pi]) ** 2,
    minimizers=tf.constant([[2.202906, 1.570796]], tf.float64),
    minimum=tf.constant([-1.8013034], tf.float64),
)
"""Convenience function for the 2-dimensional :func:`michalewicz` function with steepness 10.
Taken from https://arxiv.org/abs/2003.09867"""
# Problem specification for the 5-d Michalewicz function.
Michalewicz5 = SingleObjectiveTestProblem(
    name="Michalewicz 5",
    objective=michalewicz_5,
    search_space=Box([0.0], [pi]) ** 5,
    minimizers=tf.constant([[2.202906, 1.570796, 1.284992, 1.923058, 1.720470]], tf.float64),
    minimum=tf.constant([-4.6876582], tf.float64),
)
"""Convenience function for the 5-dimensional :func:`michalewicz` function with steepness 10.
Taken from https://arxiv.org/abs/2003.09867"""
# Problem specification for the 10-d Michalewicz function.
Michalewicz10 = SingleObjectiveTestProblem(
    name="Michalewicz 10",
    objective=michalewicz_10,
    search_space=Box([0.0], [pi]) ** 10,
    minimizers=tf.constant(
        [
            [
                2.202906,
                1.570796,
                1.284992,
                1.923058,
                1.720470,
                1.570796,
                1.454414,
                1.756087,
                1.655717,
                1.570796,
            ]
        ],
        tf.float64,
    ),
    minimum=tf.constant([-9.6601517], tf.float64),
)
"""Convenience function for the 10-dimensional :func:`michalewicz` function with steepness 10.
Taken from https://arxiv.org/abs/2003.09867"""
def trid(x: TensorType, d: int = 10) -> TensorType:
    """
    The Trid function over :math:`[-d^2, d^2]` for all i=1,...,d. Dimensionality is determined
    by the parameter ``d`` and it has a global minimum. This function has large variation in
    output which makes it challenging for Bayesian optimisation with vanilla Gaussian processes
    with non-stationary kernels. Models that can deal with non-stationarities, such as deep
    Gaussian processes, can be useful for modelling these functions. See :cite:`hebbal2019bayesian`
    and https://www.sfu.ca/~ssurjano/trid.html for details.
    :param x: The points at which to evaluate the function, with shape [..., d].
    :param d: Dimensionality.
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_greater_equal(d, 2)
    tf.debugging.assert_shapes([(x, (..., d))])
    # Use ellipsis indexing and axis -1 (rather than the previous `x[:, ...]` / axis 1,
    # which only supported rank-2 input) so that leading batch dimensions are supported,
    # per the [..., d] shape contract. Trid: sum (x_i - 1)^2 - sum x_i * x_{i-1}.
    squared_term = tf.reduce_sum(tf.pow(x - 1, 2), -1, True)
    cross_term = tf.reduce_sum(x[..., 1:] * x[..., :-1], -1, True)
    return squared_term - cross_term
def trid_10(x: TensorType) -> TensorType:
    """The Trid function with dimension 10.
    :param x: The points at which to evaluate the function, with shape [..., 10].
    :return: The function values at ``x``, with shape [..., 1].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    dimension = 10
    return trid(x, d=dimension)
# Problem specification for the 10-d Trid function. Minimizer/minimum use the
# closed forms x_i = i * (d + 1 - i) and f* = -d * (d + 4) * (d - 1) / 6 with d = 10.
Trid10 = SingleObjectiveTestProblem(
    name="Trid 10",
    objective=trid_10,
    search_space=Box([-(10**2)], [10**2]) ** 10,
    minimizers=tf.constant([[i * (10 + 1 - i) for i in range(1, 10 + 1)]], tf.float64),
    minimum=tf.constant([-10 * (10 + 4) * (10 - 1) / 6], tf.float64),
)
"""The Trid function with dimension 10."""
| 23,895 | 35.819723 | 100 | py |
trieste-develop | trieste-develop/trieste/objectives/multifidelity_objectives.py | """
This module contains synthetic multi-fidelity objective functions, useful for experimentation.
"""
from dataclasses import dataclass
import numpy as np
import tensorflow as tf
from ..space import Box, DiscreteSearchSpace, SearchSpace, TaggedProductSearchSpace
from ..types import TensorType
from .single_objectives import SingleObjectiveTestProblem
@dataclass(frozen=True)
class SingleObjectiveMultifidelityTestProblem(SingleObjectiveTestProblem):
    # Extends the single-objective container with fidelity information.
    num_fidelities: int
    """The number of fidelities of test function"""
    fidelity_search_space: TaggedProductSearchSpace
    """The search space including fidelities"""
def linear_multifidelity(x: TensorType) -> TensorType:
    """
    Evaluate the linear multifidelity test function. The final input dimension is
    interpreted as the fidelity level; the remaining dimensions are the function inputs.
    """
    inputs = x[..., :-1]
    fidelity = x[..., -1:]
    base = 0.5 * ((6.0 * inputs - 2.0) ** 2) * tf.math.sin(12.0 * inputs - 4.0) + 10.0 * (
        inputs - 1.0
    )
    # Blend in a fidelity-dependent correction term.
    return base + fidelity * (base - 20.0 * (inputs - 1.0))
# Known minimizers of the linear multifidelity problem, keyed by number of fidelities.
_LINEAR_MULTIFIDELITY_MINIMIZERS = {
    2: tf.constant([[0.75724875]], tf.float64),
    3: tf.constant([[0.76333767]], tf.float64),
    5: tf.constant([[0.76801846]], tf.float64),
}
# Corresponding minimum values, keyed by number of fidelities.
_LINEAR_MULTIFIDELITY_MINIMA = {
    2: tf.constant([-6.020740055], tf.float64),
    3: tf.constant([-6.634287061], tf.float64),
    5: tf.constant([-7.933019704], tf.float64),
}
def _linear_multifidelity_search_space_builder(
    n_fidelities: int, input_search_space: SearchSpace
) -> TaggedProductSearchSpace:
    """Attach a discrete fidelity dimension (levels 0, ..., n_fidelities - 1) to the input space."""
    fidelity_levels = np.arange(n_fidelities, dtype=float).reshape(-1, 1)
    return TaggedProductSearchSpace(
        [input_search_space, DiscreteSearchSpace(fidelity_levels)], ["input", "fidelity"]
    )
# Problem specifications for the linear multifidelity function at 2, 3 and 5 fidelities.
Linear2Fidelity = SingleObjectiveMultifidelityTestProblem(
    name="Linear 2 Fidelity",
    objective=linear_multifidelity,
    search_space=Box(np.zeros(1), np.ones(1)),
    fidelity_search_space=_linear_multifidelity_search_space_builder(
        2, Box(np.zeros(1), np.ones(1))
    ),
    minimizers=_LINEAR_MULTIFIDELITY_MINIMIZERS[2],
    minimum=_LINEAR_MULTIFIDELITY_MINIMA[2],
    num_fidelities=2,
)
Linear3Fidelity = SingleObjectiveMultifidelityTestProblem(
    name="Linear 3 Fidelity",
    objective=linear_multifidelity,
    search_space=Box(np.zeros(1), np.ones(1)),
    fidelity_search_space=_linear_multifidelity_search_space_builder(
        3, Box(np.zeros(1), np.ones(1))
    ),
    minimizers=_LINEAR_MULTIFIDELITY_MINIMIZERS[3],
    minimum=_LINEAR_MULTIFIDELITY_MINIMA[3],
    num_fidelities=3,
)
Linear5Fidelity = SingleObjectiveMultifidelityTestProblem(
    name="Linear 5 Fidelity",
    objective=linear_multifidelity,
    search_space=Box(np.zeros(1), np.ones(1)),
    fidelity_search_space=_linear_multifidelity_search_space_builder(
        5, Box(np.zeros(1), np.ones(1))
    ),
    minimizers=_LINEAR_MULTIFIDELITY_MINIMIZERS[5],
    minimum=_LINEAR_MULTIFIDELITY_MINIMA[5],
    num_fidelities=5,
)
| 3,507 | 31.785047 | 100 | py |
trieste-develop | trieste-develop/trieste/objectives/utils.py |
"""
This module contains functions convenient for creating :class:`Observer` objects that return data
from objective functions, appropriately formatted for usage with the toolbox.
"""
from __future__ import annotations
from collections.abc import Callable
from typing import Optional, overload
from ..data import Dataset
from ..observer import MultiObserver, Observer, SingleObserver
from ..types import Tag, TensorType
@overload
def mk_observer(objective: Callable[[TensorType], TensorType]) -> SingleObserver:
    # Without a key, the observer returns a bare Dataset.
    ...
@overload
def mk_observer(objective: Callable[[TensorType], TensorType], key: Tag) -> MultiObserver:
    # With a key, the observer returns a single-entry mapping keyed by it.
    ...
def mk_observer(
    objective: Callable[[TensorType], TensorType], key: Optional[Tag] = None
) -> Observer:
    """
    :param objective: An objective function designed to be used with a single data set and model.
    :param key: An optional key to use to access the data from the observer result.
    :return: An observer returning the data from ``objective``.
    """
    if key is None:
        # No key: return the data as a bare Dataset.
        return lambda qp: Dataset(qp, objective(qp))
    # With a key: wrap the Dataset in a single-entry mapping.
    return lambda qp: {key: Dataset(qp, objective(qp))}
def mk_multi_observer(**kwargs: Callable[[TensorType], TensorType]) -> MultiObserver:
    """
    :param kwargs: Observation functions.
    :return: An multi-observer returning the data from ``kwargs``.
    """

    def observer(qp):
        # One tagged Dataset per observation function, all evaluated at the same points.
        return {tag: Dataset(qp, objective_fn(qp)) for tag, objective_fn in kwargs.items()}

    return observer
| 2,051 | 33.2 | 97 | py |
trieste-develop | trieste-develop/trieste/objectives/multi_objectives.py | """
This module contains synthetic multi-objective functions, useful for experimentation.
"""
from __future__ import annotations
import math
from dataclasses import dataclass
from functools import partial
import tensorflow as tf
from typing_extensions import Protocol
from ..space import Box
from ..types import TensorType
from .single_objectives import ObjectiveTestProblem
class GenParetoOptimalPoints(Protocol):
    """A Protocol representing a function that generates Pareto optimal points."""
    def __call__(self, n: int, seed: int | None = None) -> TensorType:
        """
        Generate `n` Pareto optimal points.
        :param n: The number of pareto optimal points to be generated.
        :param seed: An integer used to create a random seed for distributions that
            used to generate pareto optimal points.
        :return: The Pareto optimal points
        """
@dataclass(frozen=True)
class MultiObjectiveTestProblem(ObjectiveTestProblem):
    """
    Convenience container class for synthetic multi-objective test functions, containing
    a generator for the pareto optimal points, which can be used as a reference of performance
    measure of certain multi-objective optimization algorithms.
    """
    gen_pareto_optimal_points: GenParetoOptimalPoints
    """Function to generate Pareto optimal points, given the number of points and an optional
    random number seed."""
def vlmop2(x: TensorType, d: int) -> TensorType:
    """
    The VLMOP2 synthetic function.
    :param x: The points at which to evaluate the function, with shape [..., d].
    :param d: The dimensionality of the synthetic function.
    :return: The function values at ``x``, with shape [..., 2].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes(
        [(x, (..., d))],
        message=f"input x dim: {x.shape[-1]} does not align with pre-specified dim: {d}",
    )
    # Both objectives are Gaussian-like bumps centred at +/- 1/sqrt(d) on each axis.
    shift = 1 / tf.sqrt(tf.cast(d, x.dtype))
    first_objective = 1 - tf.exp(-1 * tf.reduce_sum((x - shift) ** 2, axis=-1))
    second_objective = 1 - tf.exp(-1 * tf.reduce_sum((x + shift) ** 2, axis=-1))
    return tf.stack([first_objective, second_objective], axis=-1)
def VLMOP2(input_dim: int) -> MultiObjectiveTestProblem:
    """
    The VLMOP2 problem, typically evaluated over :math:`[-2, 2]^d`.
    The idea pareto fronts lies on -1/sqrt(d) - 1/sqrt(d) and x1=...=xdim.
    See :cite:`van1999multiobjective` and :cite:`fonseca1995multiobjective`
    (the latter for discussion of pareto front property) for details.
    :param input_dim: The input dimensionality of the synthetic function.
    :return: The problem specification.
    """

    def gen_pareto_optimal_points(n: int, seed: int | None = None) -> TensorType:
        tf.debugging.assert_greater(n, 0)
        # The Pareto set is the diagonal segment x1 = ... = xd in [-1/sqrt(d), 1/sqrt(d)].
        shift = 1 / tf.sqrt(tf.cast(input_dim, tf.float64))
        diagonal = tf.tile(tf.linspace([-shift], [shift], n), [1, input_dim])
        return vlmop2(diagonal, input_dim)

    return MultiObjectiveTestProblem(
        name=f"VLMOP2({input_dim})",
        objective=partial(vlmop2, d=input_dim),
        search_space=Box([-2.0], [2.0]) ** input_dim,
        gen_pareto_optimal_points=gen_pareto_optimal_points,
    )
def dtlz_mkd(input_dim: int, num_objective: int) -> tuple[int, int, int]:
    """Return m/k/d values for dtlz synthetic functions."""
    tf.debugging.assert_greater(input_dim, 0)
    tf.debugging.assert_greater(num_objective, 0)
    tf.debugging.assert_greater(
        input_dim,
        num_objective,
        f"input dimension {input_dim}"
        f" must be greater than function objective numbers {num_objective}",
    )
    # m: number of objectives; k: dimensionality of the g-function inputs; d: input dim.
    num_objectives = num_objective
    g_input_dim = input_dim - num_objectives + 1
    return (num_objectives, g_input_dim, input_dim)
def dtlz1(x: TensorType, m: int, k: int, d: int) -> TensorType:
    """
    The DTLZ1 synthetic function.
    :param x: The points at which to evaluate the function, with shape [..., d].
    :param m: The objective numbers.
    :param k: The input dimensionality for g.
    :param d: The dimensionality of the synthetic function.
    :return: The function values at ``x``, with shape [..., m].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes(
        [(x, (..., d))],
        message=f"input x dim: {x.shape[-1]} does not align with pre-specified dim: {d}",
    )
    tf.debugging.assert_greater(m, 0, message=f"positive objective numbers expected but found {m}")
    # g measures the distance of the trailing k variables from 0.5 (the Pareto set value).
    def g(xM: TensorType) -> TensorType:
        return 100 * (
            k
            + tf.reduce_sum(
                (xM - 0.5) ** 2 - tf.cos(20 * math.pi * (xM - 0.5)), axis=-1, keepdims=True
            )
        )
    # Accumulate one objective per iteration into a TensorArray, then stack them
    # into the trailing dimension.
    ta = tf.TensorArray(x.dtype, size=m)
    for i in range(m):
        xM = x[..., m - 1 :]
        y = 1 + g(xM)
        # Product of the first (m - 1 - i) position variables ...
        y *= 1 / 2 * tf.reduce_prod(x[..., : m - 1 - i], axis=-1, keepdims=True)
        if i > 0:
            # ... times (1 - x_{m-i-1}) for all objectives after the first.
            y *= 1 - x[..., m - i - 1, tf.newaxis]
        ta = ta.write(i, y)
    return tf.squeeze(tf.concat(tf.split(ta.stack(), m, axis=0), axis=-1), axis=0)
def DTLZ1(input_dim: int, num_objective: int) -> MultiObjectiveTestProblem:
    """
    The DTLZ1 problem, the idea pareto fronts lie on a linear hyper-plane.
    See :cite:`deb2002scalable` for details.
    :param input_dim: The input dimensionality of the synthetic function.
    :param num_objective: The number of objectives.
    :return: The problem specification.
    """
    M, k, d = dtlz_mkd(input_dim, num_objective)
    def gen_pareto_optimal_points(n: int, seed: int | None = None) -> TensorType:
        # Sample points on the simplex sum(f_i) = 0.5 by taking differences of
        # sorted uniform variates (a standard simplex-sampling construction).
        tf.debugging.assert_greater_equal(M, 2)
        rnd = tf.random.uniform([n, M - 1], minval=0, maxval=1, seed=seed, dtype=tf.float64)
        strnd = tf.sort(rnd, axis=-1)
        strnd = tf.concat(
            [tf.zeros([n, 1], dtype=tf.float64), strnd, tf.ones([n, 1], dtype=tf.float64)], axis=-1
        )
        return 0.5 * (strnd[..., 1:] - strnd[..., :-1])
    return MultiObjectiveTestProblem(
        name=f"DTLZ1({input_dim}, {num_objective})",
        objective=partial(dtlz1, m=M, k=k, d=d),
        search_space=Box([0.0], [1.0]) ** d,
        gen_pareto_optimal_points=gen_pareto_optimal_points,
    )
def dtlz2(x: TensorType, m: int, d: int) -> TensorType:
    """
    The DTLZ2 synthetic function.
    :param x: The points at which to evaluate the function, with shape [..., d].
    :param m: The objective numbers.
    :param d: The dimensionality of the synthetic function.
    :return: The function values at ``x``, with shape [..., m].
    :raise ValueError (or InvalidArgumentError): If ``x`` has an invalid shape.
    """
    tf.debugging.assert_shapes(
        [(x, (..., d))],
        message=f"input x dim: {x.shape[-1]} does not align with pre-specified dim: {d}",
    )
    tf.debugging.assert_greater(m, 0, message=f"positive objective numbers expected but found {m}")
    # g measures the squared distance of the trailing variables from 0.5.
    def g(xM: TensorType) -> TensorType:
        z = (xM - 0.5) ** 2
        return tf.reduce_sum(z, axis=-1, keepdims=True)
    # Build each objective as (1 + g) times a product of cosines of the leading
    # position variables, with a single sine factor for all but the first objective.
    ta = tf.TensorArray(x.dtype, size=m)
    for i in tf.range(m):
        y = 1 + g(x[..., m - 1 :])
        for j in tf.range(m - 1 - i):
            y *= tf.cos(math.pi / 2 * x[..., j, tf.newaxis])
        if i > 0:
            y *= tf.sin(math.pi / 2 * x[..., m - 1 - i, tf.newaxis])
        ta = ta.write(i, y)
    return tf.squeeze(tf.concat(tf.split(ta.stack(), m, axis=0), axis=-1), axis=0)
def DTLZ2(input_dim: int, num_objective: int) -> MultiObjectiveTestProblem:
    """
    Build the DTLZ2 test problem, whose ideal Pareto front lies on (part of)
    the unit hyper-sphere. See :cite:`deb2002scalable` for details.

    :param input_dim: The input dimensionality of the synthetic function.
    :param num_objective: The number of objectives.
    :return: The problem specification.
    """
    num_obj, _, dim = dtlz_mkd(input_dim, num_objective)

    def gen_pareto_optimal_points(n: int, seed: int | None = None) -> TensorType:
        # Normalised standard-normal draws are uniform on the unit sphere;
        # taking absolute values restricts them to the non-negative orthant,
        # which is where the DTLZ2 Pareto front lives.
        tf.debugging.assert_greater_equal(num_obj, 2)
        gaussian = tf.random.normal([n, num_obj], seed=seed, dtype=tf.float64)
        return tf.abs(gaussian / tf.norm(gaussian, axis=-1, keepdims=True))

    return MultiObjectiveTestProblem(
        name=f"DTLZ2({input_dim}, {num_objective})",
        objective=partial(dtlz2, m=num_obj, d=dim),
        search_space=Box([0.0], [1.0]) ** dim,
        gen_pareto_optimal_points=gen_pareto_optimal_points,
    )
| 8,968 | 36.527197 | 99 | py |
trieste-develop | trieste-develop/trieste/models/utils.py |
"""
This module contains auxiliary objects and functions that are used by multiple model types.
"""
from __future__ import annotations
import gpflow
import tensorflow as tf
from gpflow.utilities.traversal import _merge_leaf_components, leaf_components
from .. import logging
from ..data import Dataset
from .interfaces import ProbabilisticModel
def write_summary_data_based_metrics(
    dataset: Dataset,
    model: ProbabilisticModel,
    prefix: str = "",
) -> None:
    """
    Logging utility for writing TensorBoard summary of various metrics for model diagnostics.

    :param dataset: The dataset to use for computing the metrics. All available data in the
        dataset will be used.
    :param model: The model to produce metrics for.
    :param prefix: The prefix to add to "accuracy" category of model summaries.
    """
    name = prefix + "accuracy"
    predictions = model.predict(dataset.query_points)
    mean = predictions[0]
    variance = predictions[1]
    observations = dataset.observations

    # basics: distributions and summary statistics of predictions and data
    logging.histogram(f"{name}/predict_mean", mean)
    logging.scalar(f"{name}/predict_mean__mean", tf.reduce_mean(mean))
    logging.histogram(f"{name}/predict_variance", variance)
    logging.scalar(f"{name}/predict_variance__mean", tf.reduce_mean(variance))
    logging.histogram(f"{name}/observations", observations)
    logging.scalar(f"{name}/observations_mean", tf.reduce_mean(observations))
    logging.scalar(f"{name}/observations_variance", tf.math.reduce_variance(observations))

    # accuracy metrics: raw and variance-standardised prediction errors
    errors = tf.cast(observations, mean.dtype) - mean
    standardised_errors = errors / tf.math.sqrt(variance)
    logging.histogram(f"{name}/absolute_error", tf.math.abs(errors))
    logging.histogram(f"{name}/z_residuals", standardised_errors)
    logging.scalar(f"{name}/root_mean_square_error", tf.math.sqrt(tf.reduce_mean(errors**2)))
    logging.scalar(f"{name}/mean_absolute_error", tf.reduce_mean(tf.math.abs(errors)))
    logging.scalar(f"{name}/z_residuals_std", tf.math.reduce_std(standardised_errors))

    # variance metrics: how well predicted variance matches squared error
    variance_error = variance - errors**2
    logging.histogram(f"{name}/variance_error", variance_error)
    logging.scalar(
        f"{name}/root_mean_variance_error",
        tf.math.sqrt(tf.reduce_mean(variance_error**2)),
    )
def write_summary_kernel_parameters(kernel: gpflow.kernels.Kernel, prefix: str = "") -> None:
    """
    Logging utility for writing TensorBoard summary of kernel parameters. Provides useful
    diagnostics for models with a GPflow kernel. Only trainable parameters are logged.

    :param kernel: The kernel to use for computing the metrics.
    :param prefix: The prefix to add to "kernel" category of model summaries.
    """
    params = _merge_leaf_components(leaf_components(kernel))
    for param_name, param in params.items():
        if not param.trainable:
            continue
        # Scalars are logged directly; rank-1 parameters element-wise.
        # Parameters of rank >= 2 are skipped.
        if tf.rank(param) == 0:
            logging.scalar(f"{prefix}kernel.{param_name}", param)
        elif tf.rank(param) == 1:
            for idx, element in enumerate(param):
                logging.scalar(f"{prefix}kernel.{param_name}[{idx}]", element)
def write_summary_likelihood_parameters(
    likelihood: gpflow.likelihoods.Likelihood, prefix: str = ""
) -> None:
    """
    Logging utility for writing TensorBoard summary of likelihood parameters. Provides useful
    diagnostics for models with a GPflow likelihood. Only trainable parameters are logged.

    :param likelihood: The likelihood to use for computing the metrics.
    :param prefix: The prefix to add to "likelihood" category of model summaries.
    """
    params = _merge_leaf_components(leaf_components(likelihood))
    for param_name, param in params.items():
        if not param.trainable:
            continue
        logging.scalar(f"{prefix}likelihood.{param_name}", param)
| 4,316 | 40.114286 | 98 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.