| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
| ARFlow | ARFlow-master/transforms/sep_transforms.py |
import numpy as np
import torch
# from scipy.misc import imresize
from skimage.transform import resize as imresize
class ArrayToTensor(object):
"""Converts a numpy.ndarray (H x W x C) to a torch.FloatTensor of shape (C x H x W)."""
    def __call__(self, array):
        assert isinstance(array, np.ndarray)
        # put it from HWC to CHW format
        array = np.transpose(array, (2, 0, 1))
        # handle numpy array
        tensor = torch.from_numpy(array)
        return tensor.float()
class Zoom(object):
def __init__(self, new_h, new_w):
self.new_h = new_h
self.new_w = new_w
def __call__(self, image):
h, w, _ = image.shape
if h == self.new_h and w == self.new_w:
return image
image = imresize(image, (self.new_h, self.new_w))
return image
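# --- Hedged usage sketch (added for illustration; not part of the original
# file): chaining Zoom and ArrayToTensor on a dummy image. The target size
# (256 x 320) is arbitrary.
def _demo_sep_transforms():
    dummy = np.random.rand(480, 640, 3).astype(np.float32)  # H x W x C in [0, 1]
    zoomed = Zoom(256, 320)(dummy)                           # resize to 256 x 320
    tensor = ArrayToTensor()(zoomed)                         # 3 x 256 x 320 float tensor
    return tensor.shape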
| 832 | 26.766667 | 91 | py |
| ARFlow | ARFlow-master/transforms/ar_transforms/interpolation.py |
## Portions of Code from, copyright 2018 Jochen Gast
from __future__ import absolute_import, division, print_function
import torch
from torch import nn
import torch.nn.functional as tf
def _bchw2bhwc(tensor):
return tensor.transpose(1,2).transpose(2,3)
def _bhwc2bchw(tensor):
return tensor.transpose(2,3).transpose(1,2)
class Meshgrid(nn.Module):
def __init__(self):
super(Meshgrid, self).__init__()
self.width = 0
self.height = 0
self.register_buffer("xx", torch.zeros(1,1))
self.register_buffer("yy", torch.zeros(1,1))
self.register_buffer("rangex", torch.zeros(1,1))
self.register_buffer("rangey", torch.zeros(1,1))
def _compute_meshgrid(self, width, height):
torch.arange(0, width, out=self.rangex)
torch.arange(0, height, out=self.rangey)
self.xx = self.rangex.repeat(height, 1).contiguous()
self.yy = self.rangey.repeat(width, 1).t().contiguous()
def forward(self, width, height):
if self.width != width or self.height != height:
self._compute_meshgrid(width=width, height=height)
self.width = width
self.height = height
return self.xx, self.yy
class BatchSub2Ind(nn.Module):
def __init__(self):
super(BatchSub2Ind, self).__init__()
self.register_buffer("_offsets", torch.LongTensor())
def forward(self, shape, row_sub, col_sub, out=None):
batch_size = row_sub.size(0)
height, width = shape
ind = row_sub*width + col_sub
torch.arange(batch_size, out=self._offsets)
self._offsets *= (height*width)
if out is None:
return torch.add(ind, self._offsets.view(-1,1,1))
else:
torch.add(ind, self._offsets.view(-1,1,1), out=out)
class Interp2(nn.Module):
def __init__(self, clamp=False):
super(Interp2, self).__init__()
self._clamp = clamp
self._batch_sub2ind = BatchSub2Ind()
self.register_buffer("_x0", torch.LongTensor())
self.register_buffer("_x1", torch.LongTensor())
self.register_buffer("_y0", torch.LongTensor())
self.register_buffer("_y1", torch.LongTensor())
self.register_buffer("_i00", torch.LongTensor())
self.register_buffer("_i01", torch.LongTensor())
self.register_buffer("_i10", torch.LongTensor())
self.register_buffer("_i11", torch.LongTensor())
self.register_buffer("_v00", torch.FloatTensor())
self.register_buffer("_v01", torch.FloatTensor())
self.register_buffer("_v10", torch.FloatTensor())
self.register_buffer("_v11", torch.FloatTensor())
self.register_buffer("_x", torch.FloatTensor())
self.register_buffer("_y", torch.FloatTensor())
def forward(self, v, xq, yq):
batch_size, channels, height, width = v.size()
# clamp if wanted
if self._clamp:
xq.clamp_(0, width - 1)
yq.clamp_(0, height - 1)
# ------------------------------------------------------------------
# Find neighbors
#
# x0 = torch.floor(xq).long(), x0.clamp_(0, width - 1)
# x1 = x0 + 1, x1.clamp_(0, width - 1)
# y0 = torch.floor(yq).long(), y0.clamp_(0, height - 1)
# y1 = y0 + 1, y1.clamp_(0, height - 1)
#
# ------------------------------------------------------------------
self._x0 = torch.floor(xq).long().clamp(0, width - 1)
self._y0 = torch.floor(yq).long().clamp(0, height - 1)
self._x1 = torch.add(self._x0, 1).clamp(0, width - 1)
self._y1 = torch.add(self._y0, 1).clamp(0, height - 1)
# batch_sub2ind
self._batch_sub2ind([height, width], self._y0, self._x0, out=self._i00)
self._batch_sub2ind([height, width], self._y0, self._x1, out=self._i01)
self._batch_sub2ind([height, width], self._y1, self._x0, out=self._i10)
self._batch_sub2ind([height, width], self._y1, self._x1, out=self._i11)
# reshape
v_flat = _bchw2bhwc(v).contiguous().view(-1, channels)
torch.index_select(v_flat, dim=0, index=self._i00.view(-1), out=self._v00)
torch.index_select(v_flat, dim=0, index=self._i01.view(-1), out=self._v01)
torch.index_select(v_flat, dim=0, index=self._i10.view(-1), out=self._v10)
torch.index_select(v_flat, dim=0, index=self._i11.view(-1), out=self._v11)
# local_coords
torch.add(xq, - self._x0.float(), out=self._x)
torch.add(yq, - self._y0.float(), out=self._y)
# weights
w00 = torch.unsqueeze((1.0 - self._y) * (1.0 - self._x), dim=1)
w01 = torch.unsqueeze((1.0 - self._y) * self._x, dim=1)
w10 = torch.unsqueeze(self._y * (1.0 - self._x), dim=1)
w11 = torch.unsqueeze(self._y * self._x, dim=1)
def _reshape(u):
return _bhwc2bchw(u.view(batch_size, height, width, channels))
# values
values = _reshape(self._v00)*w00 + _reshape(self._v01)*w01 \
+ _reshape(self._v10)*w10 + _reshape(self._v11)*w11
if self._clamp:
return values
else:
# find_invalid
invalid = ((xq < 0) | (xq >= width) | (yq < 0) | (yq >= height)).unsqueeze(dim=1).float()
# maskout invalid
transformed = invalid * torch.zeros_like(values) + (1.0 - invalid)*values
return transformed
def resize2D(inputs, size_targets, mode="bilinear"):
    size_inputs = [inputs.size(2), inputs.size(3)]
    if size_inputs == size_targets:
        return inputs  # nothing to do
    elif any(t < i for t, i in zip(size_targets, size_inputs)):
        resized = tf.adaptive_avg_pool2d(inputs, size_targets)  # downscaling
    else:
        resized = tf.interpolate(inputs, size=size_targets, mode=mode)  # upsampling
    return resized
def resize2D_as(inputs, output_as, mode="bilinear"):
size_targets = [output_as.size(2), output_as.size(3)]
return resize2D(inputs, size_targets, mode=mode)
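# --- Hedged usage sketch (added for illustration; not part of the original
# file): bilinear sampling of a B x C x H x W tensor with Interp2 at pixel
# centers shifted by half a pixel.
def _demo_interp2():
    v = torch.arange(16.).view(1, 1, 4, 4)       # one 4 x 4 single-channel image
    xx, yy = Meshgrid()(width=4, height=4)       # integer pixel coordinates
    xq = xx.unsqueeze(0) + 0.5                   # shift queries by half a pixel
    yq = yy.unsqueeze(0) + 0.5
    return Interp2(clamp=True)(v, xq, yq)        # 1 x 1 x 4 x 4 sampled values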
| 6,104 | 37.15625 | 101 | py |
| ARFlow | ARFlow-master/transforms/ar_transforms/sp_transfroms.py |
# Part of the code from https://github.com/visinf/irr/blob/master/augmentations.py
import torch
import torch.nn as nn
from transforms.ar_transforms.interpolation import Interp2
from transforms.ar_transforms.interpolation import Meshgrid
import numpy as np
def denormalize_coords(xx, yy, width, height):
""" scale indices from [-1, 1] to [0, width/height] """
xx = 0.5 * (width - 1.0) * (xx.float() + 1.0)
yy = 0.5 * (height - 1.0) * (yy.float() + 1.0)
return xx, yy
def normalize_coords(xx, yy, width, height):
""" scale indices from [0, width/height] to [-1, 1] """
xx = (2.0 / (width - 1.0)) * xx.float() - 1.0
yy = (2.0 / (height - 1.0)) * yy.float() - 1.0
return xx, yy
def apply_transform_to_params(theta0, theta_transform):
a1 = theta0[:, 0]
a2 = theta0[:, 1]
a3 = theta0[:, 2]
a4 = theta0[:, 3]
a5 = theta0[:, 4]
a6 = theta0[:, 5]
#
b1 = theta_transform[:, 0]
b2 = theta_transform[:, 1]
b3 = theta_transform[:, 2]
b4 = theta_transform[:, 3]
b5 = theta_transform[:, 4]
b6 = theta_transform[:, 5]
#
c1 = a1 * b1 + a4 * b2
c2 = a2 * b1 + a5 * b2
c3 = b3 + a3 * b1 + a6 * b2
c4 = a1 * b4 + a4 * b5
c5 = a2 * b4 + a5 * b5
c6 = b6 + a3 * b4 + a6 * b5
#
new_theta = torch.stack([c1, c2, c3, c4, c5, c6], dim=1)
return new_theta
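# --- Hedged sanity check (added for illustration; not part of the original
# file). Reading each 6-vector as the affine matrix
# [[t1, t2, t3], [t4, t5, t6], [0, 0, 1]], the composition above equals the
# matrix product B @ A, i.e. theta0 applied first, theta_transform second.
def _demo_param_composition():
    def to_mat(theta):
        m = torch.eye(3).repeat(theta.size(0), 1, 1)
        m[:, 0, :] = theta[:, 0:3]
        m[:, 1, :] = theta[:, 3:6]
        return m
    a, b = torch.rand(2, 6), torch.rand(2, 6)
    c = apply_transform_to_params(a, b)
    assert torch.allclose(to_mat(c), torch.bmm(to_mat(b), to_mat(a)), atol=1e-6)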
class _IdentityParams(nn.Module):
def __init__(self):
super(_IdentityParams, self).__init__()
self._batch_size = 0
self.register_buffer("_o", torch.FloatTensor())
self.register_buffer("_i", torch.FloatTensor())
def _update(self, batch_size):
torch.zeros([batch_size, 1], out=self._o)
torch.ones([batch_size, 1], out=self._i)
return torch.cat([self._i, self._o, self._o, self._o, self._i, self._o], dim=1)
def forward(self, batch_size):
if self._batch_size != batch_size:
self._identity_params = self._update(batch_size)
self._batch_size = batch_size
return self._identity_params
class RandomMirror(nn.Module):
def __init__(self, vertical=True, p=0.5):
super(RandomMirror, self).__init__()
self._batch_size = 0
self._p = p
self._vertical = vertical
self.register_buffer("_mirror_probs", torch.FloatTensor())
def update_probs(self, batch_size):
torch.ones([batch_size, 1], out=self._mirror_probs)
self._mirror_probs *= self._p
def forward(self, theta_list):
batch_size = theta_list[0].size(0)
if batch_size != self._batch_size:
self.update_probs(batch_size)
self._batch_size = batch_size
        # apply a random sign to a1 a2 a3 (the coefficients that produce the x output)
sign = torch.sign(2.0 * torch.bernoulli(self._mirror_probs) - 1.0)
i = torch.ones_like(sign)
horizontal_mirror = torch.cat([sign, sign, sign, i, i, i], dim=1)
theta_list = [theta * horizontal_mirror for theta in theta_list]
        # apply a random sign to a4 a5 a6 (the coefficients that produce the y output)
if self._vertical:
sign = torch.sign(2.0 * torch.bernoulli(self._mirror_probs) - 1.0)
vertical_mirror = torch.cat([i, i, i, sign, sign, sign], dim=1)
theta_list = [theta * vertical_mirror for theta in theta_list]
return theta_list
class RandomAffineFlow(nn.Module):
def __init__(self, cfg, addnoise=True):
super(RandomAffineFlow, self).__init__()
self.cfg = cfg
self._interp2 = Interp2(clamp=False)
self._flow_interp2 = Interp2(clamp=False)
self._meshgrid = Meshgrid()
self._identity = _IdentityParams()
self._random_mirror = RandomMirror(cfg.vflip) if cfg.hflip else RandomMirror(p=1)
self._addnoise = addnoise
self.register_buffer("_noise1", torch.FloatTensor())
self.register_buffer("_noise2", torch.FloatTensor())
self.register_buffer("_xbounds", torch.FloatTensor([-1, -1, 1, 1]))
self.register_buffer("_ybounds", torch.FloatTensor([-1, 1, -1, 1]))
self.register_buffer("_x", torch.IntTensor(1))
self.register_buffer("_y", torch.IntTensor(1))
def inverse_transform_coords(self, width, height, thetas, offset_x=None,
offset_y=None):
xx, yy = self._meshgrid(width=width, height=height)
xx = torch.unsqueeze(xx, dim=0).float()
yy = torch.unsqueeze(yy, dim=0).float()
if offset_x is not None:
xx = xx + offset_x
if offset_y is not None:
yy = yy + offset_y
a1 = thetas[:, 0].contiguous().view(-1, 1, 1)
a2 = thetas[:, 1].contiguous().view(-1, 1, 1)
a3 = thetas[:, 2].contiguous().view(-1, 1, 1)
a4 = thetas[:, 3].contiguous().view(-1, 1, 1)
a5 = thetas[:, 4].contiguous().view(-1, 1, 1)
a6 = thetas[:, 5].contiguous().view(-1, 1, 1)
xx, yy = normalize_coords(xx, yy, width=width, height=height)
xq = a1 * xx + a2 * yy + a3
yq = a4 * xx + a5 * yy + a6
xq, yq = denormalize_coords(xq, yq, width=width, height=height)
return xq, yq
def transform_coords(self, width, height, thetas):
xx1, yy1 = self._meshgrid(width=width, height=height)
xx, yy = normalize_coords(xx1, yy1, width=width, height=height)
def _unsqueeze12(u):
return torch.unsqueeze(torch.unsqueeze(u, dim=1), dim=1)
a1 = _unsqueeze12(thetas[:, 0])
a2 = _unsqueeze12(thetas[:, 1])
a3 = _unsqueeze12(thetas[:, 2])
a4 = _unsqueeze12(thetas[:, 3])
a5 = _unsqueeze12(thetas[:, 4])
a6 = _unsqueeze12(thetas[:, 5])
#
z = a1 * a5 - a2 * a4
b1 = a5 / z
b2 = - a2 / z
b4 = - a4 / z
b5 = a1 / z
#
xhat = xx - a3
yhat = yy - a6
xq = b1 * xhat + b2 * yhat
yq = b4 * xhat + b5 * yhat
xq, yq = denormalize_coords(xq, yq, width=width, height=height)
return xq, yq
def find_invalid(self, width, height, thetas):
x = self._xbounds
y = self._ybounds
#
a1 = torch.unsqueeze(thetas[:, 0], dim=1)
a2 = torch.unsqueeze(thetas[:, 1], dim=1)
a3 = torch.unsqueeze(thetas[:, 2], dim=1)
a4 = torch.unsqueeze(thetas[:, 3], dim=1)
a5 = torch.unsqueeze(thetas[:, 4], dim=1)
a6 = torch.unsqueeze(thetas[:, 5], dim=1)
#
z = a1 * a5 - a2 * a4
b1 = a5 / z
b2 = - a2 / z
b4 = - a4 / z
b5 = a1 / z
#
xhat = x - a3
yhat = y - a6
xq = b1 * xhat + b2 * yhat
yq = b4 * xhat + b5 * yhat
xq, yq = denormalize_coords(xq, yq, width=width, height=height)
#
invalid = (
(xq < 0) | (yq < 0) | (xq >= width) | (yq >= height)
).sum(dim=1, keepdim=True) > 0
return invalid
def apply_random_transforms_to_params(self,
theta0,
max_translate,
min_zoom, max_zoom,
min_squeeze, max_squeeze,
min_rotate, max_rotate,
validate_size=None):
max_translate *= 0.5
batch_size = theta0.size(0)
height, width = validate_size
# collect valid params here
thetas = torch.zeros_like(theta0)
zoom = theta0.new(batch_size, 1).zero_()
squeeze = torch.zeros_like(zoom)
tx = torch.zeros_like(zoom)
ty = torch.zeros_like(zoom)
phi = torch.zeros_like(zoom)
invalid = torch.ones_like(zoom).byte()
while invalid.sum() > 0:
# random sampling
zoom.uniform_(min_zoom, max_zoom)
squeeze.uniform_(min_squeeze, max_squeeze)
tx.uniform_(-max_translate, max_translate)
ty.uniform_(-max_translate, max_translate)
phi.uniform_(min_rotate, max_rotate)
# construct affine parameters
sx = zoom * squeeze
sy = zoom / squeeze
sin_phi = torch.sin(phi)
cos_phi = torch.cos(phi)
b1 = cos_phi * sx
b2 = sin_phi * sy
b3 = tx
b4 = - sin_phi * sx
b5 = cos_phi * sy
b6 = ty
theta_transform = torch.cat([b1, b2, b3, b4, b5, b6], dim=1)
theta_try = apply_transform_to_params(theta0, theta_transform)
            thetas = invalid.float() * theta_try + (1.0 - invalid.float()) * thetas
# compute new invalid ones
invalid = self.find_invalid(width=width, height=height, thetas=thetas)
# here we should have good thetas within borders
return thetas
def transform_image(self, images, thetas):
batch_size, channels, height, width = images.size()
xq, yq = self.transform_coords(width=width, height=height, thetas=thetas)
transformed = self._interp2(images, xq, yq)
return transformed
def transform_flow(self, flow, theta1, theta2):
batch_size, channels, height, width = flow.size()
u = flow[:, 0, :, :]
v = flow[:, 1, :, :]
# inverse transform coords
x0, y0 = self.inverse_transform_coords(
width=width, height=height, thetas=theta1)
x1, y1 = self.inverse_transform_coords(
width=width, height=height, thetas=theta2, offset_x=u, offset_y=v)
# subtract and create new flow
u = x1 - x0
v = y1 - y0
new_flow = torch.stack([u, v], dim=1)
# transform coords
xq, yq = self.transform_coords(width=width, height=height, thetas=theta1)
# interp2
transformed = self._flow_interp2(new_flow, xq, yq)
return transformed
def forward(self, data):
# 01234 flow 12 21 23 32
imgs = data['imgs']
flows_f = data['flows_f']
masks_f = data['masks_f']
batch_size, _, height, width = imgs[0].size()
# identity = no transform
theta0 = self._identity(batch_size)
# global transform
theta_list = [self.apply_random_transforms_to_params(
theta0,
max_translate=self.cfg.trans[0],
min_zoom=self.cfg.zoom[0], max_zoom=self.cfg.zoom[1],
min_squeeze=self.cfg.squeeze[0], max_squeeze=self.cfg.squeeze[1],
min_rotate=self.cfg.rotate[0], max_rotate=self.cfg.rotate[1],
validate_size=[height, width])
]
# relative transform
for i in range(len(imgs) - 1):
theta_list.append(
self.apply_random_transforms_to_params(
theta_list[-1],
max_translate=self.cfg.trans[1],
min_zoom=self.cfg.zoom[2], max_zoom=self.cfg.zoom[3],
min_squeeze=self.cfg.squeeze[2], max_squeeze=self.cfg.squeeze[3],
                    min_rotate=self.cfg.rotate[2], max_rotate=self.cfg.rotate[3],
validate_size=[height, width])
)
# random flip images
theta_list = self._random_mirror(theta_list)
# 01234
imgs = [self.transform_image(im, theta) for im, theta in zip(imgs, theta_list)]
if len(imgs) > 2:
theta_list = theta_list[1:-1]
# 12 23
flows_f = [self.transform_flow(flo, theta1, theta2) for flo, theta1, theta2 in
zip(flows_f, theta_list[:-1], theta_list[1:])]
masks_f = [self.transform_image(mask, theta) for mask, theta in
zip(masks_f, theta_list)]
if self._addnoise:
stddev = np.random.uniform(0.0, 0.04)
for im in imgs:
noise = torch.zeros_like(im)
noise.normal_(std=stddev)
im.add_(noise)
im.clamp_(0.0, 1.0)
data['imgs'] = imgs
data['flows_f'] = flows_f
data['masks_f'] = masks_f
return data
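# --- Hedged usage sketch (added for illustration; not part of the original
# file). The cfg fields mirror those read above (trans, zoom, squeeze,
# rotate, hflip, vflip); the values here are illustrative only.
def _demo_random_affine_flow():
    from types import SimpleNamespace
    cfg = SimpleNamespace(trans=[0.1, 0.05],
                          zoom=[1.0, 1.2, 0.98, 1.02],
                          squeeze=[1.0, 1.1, 0.99, 1.01],
                          rotate=[-0.1, 0.1, -0.02, 0.02],
                          hflip=True, vflip=True)
    sp_transform = RandomAffineFlow(cfg, addnoise=True)
    data = {'imgs': [torch.rand(2, 3, 64, 64) for _ in range(2)],
            'flows_f': [torch.zeros(2, 2, 64, 64)],
            'masks_f': [torch.ones(2, 1, 64, 64)]}
    return sp_transform(data)  # same keys, spatially transformed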
| 12,154 | 34.437318 | 89 | py |
| ARFlow | ARFlow-master/transforms/ar_transforms/ap_transforms.py |
import numpy as np
import torch
from torchvision import transforms as tf
from PIL import ImageFilter
def get_ap_transforms(cfg):
transforms = [ToPILImage()]
if cfg.cj:
transforms.append(ColorJitter(brightness=cfg.cj_bri,
contrast=cfg.cj_con,
saturation=cfg.cj_sat,
hue=cfg.cj_hue))
if cfg.gblur:
transforms.append(RandomGaussianBlur(0.5, 3))
transforms.append(ToTensor())
if cfg.gamma:
transforms.append(RandomGamma(min_gamma=0.7, max_gamma=1.5, clip_image=True))
return tf.Compose(transforms)
# from https://github.com/visinf/irr/blob/master/datasets/transforms.py
class ToPILImage(tf.ToPILImage):
def __call__(self, imgs):
return [super(ToPILImage, self).__call__(im) for im in imgs]
class ColorJitter(tf.ColorJitter):
def __call__(self, imgs):
transform = self.get_params(self.brightness, self.contrast,
self.saturation, self.hue)
return [transform(im) for im in imgs]
class ToTensor(tf.ToTensor):
def __call__(self, imgs):
return [super(ToTensor, self).__call__(im) for im in imgs]
class RandomGamma():
def __init__(self, min_gamma=0.7, max_gamma=1.5, clip_image=False):
self._min_gamma = min_gamma
self._max_gamma = max_gamma
self._clip_image = clip_image
@staticmethod
def get_params(min_gamma, max_gamma):
return np.random.uniform(min_gamma, max_gamma)
@staticmethod
def adjust_gamma(image, gamma, clip_image):
adjusted = torch.pow(image, gamma)
if clip_image:
adjusted.clamp_(0.0, 1.0)
return adjusted
def __call__(self, imgs):
gamma = self.get_params(self._min_gamma, self._max_gamma)
return [self.adjust_gamma(im, gamma, self._clip_image) for im in imgs]
class RandomGaussianBlur():
def __init__(self, p, max_k_sz):
self.p = p
self.max_k_sz = max_k_sz
def __call__(self, imgs):
if np.random.random() < self.p:
radius = np.random.uniform(0, self.max_k_sz)
imgs = [im.filter(ImageFilter.GaussianBlur(radius)) for im in imgs]
return imgs
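# --- Hedged usage sketch (added for illustration; not part of the original
# file). The cfg fields mirror those read by get_ap_transforms; it assumes a
# torchvision version where ColorJitter.get_params returns a callable, as the
# subclass above expects.
def _demo_ap_transforms():
    from types import SimpleNamespace
    cfg = SimpleNamespace(cj=True, cj_bri=0.3, cj_con=0.3, cj_sat=0.3,
                          cj_hue=0.1, gblur=True, gamma=True)
    ap_transform = get_ap_transforms(cfg)
    imgs = [torch.rand(3, 64, 64) for _ in range(2)]  # two frames in [0, 1]
    return ap_transform(imgs)                         # list of augmented tensors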
| 2,275 | 30.611111 | 85 | py |
| ARFlow | ARFlow-master/transforms/ar_transforms/oc_transforms.py |
import numpy as np
import torch
# from skimage.color import rgb2yuv
import cv2
from fast_slic.avx2 import SlicAvx2 as Slic
from skimage.segmentation import slic as sk_slic
def run_slic_pt(img_batch, n_seg=200, compact=10, rd_select=(8, 16), fast=True):
    """ Run SLIC superpixel segmentation on a batch of images.
    :param img_batch: Nx3xHxW 0~1 float32
    :param n_seg: number of superpixel segments
    :param compact: SLIC compactness
    :param rd_select: (low, high) range for how many segments to randomly keep
    :param fast: use fast_slic on LAB images instead of skimage's slic
    :return: Nx1xHxW float32
    """
B = img_batch.size(0)
dtype = img_batch.type()
img_batch = np.split(
img_batch.detach().cpu().numpy().transpose([0, 2, 3, 1]), B, axis=0)
out = []
if fast:
fast_slic = Slic(num_components=n_seg, compactness=compact, min_size_factor=0.8)
for img in img_batch:
img = np.copy((img * 255).squeeze(0).astype(np.uint8), order='C')
if fast:
img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
seg = fast_slic.iterate(img)
else:
            seg = sk_slic(img, n_segments=n_seg, compactness=compact)
if rd_select is not None:
n_select = np.random.randint(rd_select[0], rd_select[1])
select_list = np.random.choice(range(0, np.max(seg) + 1), n_select,
replace=False)
seg = np.bitwise_or.reduce([seg == seg_id for seg_id in select_list])
out.append(seg)
x_out = torch.tensor(np.stack(out)).type(dtype).unsqueeze(1)
return x_out
def random_crop(img, flow, occ_mask, crop_sz):
    """ Crop the same random window from images, flow, and occlusion mask.
    :param img: Nx6xHxW
    :param flow: Nx2xHxW
    :param occ_mask: Nx1xHxW
    :param crop_sz: (crop_h, crop_w)
    :return: cropped img, flow, occ_mask
    """
_, _, h, w = img.size()
c_h, c_w = crop_sz
if c_h == h and c_w == w:
return img, flow, occ_mask
x1 = np.random.randint(0, w - c_w)
y1 = np.random.randint(0, h - c_h)
img = img[:, :, y1:y1 + c_h, x1: x1 + c_w]
flow = flow[:, :, y1:y1 + c_h, x1: x1 + c_w]
occ_mask = occ_mask[:, :, y1:y1 + c_h, x1: x1 + c_w]
return img, flow, occ_mask
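# --- Hedged usage sketch (added for illustration; not part of the original
# file). Shapes follow the docstrings above; fast=False exercises the
# skimage path.
def _demo_oc_transforms():
    imgs = torch.rand(2, 3, 64, 64)                  # N x 3 x H x W in [0, 1]
    seg = run_slic_pt(imgs, n_seg=50, rd_select=(4, 8), fast=False)
    img_pair = torch.rand(2, 6, 64, 64)              # concatenated image pair
    flow = torch.rand(2, 2, 64, 64)
    occ = torch.ones(2, 1, 64, 64)
    return seg.shape, random_crop(img_pair, flow, occ, (48, 48))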
| 1,952 | 29.046154 | 91 | py |
| ARFlow | ARFlow-master/losses/flow_loss.py |
import torch.nn as nn
import torch.nn.functional as F
from .loss_blocks import SSIM, smooth_grad_1st, smooth_grad_2nd, TernaryLoss
from utils.warp_utils import flow_warp
from utils.warp_utils import get_occu_mask_bidirection, get_occu_mask_backward
class unFlowLoss(nn.modules.Module):
def __init__(self, cfg):
super(unFlowLoss, self).__init__()
self.cfg = cfg
def loss_photomatric(self, im1_scaled, im1_recons, occu_mask1):
loss = []
if self.cfg.w_l1 > 0:
loss += [self.cfg.w_l1 * (im1_scaled - im1_recons).abs() * occu_mask1]
if self.cfg.w_ssim > 0:
loss += [self.cfg.w_ssim * SSIM(im1_recons * occu_mask1,
im1_scaled * occu_mask1)]
if self.cfg.w_ternary > 0:
loss += [self.cfg.w_ternary * TernaryLoss(im1_recons * occu_mask1,
im1_scaled * occu_mask1)]
return sum([l.mean() for l in loss]) / occu_mask1.mean()
def loss_smooth(self, flow, im1_scaled):
if 'smooth_2nd' in self.cfg and self.cfg.smooth_2nd:
func_smooth = smooth_grad_2nd
else:
func_smooth = smooth_grad_1st
loss = []
loss += [func_smooth(flow, im1_scaled, self.cfg.alpha)]
return sum([l.mean() for l in loss])
def forward(self, output, target):
"""
:param output: Multi-scale forward/backward flows n * [B x 4 x h x w]
:param target: image pairs Nx6xHxW
:return:
"""
pyramid_flows = output
im1_origin = target[:, :3]
im2_origin = target[:, 3:]
pyramid_smooth_losses = []
pyramid_warp_losses = []
self.pyramid_occu_mask1 = []
self.pyramid_occu_mask2 = []
s = 1.
for i, flow in enumerate(pyramid_flows):
if self.cfg.w_scales[i] == 0:
pyramid_warp_losses.append(0)
pyramid_smooth_losses.append(0)
continue
b, _, h, w = flow.size()
# resize images to match the size of layer
im1_scaled = F.interpolate(im1_origin, (h, w), mode='area')
im2_scaled = F.interpolate(im2_origin, (h, w), mode='area')
im1_recons = flow_warp(im2_scaled, flow[:, :2], pad=self.cfg.warp_pad)
im2_recons = flow_warp(im1_scaled, flow[:, 2:], pad=self.cfg.warp_pad)
if i == 0:
if self.cfg.occ_from_back:
occu_mask1 = 1 - get_occu_mask_backward(flow[:, 2:], th=0.2)
occu_mask2 = 1 - get_occu_mask_backward(flow[:, :2], th=0.2)
else:
occu_mask1 = 1 - get_occu_mask_bidirection(flow[:, :2], flow[:, 2:])
occu_mask2 = 1 - get_occu_mask_bidirection(flow[:, 2:], flow[:, :2])
else:
occu_mask1 = F.interpolate(self.pyramid_occu_mask1[0],
(h, w), mode='nearest')
occu_mask2 = F.interpolate(self.pyramid_occu_mask2[0],
(h, w), mode='nearest')
self.pyramid_occu_mask1.append(occu_mask1)
self.pyramid_occu_mask2.append(occu_mask2)
loss_warp = self.loss_photomatric(im1_scaled, im1_recons, occu_mask1)
if i == 0:
s = min(h, w)
loss_smooth = self.loss_smooth(flow[:, :2] / s, im1_scaled)
if self.cfg.with_bk:
loss_warp += self.loss_photomatric(im2_scaled, im2_recons,
occu_mask2)
loss_smooth += self.loss_smooth(flow[:, 2:] / s, im2_scaled)
loss_warp /= 2.
loss_smooth /= 2.
pyramid_warp_losses.append(loss_warp)
pyramid_smooth_losses.append(loss_smooth)
pyramid_warp_losses = [l * w for l, w in
zip(pyramid_warp_losses, self.cfg.w_scales)]
pyramid_smooth_losses = [l * w for l, w in
zip(pyramid_smooth_losses, self.cfg.w_sm_scales)]
warp_loss = sum(pyramid_warp_losses)
smooth_loss = self.cfg.w_smooth * sum(pyramid_smooth_losses)
total_loss = warp_loss + smooth_loss
return total_loss, warp_loss, smooth_loss, pyramid_flows[0].abs().mean()
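# --- Hedged usage sketch (added for illustration; not part of the original
# file). It assumes cfg behaves like ARFlow's EasyDict-style configs (both
# attribute access and the `'smooth_2nd' in cfg` test above must work) and
# that utils.warp_utils is importable; weights and shapes are illustrative.
def _demo_unflow_loss():
    import torch
    from easydict import EasyDict
    cfg = EasyDict(w_l1=0.15, w_ssim=0.85, w_ternary=0.0, w_smooth=75.0,
                   alpha=10, smooth_2nd=False, w_scales=[1.0, 1.0],
                   w_sm_scales=[1.0, 0.0], with_bk=True, warp_pad='border',
                   occ_from_back=True)
    loss_fn = unFlowLoss(cfg)
    pyramid = [torch.zeros(2, 4, h, w, requires_grad=True)
               for h, w in [(64, 96), (32, 48)]]   # fwd/bwd flow per scale
    im_pairs = torch.rand(2, 6, 64, 96)            # im1 and im2 concatenated
    total, warp, smooth, mean_flow = loss_fn(pyramid, im_pairs)
    return total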
| 4,395 | 37.226087 | 88 | py |
| ARFlow | ARFlow-master/losses/get_loss.py |
from .flow_loss import unFlowLoss
def get_loss(cfg):
if cfg.type == 'unflow':
loss = unFlowLoss(cfg)
else:
raise NotImplementedError(cfg.type)
return loss
| 184 | 19.555556 | 43 | py |
| ARFlow | ARFlow-master/losses/loss_blocks.py |
import torch
import torch.nn as nn
import torch.nn.functional as F
# Credit: https://github.com/simonmeister/UnFlow/blob/master/src/e2eflow/core/losses.py
def TernaryLoss(im, im_warp, max_distance=1):
patch_size = 2 * max_distance + 1
def _rgb_to_grayscale(image):
grayscale = image[:, 0, :, :] * 0.2989 + \
image[:, 1, :, :] * 0.5870 + \
image[:, 2, :, :] * 0.1140
return grayscale.unsqueeze(1)
def _ternary_transform(image):
intensities = _rgb_to_grayscale(image) * 255
out_channels = patch_size * patch_size
w = torch.eye(out_channels).view((out_channels, 1, patch_size, patch_size))
weights = w.type_as(im)
patches = F.conv2d(intensities, weights, padding=max_distance)
transf = patches - intensities
transf_norm = transf / torch.sqrt(0.81 + torch.pow(transf, 2))
return transf_norm
def _hamming_distance(t1, t2):
dist = torch.pow(t1 - t2, 2)
dist_norm = dist / (0.1 + dist)
dist_mean = torch.mean(dist_norm, 1, keepdim=True) # instead of sum
return dist_mean
def _valid_mask(t, padding):
n, _, h, w = t.size()
inner = torch.ones(n, 1, h - 2 * padding, w - 2 * padding).type_as(t)
mask = F.pad(inner, [padding] * 4)
return mask
t1 = _ternary_transform(im)
t2 = _ternary_transform(im_warp)
dist = _hamming_distance(t1, t2)
mask = _valid_mask(im, max_distance)
return dist * mask
def SSIM(x, y, md=1):
patch_size = 2 * md + 1
C1 = 0.01 ** 2
C2 = 0.03 ** 2
mu_x = nn.AvgPool2d(patch_size, 1, 0)(x)
mu_y = nn.AvgPool2d(patch_size, 1, 0)(y)
mu_x_mu_y = mu_x * mu_y
mu_x_sq = mu_x.pow(2)
mu_y_sq = mu_y.pow(2)
sigma_x = nn.AvgPool2d(patch_size, 1, 0)(x * x) - mu_x_sq
sigma_y = nn.AvgPool2d(patch_size, 1, 0)(y * y) - mu_y_sq
sigma_xy = nn.AvgPool2d(patch_size, 1, 0)(x * y) - mu_x_mu_y
SSIM_n = (2 * mu_x_mu_y + C1) * (2 * sigma_xy + C2)
SSIM_d = (mu_x_sq + mu_y_sq + C1) * (sigma_x + sigma_y + C2)
SSIM = SSIM_n / SSIM_d
dist = torch.clamp((1 - SSIM) / 2, 0, 1)
return dist
def gradient(data):
D_dy = data[:, :, 1:] - data[:, :, :-1]
D_dx = data[:, :, :, 1:] - data[:, :, :, :-1]
return D_dx, D_dy
def smooth_grad_1st(flo, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flo)
loss_x = weights_x * dx.abs() / 2.
    loss_y = weights_y * dy.abs() / 2.
return loss_x.mean() / 2. + loss_y.mean() / 2.
def smooth_grad_2nd(flo, image, alpha):
img_dx, img_dy = gradient(image)
weights_x = torch.exp(-torch.mean(torch.abs(img_dx), 1, keepdim=True) * alpha)
weights_y = torch.exp(-torch.mean(torch.abs(img_dy), 1, keepdim=True) * alpha)
dx, dy = gradient(flo)
dx2, dxdy = gradient(dx)
dydx, dy2 = gradient(dy)
loss_x = weights_x[:, :, :, 1:] * dx2.abs()
loss_y = weights_y[:, :, 1:, :] * dy2.abs()
return loss_x.mean() / 2. + loss_y.mean() / 2.
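# --- Hedged usage sketch (added for illustration; not part of the original
# file). TernaryLoss and SSIM return per-pixel maps, so we reduce with
# .mean(); the smoothness terms already return scalars.
def _demo_loss_blocks():
    im = torch.rand(2, 3, 64, 64)
    im_warp = torch.rand(2, 3, 64, 64)
    ternary = TernaryLoss(im, im_warp, max_distance=1).mean()
    ssim = SSIM(im, im_warp, md=1).mean()
    flow = torch.zeros(2, 2, 64, 64)
    smooth = smooth_grad_1st(flow, im, alpha=10)   # scalar
    return ternary, ssim, smooth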
| 3,197
| 30.98
| 87
|
py
|
myriad
|
myriad-main/setup.py
|
from setuptools import setup, find_packages
setup(
name='myriad',
packages=find_packages(),
license='Apache 2.0',
author='Nikolaus H. R. Howe, Simon Dufort-Labbé, Nitarshan Rajkumar',
)
| 195 | 20.777778 | 71 | py |
| myriad | myriad-main/run.py |
# (c) 2021 Nikolaus Howe
import numpy as np
import random
from jax.config import config
from myriad.experiments.e2e_sysid import run_endtoend
from myriad.experiments.mle_sysid import run_mle_sysid
from myriad.experiments.node_e2e_sysid import run_node_endtoend
from myriad.experiments.node_mle_sysid import run_node_mle_sysid
from myriad.useful_scripts import run_setup, run_trajectory_opt, load_node_and_plan
from myriad.probing_numerical_instability import probe, special_probe
from myriad.utils import integrate_time_independent, yield_minibatches
config.update("jax_enable_x64", True)
run_buddy = False
def main():
#########
# Setup #
#########
hp, cfg = run_setup()
random.seed(hp.seed)
np.random.seed(hp.seed)
if run_buddy:
# random.seed(hp.seed)
# np.random.seed(hp.seed)
import experiment_buddy
experiment_buddy.register(hp.__dict__)
# tensorboard = experiment_buddy.deploy(host='mila', sweep_yaml="sweep.yaml")
tensorboard = experiment_buddy.deploy(host='mila', sweep_yaml="")
# tensorboard = experiment_buddy.deploy(host='', sweep_yaml='')
########################################
# Probing Systems' Numerical Stability #
########################################
# for st in SystemType:
# if st in [SystemType.SIMPLECASE, SystemType.INVASIVEPLANT]:
# continue
# print("system", st)
# hp.system = st
# probe(hp, cfg)
# probe(hp, cfg)
# special_probe(hp, cfg)
###########################################
# Trajectory optimization with true model #
###########################################
run_trajectory_opt(hp, cfg, save_as='traj_opt_example.pdf')
######################
# MLE model learning #
######################
# Parametric, MLE
# run_mle_sysid(hp, cfg)
# NODE, MLE
# run_node_mle_sysid(hp, cfg)
#############################
# End to end model learning #
#############################
# Parametric, end-to-end
# run_endtoend(hp, cfg)
# NODE, end-to-end
# run_node_endtoend(hp, cfg)
###############
# Noise study #
###############
# study_noise(hp, cfg, experiment_string='mle_sysid')
# study_noise(hp, cfg, experiment_string='node_mle_sysid')
##################
# Dynamics study #
##################
# study_vector_field(hp, cfg, 'mle', 0)
# study_vector_field(hp, cfg, 'e2e', 0, file_extension='pdf')
if __name__ == '__main__':
main()
| 2,407 | 26.363636 | 83 | py |
| myriad | myriad-main/tests/tests.py |
# (c) Nikolaus Howe 2021
from scipy.integrate import odeint
import jax.numpy as jnp
import numpy as np
import sys
import unittest
from run import run_trajectory_opt
from myriad.config import IntegrationMethod, NLPSolverType, OptimizerType, QuadratureRule, SystemType
from myriad.custom_types import State, Control, Timestep, States
from myriad.useful_scripts import run_setup
from myriad.utils import integrate
hp, cfg = run_setup(sys.argv, gin_path='../source/gin-configs/default.gin')
class BasicTests(unittest.TestCase):
def test_integrate(self):
# Perform integration using odeint
def f(t: Timestep, state: State) -> States:
return state
y0 = jnp.array([1.])
t = [0., 1.]
result_odeint = odeint(f, y0, t, tfirst=True)
# Perform integration using our 'integrate'
N = 100
t = jnp.linspace(0., 1., N)
h = t[1]
def f_wrapper(state: State, control: Control, time: Timestep) -> States:
del control
return f(time, state)
_, found_states = integrate(f_wrapper, y0, t, h, N - 1, t, integration_method=IntegrationMethod.RK4)
# Check that we get similar enough results
        np.testing.assert_almost_equal(result_odeint[-1], found_states[-1], decimal=6,
                                       err_msg=f'our integrator gave {found_states[-1]}, '
                                               f'but it should have given {result_odeint[-1]}',
                                       verbose=True)
class OptimizerTests(unittest.TestCase):
def test_single_shooting(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.max_iter = 1000
hp.intervals = 1
hp.controls_per_interval = 50
run_trajectory_opt(hp, cfg)
def test_multiple_shooting(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.max_iter = 1000
hp.intervals = 20
hp.controls_per_interval = 3
run_trajectory_opt(hp, cfg)
def test_dense_multiple_shooting(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_trapezoidal_collocation(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.COLLOCATION
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.quadrature_rule = QuadratureRule.TRAPEZOIDAL
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_hermite_simpson_collocation(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.COLLOCATION
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.RK4
hp.quadrature_rule = QuadratureRule.HERMITE_SIMPSON
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
class IntegrationMethodTests(unittest.TestCase):
def test_euler(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.EULER
hp.quadrature_rule = QuadratureRule.HERMITE_SIMPSON
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_heun(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.quadrature_rule = QuadratureRule.HERMITE_SIMPSON
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_midpoint(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.MIDPOINT
hp.quadrature_rule = QuadratureRule.HERMITE_SIMPSON
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_RK4(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.RK4
hp.quadrature_rule = QuadratureRule.HERMITE_SIMPSON
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
class QuadratureRuleTests(unittest.TestCase):
    def test_trapezoidal(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.COLLOCATION
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.quadrature_rule = QuadratureRule.TRAPEZOIDAL
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_hermite_simpson(self):
global hp, cfg
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.COLLOCATION
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.RK4
hp.quadrature_rule = QuadratureRule.HERMITE_SIMPSON
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
class NLPSolverTests(unittest.TestCase):
def test_ipopt(self):
hp.seed = 42
hp.system = SystemType.PENDULUM
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.IPOPT
hp.integration_method = IntegrationMethod.HEUN
hp.quadrature_rule = QuadratureRule.TRAPEZOIDAL
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_slsqp(self):
hp.seed = 42
hp.system = SystemType.PENDULUM
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.quadrature_rule = QuadratureRule.TRAPEZOIDAL
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_trust_constr(self):
hp.seed = 42
hp.system = SystemType.PENDULUM
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.TRUST
hp.integration_method = IntegrationMethod.HEUN
hp.quadrature_rule = QuadratureRule.TRAPEZOIDAL
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
def test_extragradient(self):
hp.seed = 42
hp.system = SystemType.SIMPLECASE
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.EXTRAGRADIENT
hp.integration_method = IntegrationMethod.HEUN
hp.quadrature_rule = QuadratureRule.TRAPEZOIDAL
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
run_trajectory_opt(hp, cfg)
if __name__ == '__main__':
unittest.main()
| 7,521 | 26.654412 | 104 | py |
| myriad | myriad-main/tests/system_tests.py |
# (c) Nikolaus Howe 2021
import sys
import unittest
from myriad.config import IntegrationMethod, NLPSolverType, OptimizerType, SystemType
from myriad.useful_scripts import run_setup
from run import run_trajectory_opt
hp, cfg = run_setup(sys.argv, gin_path='../myriad/gin-configs/default.gin')
class SystemTests(unittest.TestCase):
def test_systems(self):
global hp, cfg
hp.seed = 42
hp.optimizer = OptimizerType.SHOOTING
hp.nlpsolver = NLPSolverType.SLSQP
hp.integration_method = IntegrationMethod.HEUN
hp.max_iter = 1000
hp.intervals = 50
hp.controls_per_interval = 1
# Try to optimize each system (except for the discrete one)
for system_type in SystemType:
# Skip Invasive Plant, since it's not a continuous system
if system_type == SystemType.INVASIVEPLANT:
continue
hp.system = system_type
run_trajectory_opt(hp, cfg)
if __name__ == '__main__':
unittest.main()
| 950 | 26.171429 | 85 | py |
| myriad | myriad-main/tests/test_smoke.py |
import random
import unittest
import jax
import numpy as np
from myriad.config import Config, SystemType, HParams, OptimizerType
from myriad.trajectory_optimizers import get_optimizer
from myriad.systems import IndirectFHCS
from myriad.plotting import plot_result
# import os
# os.environ['KMP_DUPLICATE_LIB_OK'] = 'True'
approaches = {
'fbsm': {'optimizer': OptimizerType.FBSM, 'fbsm_intervals': 1000},
'single_shooting': {'optimizer': OptimizerType.SHOOTING, 'intervals': 1, 'controls_per_interval': 90},
'multiple_shooting_3_controls': {'optimizer': OptimizerType.SHOOTING, 'intervals': 30, 'controls_per_interval': 3},
'multiple_shooting_1_control': {'optimizer': OptimizerType.SHOOTING, 'intervals': 90, 'controls_per_interval': 1},
'collocation': {'optimizer': OptimizerType.COLLOCATION, 'intervals': 90, 'controls_per_interval': 1}
}
# Test that experiments run without raising exceptions
class SmokeTest(unittest.TestCase):
def setUp(self):
jax.config.update("jax_enable_x64", True)
def test_smoke(self):
for system in SystemType:
for approach in approaches:
hp = HParams(system=system, **approaches[approach]) # unpack the hparams for this approach
# TODO: add adjoint dynamics to those systems, so that FBSM can be used
# (FBSM doesn't work on environments without adjoint dynamics)
if hp.optimizer == OptimizerType.FBSM and not issubclass(system.value, IndirectFHCS):
continue
# Invasive plant is a discrete system, so it only works with FBSM
if hp.system == SystemType.INVASIVEPLANT:
continue
with self.subTest(system=hp.system, optimizer=hp.optimizer):
cfg = Config(verbose=True)
random.seed(hp.seed)
np.random.seed(hp.seed)
_system = hp.system()
optimizer = get_optimizer(hp, cfg, _system)
print("calling optimizer", hp.optimizer.name)
results = optimizer.solve()
print("solution", results[0].shape)
print("now for plotting")
# Plot the solution, using system-specific plotting where present
# plot_solution = getattr(_system, "plot_solution", None)
# if callable(plot_solution):
# print("using custom plotting")
# plot_solution(*results)
# else:
print("using default plotting")
plot_result(results, hp, save_as=approach+hp.system.name+"_test")
if __name__ == '__main__':
unittest.main()
| 2,495 | 36.253731 | 117 | py |
| myriad | myriad-main/myriad/custom_types.py |
# (c) Nikolaus Howe 2021
import jax.numpy as jnp
from typing import Callable, Mapping, Optional, Union
Batch = jnp.ndarray
Control = Union[float, jnp.ndarray]
Controls = jnp.ndarray
Cost = float
Dataset = jnp.ndarray
Defect = jnp.ndarray
DParams = Mapping[str, Union[float, jnp.ndarray]]
DState = Union[float, jnp.ndarray]
DStates = jnp.ndarray
Epoch = int
Params = Mapping[str, Union[float, jnp.ndarray]]
Solution = Mapping[str, Union[float, jnp.ndarray]]
State = Union[float, jnp.ndarray]
States = jnp.ndarray
Timestep = int
CostFun = Callable[[State, Control, Optional[Timestep]], Cost]
DynamicsFun = Callable[[State, Control, Optional[Timestep]], DState]
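# --- Hedged usage sketch (added for illustration; not part of the original
# file): a dynamics function and a cost function conforming to the aliases
# above, for a 1-D controlled decay with quadratic cost.
def _demo_typed_functions():
    def dynamics(x: State, u: Control, t: Optional[Timestep] = None) -> DState:
        return -x + u
    def cost(x: State, u: Control, t: Optional[Timestep] = None) -> Cost:
        return float(jnp.sum(x ** 2) + jnp.sum(u ** 2))
    f: DynamicsFun = dynamics
    c: CostFun = cost
    return f(jnp.array([1.0]), 0.5), c(jnp.array([1.0]), 0.5)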
| 663 | 25.56 | 68 | py |
| myriad | myriad-main/myriad/plotting.py |
# (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.offsetbox import AnchoredText
from typing import Dict, Optional, Tuple
from myriad.config import SystemType, IntegrationMethod, OptimizerType, HParams
from myriad.systems import state_descriptions, control_descriptions
from myriad.systems import get_name
def plot_losses(hp, path_to_csv, save_as=None):
etv = np.genfromtxt(path_to_csv, delimiter=',')
if len(etv) == 10000: # TODO: remove except for ne2e
print("clipping to 9999")
etv = etv[:-1]
epochs = etv[:, 0]
train = etv[:, 1]
val = etv[:, 2]
if save_as is not None and save_as.endswith(('pgf', 'pdf')):
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
title = get_name(hp)
print("title is", title)
plt.figure(figsize=(4.5, 3.5))
fixed_epochs = []
transitions = []
offset = 0
previous = 0
for i, epoch in enumerate(epochs):
if i > 0 and epoch == 0:
offset += previous
transitions.append(epoch + offset)
fixed_epochs.append(epoch + offset)
previous = epoch
plt.plot(fixed_epochs, train, label='train loss')
plt.plot(fixed_epochs, val, label='validation loss')
if title is not None:
plt.title(title)
for transition in transitions:
plt.axvline(transition, linestyle='dashed', color='grey')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.grid()
plt.legend()
plt.tight_layout()
plt.yscale('log')
if save_as is not None:
plt.savefig(save_as, bbox_inches='tight')
plt.close()
else:
plt.show()
def plot_result(result, hp, save_as=None):
adj = len(result) == 3
data = {}
if adj:
x_guess, u_guess, adj_guess = result
data['adj'] = adj_guess
else:
x_guess, u_guess = result
data['x'] = x_guess
data['u'] = u_guess
plot(hp, hp.system(), data, save_as=save_as)
def plot(hp, system,
data: Dict[str, jnp.ndarray],
labels: Optional[Dict[str, str]] = None,
styles: Optional[Dict[str, str]] = None,
widths: Optional[Dict[str, float]] = None,
title: Optional[str] = None,
save_as: Optional[str] = None,
figsize: Optional[Tuple[float, float]] = None) -> None:
if save_as is not None and save_as.endswith(('pgf', 'pdf')): # comment out for the cluster
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
    if labels is None:
        # default to empty labels so the lookups below are safe
        labels = {name: '' for name in data}
    if styles is None:
        styles = {}
        for name in data:
            styles[name] = '-'
    if widths is None:
        widths = {}
        for name in data:
            widths[name] = 1.
# Separate plotting for the discrete-time system
if hp.system == SystemType.INVASIVEPLANT:
system.plot_solution(data['x'], data['u'], data['adj'])
return
# elif hp.system == SystemType.SEIR:
# system.plot_solution(data['x'], data['u'])
# return
if figsize is not None:
plt.figure(figsize=figsize)
else:
# plt.rcParams["figure.figsize"] = (4, 3.3)
plt.figure(figsize=(4, 4))
# if 'adj' not in data:
# height = 4#5.6
# num_subplots = 2
# else:
# height = 9
# num_subplots = 3
# # plt.figure(figsize=(7, height))
# plt.figure(figsize=(5, height))
    num_subplots = 3 if 'adj' in data else 2
title = get_name(hp)
if title is not None:
plt.suptitle(title)
# else:
# if hp.optimizer == OptimizerType.COLLOCATION:
# plt.suptitle(
# f'{hp.system.name}') # {hp.optimizer.name}:{hp.intervals} {hp.quadrature_rule.name} {hp.integration_method.name}')
# else:
# plt.suptitle(
# f'{hp.system.name}') # {hp.optimizer.name}:{hp.intervals}x{hp.controls_per_interval} {hp.integration_method.name}')
order_multiplier = 2 if hp.integration_method == IntegrationMethod.RK4 else 1
ts_x = jnp.linspace(0, system.T, data['x'].shape[0])
ts_u = jnp.linspace(0, system.T, data['u'].shape[0])
# Every system except SIMPLECASE and SIMPLECASEWITHBOUNDS
# Plot exactly those state columns which we want plotted
plt.subplot(num_subplots, 1, 1)
if hp.system in state_descriptions:
for idx, x_i in enumerate(data['x'].T):
if idx in state_descriptions[hp.system][0]:
plt.plot(ts_x, x_i, styles['x'], lw=widths['x'],
label=state_descriptions[hp.system][1][idx] + labels['x'])
if 'other_x' in data:
plt.plot(jnp.linspace(0, system.T, data['other_x'][:, idx].shape[0]),
data['other_x'][:, idx], styles['other_x'], lw=widths['other_x'],
label=state_descriptions[hp.system][1][idx] + labels['other_x'])
else:
plt.plot(ts_x, data['x'], styles['x'], lw=widths['x'], label=labels['x'])
if 'other_x' in data:
plt.plot(jnp.linspace(0, system.T, data['other_x'].shape[0]),
data['other_x'], styles['other_x'], lw=widths['other_x'], label=labels['other_x'])
plt.ylabel("state (x)")
plt.grid()
plt.legend(loc="upper left")
# Same thing as above, but for the controls
ax = plt.subplot(num_subplots, 1, 2)
if hp.system in control_descriptions:
for idx, u_i in enumerate(data['u'].T):
if idx in control_descriptions[hp.system][0]:
plt.plot(ts_u, u_i, styles['u'], lw=widths['u'], label=control_descriptions[hp.system][1][idx] + labels['u'])
if 'other_u' in data and data['other_u'] is not None:
plt.plot(jnp.linspace(0, system.T, data['other_u'][:, idx].shape[0]),
data['other_u'][:, idx], styles['other_u'], lw=widths['other_u'],
label=control_descriptions[hp.system][1][idx] + labels['other_u'])
else:
plt.plot(ts_u, data['u'], styles['u'], lw=widths['u'], label=labels['u'])
if 'other_u' in data:
plt.plot(jnp.linspace(0, system.T, data['other_u'].shape[0]),
data['other_u'], styles['other_u'], lw=widths['other_u'], label=labels['other_u'])
plt.ylabel("control (u)")
plt.grid()
plt.legend(loc="upper left")
if 'cost' in data and 'other_cost' not in data:
cost_text = f"Cost: {data['cost']:.2f}"
if 'defect' in data and data['defect'] is not None:
for i, d in enumerate(data['defect']):
if i == 0:
cost_text += f"\nDefect: {d:.2f}"
else:
cost_text += f" {d:.2f}"
at = AnchoredText(cost_text,
prop=dict(size=10), frameon=False,
loc='upper right',
)
# at.set_alpha(0.5)
# at.patch.set_alpha(0.5)
at.txt._text.set_bbox(dict(facecolor="#FFFFFF", edgecolor="#DBDBDB", alpha=0.7))
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at, )
elif 'cost' in data and 'other_cost' in data:
cost_text = f"Optimal cost: {data['cost']:.2f}"
if 'defect' in data and data['defect'] is not None:
for i, d in enumerate(data['defect']):
if i == 0:
cost_text += f"\nOptimal defect: {d:.2f}"
else:
cost_text += f" {d:.2f}"
cost_text += f"\nAchieved cost: {data['other_cost']:.2f}"
if 'other_defect' in data and data['other_defect'] is not None:
for i, d in enumerate(data['other_defect']):
if i == 0:
cost_text += f"\nAchieved defect: {d:.2f}"
else:
cost_text += f" {d:.2f}"
at = AnchoredText(cost_text,
prop=dict(size=10), frameon=False,
loc='upper right',
)
# at.set_alpha(0.5)
# at.patch.set_alpha(0.5)
at.txt._text.set_bbox(dict(facecolor="#FFFFFF", edgecolor="#DBDBDB", alpha=0.7))
at.patch.set_boxstyle("round,pad=0.,rounding_size=0.2")
ax.add_artist(at, )
if 'adj' in data:
ts_adj = jnp.linspace(0, system.T, data['adj'].shape[0])
plt.subplot(num_subplots, 1, 3)
if labels is not None and 'adj' in labels:
plt.plot(ts_adj, data['adj'], label=labels['adj'])
else:
plt.plot(ts_adj, data['adj'], label='Adjoint')
plt.ylabel("adjoint (lambda)")
plt.legend(loc="upper left")
plt.xlabel('time (s)')
plt.tight_layout()
if save_as:
plt.savefig(save_as, bbox_inches='tight')
plt.close()
else:
plt.show()
if __name__ == "__main__":
hp = HParams()
path_to_csv = f'../losses/{hp.system.name}/1_1_1'
    plot_losses(hp, path_to_csv, save_as=f'../plots/{hp.system.name}/1_1_1/{hp.system.name}_train.pdf')
    plot_losses(hp, path_to_csv, save_as=f'../plots/{hp.system.name}/1_1_1/{hp.system.name}_train.pgf')
    # plot_losses(hp, path_to_csv)
| 8,641 | 32.496124 | 125 | py |
| myriad | myriad-main/myriad/probing_numerical_instability.py |
# (c) 2021 Nikolaus Howe
import numpy as np
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import pickle as pkl
from jax import lax
from typing import Callable, Tuple
from myriad.custom_types import State, States, Control, Controls, DState
from myriad.utils import integrate, integrate_time_independent, integrate_time_independent_in_parallel
from myriad.config import HParams, Config, IntegrationMethod
################
# INSTRUCTIONS #
################
# Place me at the same level as "run.py",
# and run me as:
# for st in SystemType:
# if st in [SystemType.SIMPLECASE, SystemType.INVASIVEPLANT]:
# continue
# print("system", st)
# hp.system = st
#     probe(hp, cfg)
def nice_scan(f, init, xs, length=None):
if xs is None:
xs = [None] * length
carry = init
ys = []
for x in xs:
for c in carry.T:
plt.plot(x, c, 'o', color='red')
# if x == 21:
# print("x", x, carry)
if x == 49:
print("xx", x, carry)
# plt.xlim((0, 12))
plt.show()
carry, y = f(carry, x)
ys.append(y)
return carry, np.stack(ys)
def testing_integrate_time_independent(
dynamics_t: Callable[[State, Control], DState], # dynamics function
x_0: State, # starting state
interval_us: Controls, # controls
h: float, # step size
N: int, # steps
integration_method: IntegrationMethod # allows user to choose int method
) -> Tuple[State, States]:
# QUESTION: do we want to keep the mid-controls as decision variables for RK4,
# or move to simply taking the average between the edge ones?
# @jit
def rk4_step(x, u1, u2, u3):
k1 = dynamics_t(x, u1)
k2 = dynamics_t(x + h * k1 / 2, u2)
k3 = dynamics_t(x + h * k2 / 2, u2)
k4 = dynamics_t(x + h * k3, u3)
return x + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
# @jit
def heun_step(x, u1, u2):
k1 = dynamics_t(x, u1)
k2 = dynamics_t(x + h * k1, u2)
return x + h / 2 * (k1 + k2)
# @jit
def midpoint_step(x, u1, u2):
x_mid = x + h * dynamics_t(x, u1)
u_mid = (u1 + u2) / 2
return x + h * dynamics_t(x_mid, u_mid)
# @jit
def euler_step(x, u):
return x + h * dynamics_t(x, u)
def fn(carried_state, idx):
if integration_method == IntegrationMethod.EULER:
one_step_forward = euler_step(carried_state, interval_us[idx])
elif integration_method == IntegrationMethod.HEUN:
one_step_forward = heun_step(carried_state, interval_us[idx], interval_us[idx + 1])
elif integration_method == IntegrationMethod.MIDPOINT:
one_step_forward = midpoint_step(carried_state, interval_us[idx], interval_us[idx + 1])
elif integration_method == IntegrationMethod.RK4:
one_step_forward = rk4_step(carried_state, interval_us[2 * idx], interval_us[2 * idx + 1],
interval_us[2 * idx + 2])
else:
print("Please choose an integration order among: {CONSTANT, LINEAR, QUADRATIC}")
raise KeyError
return one_step_forward, one_step_forward # (carry, y)
# x_T, all_next_states = lax.scan(fn, x_0, jnp.arange(N))
plt.plot(interval_us, color='blue')
x_T, all_next_states = nice_scan(fn, x_0, jnp.arange(N))
return x_T, jnp.concatenate((x_0[jnp.newaxis], all_next_states))
def probe(hp: HParams, cfg: Config):
hp.key, subkey = jax.random.split(hp.key)
system = hp.system()
# Generate |total dataset size| control trajectories
total_size = hp.train_size + hp.val_size + hp.test_size
state_size = system.x_0.shape[0]
control_size = system.bounds.shape[0] - state_size
u_lower = system.bounds[state_size:, 0]
u_upper = system.bounds[state_size:, 1]
x_lower = system.bounds[:state_size, 0]
x_upper = system.bounds[:state_size, 1]
if jnp.isinf(u_lower).any() or jnp.isinf(u_upper).any():
raise Exception("infinite control bounds, aborting")
# if jnp.isinf(x_lower).any() or jnp.isinf(x_upper).any():
# raise Exception("infinite state bounds, aborting")
spread = (u_upper - u_lower) * hp.sample_spread
all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, control_size),
minval=u_lower, maxval=u_upper)
# Generate the start states
start_states = system.x_0[jnp.newaxis].repeat(total_size, axis=0)
# Generate the states from applying the chosen controls
if hp.start_spread > 0.:
hp.key, subkey = jax.random.split(hp.key)
start_states += jax.random.normal(subkey,
shape=start_states.shape) * hp.start_spread # TODO: explore different spreads
start_states = jnp.clip(start_states, a_min=x_lower, a_max=x_upper)
# Generate the corresponding state trajectories
_, all_xs = integrate_time_independent_in_parallel(system.dynamics, start_states,
all_us, hp.stepsize, hp.num_steps,
hp.integration_method)
print("the shape of the generated us is", all_us.shape)
print("the shape of the generated xs is", all_xs.shape)
# print("an example is", all_xs[-1])
for i, xs in enumerate(all_xs):
if not jnp.isfinite(xs).all():
print("there was an infinity encountered")
print("us", all_us[i])
print("start state", start_states[i])
raise SystemExit
plt.close()
for i, xs in enumerate(all_xs):
# print("xs is of shape", xs.shape)
for j, state in enumerate(xs.T):
plt.plot(state)
# break
# plt.plot(xs)
# plt.legend()
plt.show()
# plt.savefig(f'cool_{hp.system.name}.pdf')
plt.close()
def special_probe(hp, cfg):
# CARTPOLE
key = jax.random.PRNGKey(42)
key, subkey = jax.random.split(key)
system = hp.system()
hp.key, subkey = jax.random.split(hp.key)
file_path = 't_set'
train_set = pkl.load(open(file_path, 'rb'))
print("train set", train_set)
first_xs = train_set[0, :, :hp.state_size]
first_us = train_set[0, :, hp.state_size:]
given_params = {
'g': 15,
'm1': 1.0,
'm2': 0.1,
'length': 1.0
}
def dynamics(x, u):
return system.parametrized_dynamics(given_params, x, u)
print("first xs", first_xs.shape)
print("first us", first_us.shape)
start = first_xs[0]
print("start", start.shape)
_, xs = testing_integrate_time_independent(dynamics, start,
first_us, hp.stepsize, hp.num_steps,
hp.integration_method)
#####################
# train_xs = train_set[:, :, :hp.state_size]
# train_us = train_set[:, :, hp.state_size:]
# start_xs = train_xs[:, 0, :]
# # if cfg.verbose:
# # print("train xs", train_xs.shape)
# # print("train us", train_us.shape)
# # print("start train xs", start_xs.shape)
#
# _, predicted_states = integrate_time_independent_in_parallel(
# dynamics, start_xs, train_us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
# )
    ############################
print("us", first_us)
print("resulting xs", xs)
raise SystemExit
# print("us", us)
# print("xs", xs)
u = us[21]
uu = us[22]
uuu = us[23]
x = jnp.array([-2.68629165])
xx = jnp.array([-7.63482766])
def heun_step(x, u1, u2):
k1 = system.dynamics(x, u1)
print("k1", k1)
k2 = system.dynamics(x + hp.stepsize * k1, u2)
print("k2", k2)
return x + hp.stepsize / 2 * (k1 + k2)
print("step", heun_step(xx, uu, uuu))
| 7,453 | 30.451477 | 116 | py |
| myriad | myriad-main/myriad/utils.py |
# (c) 2021 Nikolaus Howe
from __future__ import annotations
import jax
import jax.numpy as jnp
import numpy as np
import time
import typing
if typing.TYPE_CHECKING:
from myriad.neural_ode.create_node import NeuralODE
from myriad.config import HParams, Config
from jax import jit, lax, vmap
from typing import Callable, Optional, Tuple, Dict
from myriad.config import Config, HParams, IntegrationMethod, SamplingApproach
from myriad.systems import FiniteHorizonControlSystem
from myriad.custom_types import Control, Controls, Dataset, DState, State, States, Cost, Timestep
def integrate(
dynamics_t: Callable[[State, Control, Timestep], DState], # dynamics function
x_0: State, # starting state
interval_us: Controls, # controls
h: float, # step size
N: int, # steps
ts: jnp.ndarray, # times
integration_method: IntegrationMethod # allows user to choose interpolation for controls
) -> Tuple[State, States]:
# QUESTION: do we want to keep this interpolation for rk4, or move to linear?
@jit
def rk4_step(x, u1, u2, u3, t):
k1 = dynamics_t(x, u1, t)
k2 = dynamics_t(x + h * k1 / 2, u2, t + h / 2)
k3 = dynamics_t(x + h * k2 / 2, u2, t + h / 2)
k4 = dynamics_t(x + h * k3, u3, t + h)
return x + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
@jit
def heun_step(x, u1, u2, t):
k1 = dynamics_t(x, u1, t)
k2 = dynamics_t(x + h * k1, u2, t + h)
return x + h / 2 * (k1 + k2)
@jit
def midpoint_step(x, u1, u2, t):
x_mid = x + h * dynamics_t(x, u1, t)
u_mid = (u1 + u2) / 2
return x + h * dynamics_t(x_mid, u_mid, t + h / 2)
@jit
def euler_step(x, u, t):
return x + h * dynamics_t(x, u, t)
def fn(carried_state, idx):
if integration_method == IntegrationMethod.EULER:
one_step_forward = euler_step(carried_state, interval_us[idx], ts[idx])
elif integration_method == IntegrationMethod.HEUN:
one_step_forward = heun_step(carried_state, interval_us[idx], interval_us[idx + 1], ts[idx])
elif integration_method == IntegrationMethod.MIDPOINT:
one_step_forward = midpoint_step(carried_state, interval_us[idx], interval_us[idx + 1], ts[idx])
elif integration_method == IntegrationMethod.RK4:
one_step_forward = rk4_step(carried_state, interval_us[2 * idx], interval_us[2 * idx + 1],
interval_us[2 * idx + 2], ts[idx])
else:
print("Please choose an integration order among: {CONSTANT, LINEAR, QUADRATIC}")
raise KeyError
return one_step_forward, one_step_forward # (carry, y)
x_T, all_next_states = lax.scan(fn, x_0, jnp.arange(N))
return x_T, jnp.concatenate((x_0[jnp.newaxis], all_next_states))
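# --- Hedged usage sketch (added for illustration; not part of the original
# module): integrating dx/dt = -x with zero controls and comparing against
# the exact solution exp(-t); Heun's global error is O(h^2) here.
def _demo_integrate():
    N, h = 100, 0.01
    ts_demo = jnp.linspace(0., N * h, N + 1)
    us_demo = jnp.zeros(N + 1)             # HEUN reads u[idx] and u[idx + 1]
    decay = lambda x, u, t: -x             # simple autonomous dynamics
    x_T, xs = integrate(decay, jnp.array([1.]), us_demo, h, N, ts_demo,
                        IntegrationMethod.HEUN)
    assert jnp.allclose(x_T, jnp.exp(-N * h), atol=1e-4)
    return xs                              # N + 1 states from t = 0 to t = 1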
# Used for the augmented state cost calculation
integrate_in_parallel = vmap(integrate, in_axes=(None, 0, 0, None, None, 0, None)) # , static_argnums=(0, 5, 6)
def integrate_time_independent(
dynamics_t: Callable[[State, Control], DState], # dynamics function
x_0: State, # starting state
interval_us: Controls, # controls
h: float, # step size
N: int, # steps
integration_method: IntegrationMethod # allows user to choose int method
) -> Tuple[State, States]:
# QUESTION: do we want to keep the mid-controls as decision variables for RK4,
# or move to simply taking the average between the edge ones?
@jit
def rk4_step(x, u1, u2, u3):
k1 = dynamics_t(x, u1)
k2 = dynamics_t(x + h * k1 / 2, u2)
k3 = dynamics_t(x + h * k2 / 2, u2)
k4 = dynamics_t(x + h * k3, u3)
return x + h / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
@jit
def heun_step(x, u1, u2):
k1 = dynamics_t(x, u1)
k2 = dynamics_t(x + h * k1, u2)
return x + h / 2 * (k1 + k2)
@jit
def midpoint_step(x, u1, u2):
x_mid = x + h * dynamics_t(x, u1)
u_mid = (u1 + u2) / 2
return x + h * dynamics_t(x_mid, u_mid)
@jit
def euler_step(x, u):
return x + h * dynamics_t(x, u)
def fn(carried_state, idx):
if integration_method == IntegrationMethod.EULER:
one_step_forward = euler_step(carried_state, interval_us[idx])
elif integration_method == IntegrationMethod.HEUN:
one_step_forward = heun_step(carried_state, interval_us[idx], interval_us[idx + 1])
elif integration_method == IntegrationMethod.MIDPOINT:
one_step_forward = midpoint_step(carried_state, interval_us[idx], interval_us[idx + 1])
elif integration_method == IntegrationMethod.RK4:
one_step_forward = rk4_step(carried_state, interval_us[2 * idx], interval_us[2 * idx + 1],
interval_us[2 * idx + 2])
else:
print("Please choose an integration order among: {CONSTANT, LINEAR, QUADRATIC}")
raise KeyError
return one_step_forward, one_step_forward # (carry, y)
x_T, all_next_states = lax.scan(fn, x_0, jnp.arange(N))
return x_T, jnp.concatenate((x_0[jnp.newaxis], all_next_states))
integrate_time_independent_in_parallel = vmap(integrate_time_independent, in_axes=(None, 0, 0, None, None, None))
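# Sketch of the vmapped variant above (hypothetical shapes): a batch of B start
# states with shape (B, state_size) and B control trajectories with shape
# (B, N + 1, control_size) are integrated in parallel, sharing the dynamics,
# step size, step count, and integration method, e.g.
#   _, batched_xs = integrate_time_independent_in_parallel(
#       system.dynamics, start_states, all_us, h, N, IntegrationMethod.HEUN)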
# Used for the adjoint integration
def integrate_fbsm(
dynamics_t: Callable[[State, Control, Optional[jnp.ndarray], Optional[jnp.ndarray]],
jnp.ndarray], # dynamics function
x_0: jnp.ndarray, # starting state
u: jnp.ndarray, # controls
h: float, # step size # is negative in backward mode
N: int, # steps
v: Optional[jnp.ndarray] = None,
t: Optional[jnp.ndarray] = None,
discrete: bool = False,
) -> Tuple[jnp.ndarray, jnp.ndarray]:
"""
  Implementation of the 4th-order Runge-Kutta method, adapted for the FBSM (forward-backward sweep) method.
  Specifically, it can either perform a numerical integration in a forward sweep over the state variables,
  or a backward sweep to integrate over the adjoint variables.
Args:
dynamics_t: (Callable) -- The dynamics (ODEs) to integrate
x_0: The initial value to begin integration
u: (jnp.ndarray) -- A guess over a costate variable.
h: (float) -- The step size for the numerical integration
N: (int) -- The number of steps for the numerical integration
v: (jnp.ndarray, optional) -- Another costate variable, if needed
t: (jnp.ndarray, optional) -- The time variable, for time-dependent dynamics
discrete: (bool, optional) -- Perform direct calculation instead of integration if facing a discrete system.
Returns:
    final_state, trajectory : Tuple[jnp.ndarray, jnp.ndarray] -- The final value of the integrated variable and the complete trajectory
"""
@jit
def rk4_step(x_t1, u, u_next, v, v_next, t):
u_convex_approx = (u + u_next) / 2
v_convex_approx = (v + v_next) / 2
k1 = dynamics_t(x_t1, u, v, t)
k2 = dynamics_t(x_t1 + h * k1 / 2, u_convex_approx, v_convex_approx, t + h / 2)
k3 = dynamics_t(x_t1 + h * k2 / 2, u_convex_approx, v_convex_approx, t + h / 2)
k4 = dynamics_t(x_t1 + h * k3, u_next, v_next, t + h)
return x_t1 + (h / 6) * (k1 + 2 * k2 + 2 * k3 + k4)
if v is None:
v = jnp.empty_like(u)
if t is None:
t = jnp.empty_like(u)
direction = int(jnp.sign(h))
if discrete:
if direction >= 0:
fn = lambda x_t, idx: [dynamics_t(x_t, u[idx], v[idx], t[idx])] * 2
else:
fn = lambda x_t, idx: [dynamics_t(x_t, u[idx], v[idx - 1], t[idx - 1])] * 2
else:
fn = lambda x_t, idx: [rk4_step(x_t, u[idx], u[idx + direction], v[idx], v[idx + direction], t[idx])] * 2
if direction >= 0:
x_T, ys = lax.scan(fn, x_0, jnp.arange(N))
return x_T, jnp.concatenate((x_0[None], ys))
else:
x_T, ys = lax.scan(fn, x_0, jnp.arange(N, 0, -1))
return x_T, jnp.concatenate((jnp.flipud(ys), x_0[None]))
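# A minimal backward-sweep sketch (hypothetical adjoint dynamics, not from the
# library): a negative step size flips `direction`, so the scan runs from index
# N down to 1 and the trajectory is flipped back into forward time order.
def _fbsm_backward_example():
  adjoint_dynamics = lambda lmb, u, v, t: -lmb  # hypothetical adjoint ODE
  N, h = 10, -0.1
  u_guess = jnp.zeros(N + 1)
  lmb_T = jnp.array([1.])  # terminal condition on the adjoint
  lmb_0, lmb_traj = integrate_fbsm(adjoint_dynamics, lmb_T, u_guess, h, N)
  return lmb_0, lmb_traj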
# First, get the optimal controls and resulting trajectory using the true system model.
# Then, replace the model dynamics with the trained neural network,
# and use that to find the "optimal" controls according to the NODE model.
# Finally get the resulting true state trajectory coming from those suboptimal controls.
# def plan_with_model(node: NeuralODE, regularize: bool = False) -> Controls:
# apply_net = lambda x, u: node.net.apply(node.params, jnp.append(x, u)) # use nonlocal net and params
#
# # Replace system dynamics, but remember it to restore later
# # old_dynamics = node.system.dynamics
# # node.system.dynamics = apply_net
#
# objective = functools.partial(node.optimizer.objective, custom_dynamics=apply_net)
# constraints = functools.partial(node.optimizer.constraints, custom_dynamics=apply_net)
#
# opt_inputs = {
# 'objective': objective,
# 'guess': node.optimizer.guess,
# 'constraints': constraints,
# 'bounds': node.optimizer.bounds,
# 'unravel': node.optimizer.unravel
# }
#
# _, u = solve(node.hp, node.cfg, opt_inputs)
#
# # Restore system dynamics
# # node.system.dynamics = old_dynamics
#
# return u.squeeze() # this is necessary for later broadcasting
def plan_with_node_model(node: NeuralODE) -> Tuple[States, Controls]:
apply_net = lambda x, u: node.net.apply(node.params, jnp.append(x, u)) # use nonlocal net and params
# Replace system dynamics, but remember it to restore later
old_dynamics = node.system.dynamics
node.system.dynamics = apply_net
solved_results = node.optimizer.solve()
# Restore system dynamics
node.system.dynamics = old_dynamics
return solved_results['x'], solved_results['u']
# TODO: I'm removing the squeeze on the ['u'] because it's causing problems later on
# hopefully this doesn't break something else... all in the name of supporting vector controls
# Find the optimal trajectory according the learned model
def get_optimal_node_trajectory(node: NeuralODE) -> Tuple[States, Controls]:
_, opt_u = plan_with_node_model(node)
_, opt_x = integrate_time_independent(node.system.dynamics, node.system.x_0, opt_u,
node.stepsize, node.num_steps, node.hp.integration_method)
# assert not jnp.isnan(opt_u).all() and not jnp.isnan(opt_x).all()
# NOTE: it used to return things in the opposite order! might cause bugs!
return opt_x, opt_u
# TODO: make the start state default to the system start state
def get_state_trajectory_and_cost(hp: HParams, system: FiniteHorizonControlSystem,
start_state: State, us: Controls) -> Tuple[States, Cost]:
@jax.jit
def augmented_dynamics(x_and_c: jnp.ndarray, u: Control, t: Timestep) -> jnp.ndarray:
x, c = x_and_c[:-1], x_and_c[-1]
return jnp.append(system.dynamics(x, u), system.cost(x, u, t))
num_steps = hp.intervals * hp.controls_per_interval
step_size = system.T / num_steps
times = jnp.linspace(0., system.T, num=num_steps + 1)
starting_x_and_cost = jnp.append(start_state, 0.)
# print("starting x and cost", starting_x_and_cost)
# print("us", us.shape)
# print("step size", step_size)
# print("num steps", num_steps)
# print("times", times.shape)
# raise SystemExit
# Integrate cost in parallel
# print("entering integration")
# print("the us are", us)
_, state_and_cost = integrate(
augmented_dynamics, starting_x_and_cost, us,
step_size, num_steps, times, hp.integration_method)
# print("the states and costs are", state_and_cost)
# print("the states and costs are", state_and_cost.shape)
# raise SystemExit
states = state_and_cost[:, :-1]
# print("extracted states", states.shape)
last_augmented_state = state_and_cost[-1]
# print("last aug state", last_augmented_state)
cost = last_augmented_state[-1]
# print("cost", cost)
if system.terminal_cost:
cost += system.terminal_cost_fn(last_augmented_state[:-1], us[-1])
return states, cost
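# Hypothetical usage: evaluate the true integrated cost of a zero-control
# trajectory (hp is an HParams instance):
#   system = hp.system()
#   us = jnp.zeros((hp.num_steps + 1, hp.control_size))
#   xs, cost = get_state_trajectory_and_cost(hp, system, system.x_0, us)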
def smooth(curve: jnp.ndarray, its: int) -> jnp.ndarray:
  """Apply `its` passes of a 5-tap Gaussian blur along the time axis of a
  (batch, time, dim) array of trajectories, using edge padding at the ends."""
  curve = np.array(curve)
  kernel = np.array([0.15286624, 0.22292994, 0.24840764, 0.22292994, 0.15286624])  # 5-tap Gaussian blur
for it in range(its):
for i, row in enumerate(curve):
for j, dim in enumerate(row.T):
dim = np.pad(dim, (2, 2), 'edge')
dim = np.convolve(dim, kernel, mode='valid')
curve[i, :, j] = dim
return jnp.array(curve)
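# Hypothetical usage: two blur passes over a batch of sampled control
# trajectories shaped (batch, time, control_size):
#   all_us = smooth(all_us, its=2)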
def get_defect(system: FiniteHorizonControlSystem, learned_xs: States) -> Optional[jnp.ndarray]:
defect = None
if system.x_T is not None:
defect = []
for i, s in enumerate(learned_xs[-1]):
if system.x_T[i] is not None:
defect.append(s - system.x_T[i])
if defect is not None:
defect = jnp.array(defect)
return defect
def generate_dataset(hp: HParams, cfg: Config,
given_us: Optional[Controls] = None) -> Dataset:
system = hp.system()
hp.key, subkey = jax.random.split(hp.key)
# Generate |total dataset size| control trajectories
total_size = hp.train_size + hp.val_size + hp.test_size
# TODO: fix what happens in case of infinite bounds
u_lower = system.bounds[hp.state_size:, 0]
u_upper = system.bounds[hp.state_size:, 1]
x_lower = system.bounds[:hp.state_size, 0]
x_upper = system.bounds[:hp.state_size, 1]
if jnp.isinf(u_lower).any() or jnp.isinf(u_upper).any():
raise Exception("infinite control bounds, aborting")
if jnp.isinf(x_lower).any() or jnp.isinf(x_upper).any():
raise Exception("infinite state bounds, aborting")
spread = (u_upper - u_lower) * hp.sample_spread
########################
# RANDOM WALK CONTROLS #
########################
if hp.sampling_approach == SamplingApproach.RANDOM_WALK:
    # Make all the first controls
all_start_us = np.random.uniform(u_lower, u_upper, (total_size, 1, hp.control_size))
all_us = all_start_us
for i in range(hp.num_steps):
next_us = np.random.normal(0, spread, (total_size, 1, hp.control_size))
rightmost_us = all_us[:, -1:, :]
together = np.clip(next_us + rightmost_us, u_lower, u_upper)
all_us = np.concatenate((all_us, together), axis=1)
# elif hp.sampling_approach == SamplingApproach.RANDOM_GRID:
# single_ascending_controls = np.linspace(u_lower, u_upper, hp.num_steps + 1)
# parallel_ascending_controls = single_ascending_controls[np.newaxis].repeat(total_size)
# assert parallel_ascending_controls.shape == ()
# NOTE: we could also generate data by exhaustively considering every combination
# of state-control pair up to some discretization. This might just solve
# the problem. Unfortunately, curse of dimensionality is real.
# IDEA: let's try doing this on the CANCERTREATMENT domain, and see whether
# this is enough to help neural ODE figure out what is going on
# at the very start of planning
###########################
# UNIFORM RANDOM CONTROLS #
###########################
elif hp.sampling_approach == SamplingApproach.UNIFORM:
all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, hp.control_size),
minval=u_lower, maxval=u_upper) * 0.75 # TODO
# TODO: make sure having added control size everywhere didn't break things
#########################
# AROUND GIVEN CONTROLS #
#########################
elif hp.sampling_approach == SamplingApproach.TRUE_OPTIMAL or hp.sampling_approach == SamplingApproach.CURRENT_OPTIMAL:
if given_us is None:
print("Since you didn't provide any controls, we'll use a uniform random guess")
all_us = jax.random.uniform(subkey, (total_size, hp.num_steps + 1, hp.control_size),
minval=u_lower, maxval=u_upper) * 0.75 # TODO
# raise Exception("If sampling around a control trajectory, need to provide that trajectory.")
else:
noise = jax.random.normal(key=subkey, shape=(total_size, hp.num_steps + 1, hp.control_size)) \
* (u_upper - u_lower) * hp.sample_spread
all_us = jnp.clip(given_us[jnp.newaxis].repeat(total_size, axis=0).squeeze() + noise.squeeze(), a_min=u_lower,
a_max=u_upper)
else:
raise Exception("Unknown sampling approach, please choose among", SamplingApproach.__dict__['_member_names_'])
print("initial controls shape", all_us.shape)
# Smooth the controls if so desired
if hp.to_smooth:
start = time.time()
all_us = smooth(all_us, 2)
end = time.time()
print(f"smoothing took {end - start}s")
# TODO: I really dislike having to have this line below. Is there no way to remove it?
# Make the controls guess smaller so our dynamics don't explode
# all_us *= 0.1
# Generate the start states
start_states = system.x_0[jnp.newaxis].repeat(total_size, axis=0)
# Generate the states from applying the chosen controls
if hp.start_spread > 0.:
hp.key, subkey = jax.random.split(hp.key)
start_states += jax.random.normal(subkey,
shape=start_states.shape) * hp.start_spread # TODO: explore different spreads
start_states = jnp.clip(start_states, a_min=x_lower, a_max=x_upper)
# Generate the corresponding state trajectories
_, all_xs = integrate_time_independent_in_parallel(system.dynamics, start_states,
all_us, hp.stepsize, hp.num_steps,
hp.integration_method)
# Noise up the state observations
hp.key, subkey = jax.random.split(hp.key)
all_xs += jax.random.normal(subkey, shape=all_xs.shape) * (x_upper - x_lower) * hp.noise_level
all_xs = jnp.clip(all_xs, a_min=x_lower, a_max=x_upper)
# Stack the states and controls together
xs_and_us = jnp.concatenate((all_xs, all_us), axis=2)
if cfg.verbose:
print("Generating training control trajectories between bounds:")
print(" u lower", u_lower)
print(" u upper", u_upper)
print("of shapes:")
print(" xs shape", all_xs.shape)
print(" us shape", all_us.shape)
print(" together", xs_and_us.shape)
assert np.isfinite(xs_and_us).all()
return xs_and_us
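# Hypothetical usage: build a random-walk dataset for the configured system;
# the result has shape (train + val + test, num_steps + 1, state + control):
#   hp.sampling_approach = SamplingApproach.RANDOM_WALK
#   xs_and_us = generate_dataset(hp, cfg)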
def yield_minibatches(hp: HParams, total_size: int, dataset: Dataset) -> Iterator[np.ndarray]:
assert total_size <= dataset.shape[0]
tmp_dataset = np.random.permutation(dataset)
num_minibatches = total_size // hp.minibatch_size + (1 if total_size % hp.minibatch_size > 0 else 0)
for i in range(num_minibatches):
n = np.minimum((i + 1) * hp.minibatch_size, total_size) - i * hp.minibatch_size
yield tmp_dataset[i * hp.minibatch_size: i * hp.minibatch_size + n]
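# Hypothetical usage: one epoch of minibatched updates over the train split:
#   for mb in yield_minibatches(hp, hp.train_size, train_data):
#     params, opt_state = update(params, opt_state, mb)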
def sample_x_init(hp: HParams, n_batch: int = 1) -> np.ndarray:
s = hp.system()
res = np.random.uniform(s.bounds[:, 0], s.bounds[:, 1], (n_batch, hp.state_size + hp.control_size))
res = res[:, :hp.state_size]
assert np.isfinite(res).all()
return res
| 18,600
| 39.088362
| 133
|
py
|
myriad
|
myriad-main/myriad/useful_scripts.py
|
# (c) 2021 Nikolaus Howe
from __future__ import annotations
import jax.numpy as jnp
import numpy as np
import pickle as pkl
import simple_parsing
from jax.flatten_util import ravel_pytree
from jax.config import config
from pathlib import Path
from typing import Tuple
from myriad.config import HParams, Config
from myriad.custom_types import Cost, Defect, Optional
from myriad.neural_ode.create_node import NeuralODE
from myriad.trajectory_optimizers import get_optimizer
from myriad.utils import get_defect, integrate_time_independent, get_state_trajectory_and_cost, plan_with_node_model
from myriad.plotting import plot
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.config import OptimizerType
config.update("jax_enable_x64", True)
def run_trajectory_opt(hp: HParams, cfg: Config, save_as: str = None,
params_path: str = None) -> Tuple[Cost, Optional[Defect]]:
plot_path = f'plots/{hp.system.name}/trajectory_opt/'
Path(plot_path).mkdir(parents=True, exist_ok=True)
if save_as is not None:
save_as = plot_path + save_as
if params_path is not None:
params = pkl.load(open(params_path, 'rb'))
system = hp.system(**params)
print("loaded params:", params)
else:
system = hp.system()
print("made default system")
optimizer = get_optimizer(hp, cfg, system)
solution = optimizer.solve()
x = solution['x']
u = solution['u']
if optimizer.require_adj:
adj = solution['adj']
true_system = hp.system()
opt_x, c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, u)
defect = get_defect(true_system, opt_x)
if cfg.plot:
if cfg.pretty_plotting:
plot(hp, true_system,
data={'x': opt_x, 'u': u, 'cost': c, 'defect': defect},
labels={'x': '', 'u': ''},
styles={'x': '-', 'u': '-'},
widths={'x': 2, 'u': 2},
save_as=save_as)
else: # We also want to plot the state trajectory we got from the solver
if optimizer.require_adj:
plot(hp, true_system,
data={'x': x, 'u': u, 'adj': adj, 'other_x': opt_x, 'cost': c, 'defect': defect},
labels={'x': ' (from solver)',
'u': 'Controls from solver',
'adj': 'Adjoint from solver',
'other_x': ' (integrated)'},
save_as=save_as)
else:
plot(hp, true_system,
data={'x': x, 'u': u, 'other_x': opt_x, 'cost': c, 'defect': defect},
labels={'x': ' (from solver)',
'u': 'Controls from solver',
'other_x': ' (from integrating dynamics)'},
save_as=save_as)
return c, defect
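# Hypothetical usage, assuming the default HParams and Config:
#   hp, cfg = run_setup()
#   cost, defect = run_trajectory_opt(hp, cfg, save_as='default_run')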
def run_node_trajectory_opt(hp: HParams, cfg: Config, save_as: str = None,
params_path: str = None) -> Tuple[Cost, Optional[Defect]]:
true_system = hp.system()
node = NeuralODE(hp, cfg)
node.load_params(params_path)
node_system = NodeSystem(node, true_system)
node_optimizer = get_optimizer(hp, cfg, node_system)
node_solution = node_optimizer.solve_with_params(node.params)
u = node_solution['u']
opt_x, c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, u)
defect = get_defect(true_system, opt_x)
if cfg.plot:
plot(hp, true_system,
data={'x': opt_x, 'u': u, 'cost': c, 'defect': defect},
labels={'x': '', 'u': ''},
styles={'x': '-', 'u': '-'},
save_as=save_as)
return c, defect
def run_setup():
# def run_setup(gin_path='./myriad/gin-configs/default.gin'): # note: no longer need Gin
# Prepare experiment settings
parser = simple_parsing.ArgumentParser()
parser.add_arguments(HParams, dest="hparams")
parser.add_arguments(Config, dest="config")
# parser.add_argument("--gin_bindings", type=str) # Needed for the parser to work in conjunction with absl.flags
# key_dict = HParams.__dict__.copy()
# key_dict.update(Config.__dict__)
# print("the key dict is", key_dict)
# for key in key_dict.keys():
# if "__" not in key:
# flags.DEFINE_string(key, None, # Parser arguments need to be accepted by the flags
# 'Backward compatibility with previous parser')
# flags.DEFINE_multi_string(
# 'gin_bindings', [],
# 'Gin bindings to override the values set in the config files '
# '(e.g. "Lab1.A=1.0").')
# jax.config.update("jax_enable_x64", True)
args = parser.parse_args()
hp = args.hparams
cfg = args.config
print(hp)
print(cfg)
# Set our seeds for reproducibility
np.random.seed(hp.seed)
return hp, cfg
def plot_zero_control_dynamics(hp, cfg):
system = hp.system()
optimizer = get_optimizer(hp, cfg, system)
num_steps = hp.intervals * hp.controls_per_interval
stepsize = system.T / num_steps
zero_us = jnp.zeros((num_steps + 1,))
_, opt_x = integrate_time_independent(system.dynamics, system.x_0, zero_us,
stepsize, num_steps, hp.integration_method)
plot(hp, system,
data={'x': opt_x, 'u': zero_us},
labels={'x': 'Integrated state',
'u': 'Zero controls'})
xs_and_us, unused_unravel = ravel_pytree((opt_x, zero_us))
if hp.optimizer != OptimizerType.FBSM:
print("control cost from optimizer", optimizer.objective(xs_and_us))
print('constraint violations from optimizer', jnp.linalg.norm(optimizer.constraints(xs_and_us)))
# Plot the given control and state trajectory. Also plot the state
# trajectory which occurs when using the neural net for dynamics.
# If "optimal", do the same things as above but using the true
# optimal controls and corresponding true state trajectory.
# "extra_u" is just a way to plot an extra control trajectory.
def plot_trajectory(node: NeuralODE,
optimal: bool = False,
x: jnp.ndarray = None,
u: jnp.ndarray = None,
validation: bool = False,
title: str = None,
save_as: str = None) -> None:
if validation:
dset = node.validation_data
else:
dset = node.train_data
if x is None:
x: jnp.ndarray = dset[-1, :, :node.hp.state_size]
if u is None:
u: jnp.ndarray = dset[-1, :, node.hp.state_size:]
apply_net = lambda x, u: node.net.apply(node.params, jnp.concatenate((x, u), axis=0)) # use nonlocal net and params
# if node.cfg.verbose:
# print("states to plot", x.shape)
# print("controls to plot", u.shape)
if optimal:
x = node.true_opt_xs
u = node.true_opt_us
# Get states when using those controls
_, predicted_states = integrate_time_independent(apply_net, x[0], u,
node.stepsize, node.num_steps,
node.hp.integration_method)
# Get the true integrated cost of these controls
_, control_cost = get_state_trajectory_and_cost(node.hp, node.system, x[0], u)
# If there is a final state, also report the defect
defect = None
if node.system.x_T is not None:
defect = []
for i, s in enumerate(predicted_states[-1]):
if node.system.x_T[i] is not None:
defect.append(s - node.system.x_T[i])
defect = np.array(defect)
# Plot
plot(hp=node.hp,
system=node.system,
data={'x': x, 'u': u, 'other_x': predicted_states, 'cost': control_cost,
'defect': defect},
labels={'x': ' (true)', 'u': '', 'other_x': ' (predicted)'},
title=title, save_as=save_as)
# Plan with the model. Plot the controls from planning and corresponding true state trajectory.
# Compare it with the true optimal controls and corresponding state trajectory.
def plan_and_plot(node: NeuralODE, title: str = None, save_as: str = None) -> None:
planned_x, planned_us = plan_with_node_model(node)
xs, cost = get_state_trajectory_and_cost(node.hp, node.system, node.system.x_0, planned_us)
# If this is the best cost so far, update the best guess for us
# TODO: I don't think this is the place to do this... where is better?
if node.best_guess_us_cost is None or cost < node.best_guess_us_cost:
print("updating best us with a cost of", cost)
node.best_guess_us = planned_us
node.best_guess_us_cost = cost
new_guess, _ = ravel_pytree((planned_x, planned_us))
node.optimizer.guess = new_guess
# single_traj_train_controls = node.train_data[0, :, -1]
# single_traj_train_states = node.train_data[:, :, :-1]
#
# print("train controls are", single_traj_train_controls.shape)
# print("start train states are", single_traj_train_states[0, 0, :])
# _, train_cost = get_state_trajectory_and_cost(node.hp, node.system,
# single_traj_train_states[0, 0, :],
# single_traj_train_controls.squeeze())
# If there is a final state, also report the defect
opt_defect = None
defect = None
if node.system.x_T is not None:
opt_defect = node.true_opt_xs[-1] - node.system.x_T
defect = xs[-1] - node.system.x_T
plot(hp=node.hp,
system=node.system,
data={'x': node.true_opt_xs, 'u': node.true_opt_us, 'other_x': xs, 'other_u': planned_us,
'cost': node.true_opt_cost, 'defect': opt_defect, 'other_cost': cost, 'other_defect': defect},
labels={'x': ' (true)',
'u': ' (true)',
'other_x': ' (planned)',
'other_u': ' (planned)'},
title=title,
save_as=save_as)
##########################
# Test E2E Node planning #
##########################
def load_node_and_plan(hp, cfg):
  params_path = f'params/{hp.system.name}/e2e_node/'
  plots_path = f'plots/{hp.system.name}/e2e_node/'
Path(params_path).mkdir(parents=True, exist_ok=True)
Path(plots_path).mkdir(parents=True, exist_ok=True)
params_names = [f'{i * 50}.p' for i in range(60, 201)]
plots_names = [f'{i * 50}_epochs.png' for i in range(60, 201)]
node = NeuralODE(hp, cfg, mle=False)
true_system = hp.system() # use the default params here
true_optimizer = get_optimizer(hp, cfg, true_system)
node_system = NodeSystem(node=node, true_system=true_system)
node_optimizer = get_optimizer(hp, cfg, node_system)
true_solution = true_optimizer.solve()
true_opt_us = true_solution['u']
_, true_opt_xs = integrate_time_independent(
true_system.dynamics, true_system.x_0, true_opt_us, hp.stepsize, hp.num_steps, hp.integration_method)
for i, params_name in enumerate(params_names):
try:
node.load_params(params_path + params_name)
print("loaded params")
solution = node_optimizer.solve_with_params(node.params)
solved_us = solution['u']
_, integrated_xs = integrate_time_independent(
true_system.dynamics, true_system.x_0, solved_us, hp.stepsize, hp.num_steps, hp.integration_method)
plot(hp, true_system,
data={'x': true_opt_xs,
'other_x': integrated_xs,
'u': true_opt_us,
'other_u': solved_us},
labels={'x': ' (optimal)',
'other_x': ' (learned)',
'u': ' (optimal)',
'other_u': ' (learned)'},
styles={'x': '.',
'other_x': '-',
'u': '.',
'other_u': '-'},
save_as=plots_path + plots_names[i])
    except FileNotFoundError:
      print(f"unable to find the params at {params_path + params_name}, so we'll skip")
| 11,496
| 36.087097
| 118
|
py
|
myriad
|
myriad-main/myriad/config.py
|
# (c) 2021 Nikolaus Howe
from typing import Tuple
import jax
from dataclasses import dataclass
from enum import Enum
from myriad.systems import SystemType
class OptimizerType(Enum):
"""Parser argument. Optimizing strategy used to solve the OCP"""
# _settings_ = NoAlias
COLLOCATION = "COLLOCATION"
SHOOTING = "SHOOTING"
FBSM = "FBSM"
class SamplingApproach(Enum):
UNIFORM = 'UNIFORM'
TRUE_OPTIMAL = 'TRUE_OPTIMAL'
RANDOM_WALK = 'RANDOM_WALK'
  CURRENT_OPTIMAL = 'CURRENT_OPTIMAL'
  # TODO: CURRENT_OPTIMAL is broken at the moment, because we're not
  #       updating the guess around which we are sampling
# RANDOM_GRID = 'RANDOM_GRID'
# This ^ isn't implemented yet. It's unclear how helpful it would be
# FULL_GRID = 'FULL_GRID'
# We're not doing the FULL GRID anymore because it breaks the idea of generating trajectories.
# But it would be interesting to compare performance against, since in some sense this is the
# theoretical best. I wonder how resilient it would be to noise though.
  # ENDTOEND = "ENDTOEND"
  # ORNSTEIN_UHLENBECK = "ORNSTEIN_UHLENBECK"
  # Another one we should try to implement
class NLPSolverType(Enum):
SLSQP = "SLSQP" # Scipy's SLSQP
TRUST = "TRUST" # Scipy's trust-constr
IPOPT = "IPOPT" # ipopt
# INEXACTNEWTON="INEXACTNEWTON"
EXTRAGRADIENT = "EXTRAGRADIENT" # an extragradient-based solver
class IntegrationMethod(Enum):
EULER = "CONSTANT"
HEUN = "LINEAR"
MIDPOINT = "MIDPOINT"
RK4 = "RK4"
class QuadratureRule(Enum):
TRAPEZOIDAL = "TRAPEZOIDAL"
HERMITE_SIMPSON = "HERMITE_SIMPSON"
# Hyperparameters which change experiment results
@dataclass(eq=True, frozen=False)
class HParams:
"""The hyperparameters of the experiment. Modifying these should change the results"""
seed: int = 2019
system: SystemType = SystemType.CANCERTREATMENT
optimizer: OptimizerType = OptimizerType.SHOOTING
nlpsolver: NLPSolverType = NLPSolverType.IPOPT
integration_method: IntegrationMethod = IntegrationMethod.HEUN
quadrature_rule: QuadratureRule = QuadratureRule.TRAPEZOIDAL
max_iter: int = 1000 # maxiter for NLP solver (usually 1000)
intervals: int = 1 # used by COLLOCATION and SHOOTING
controls_per_interval: int = 100 # used by SHOOTING
fbsm_intervals: int = 1000 # used by FBSM
sampling_approach: SamplingApproach = SamplingApproach.RANDOM_WALK
train_size: int = 100 # num trajectories per dataset
val_size: int = 3
test_size: int = 3
sample_spread: float = 0.05
start_spread: float = 0.1
noise_level: float = 0.01 * 0.
to_smooth: bool = False
learning_rate: float = 0.001
minibatch_size: int = 16
num_epochs: int = 10_001
  num_experiments: int = 1  # num datasets
loss_recording_frequency: int = 10 # 1000
plot_progress_frequency: int = 10 # 10_000
early_stop_threshold: int = 30 # 30_000 # 70 for cartpole, 1 for cancertreatment
early_stop_check_frequency: int = 20 # 1000
hidden_layers: Tuple[int, int] = (50, 50) # (100, 100)
num_unrolled: int = 5
eta_x: float = 1e-1
eta_lmbda: float = 1e-3
adam_lr: float = 1e-4
def __post_init__(self):
if self.optimizer == OptimizerType.COLLOCATION:
self.controls_per_interval = 1
if self.nlpsolver == NLPSolverType.EXTRAGRADIENT:
self.max_iter *= 10
# For convenience, record number of steps and stepsize
system = self.system()
self.num_steps = self.intervals * self.controls_per_interval
self.stepsize = system.T / self.num_steps
self.key = jax.random.PRNGKey(self.seed)
self.state_size = system.x_0.shape[0]
self.control_size = system.bounds.shape[0] - self.state_size
# Fix the minibatch size if we're working with small datasets
self.minibatch_size = min([self.minibatch_size, self.train_size, self.val_size, self.test_size])
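# Hypothetical usage: a multiple-shooting setup with RK4 integration (field
# names as defined above; the values are purely illustrative):
#   hp = HParams(system=SystemType.CANCERTREATMENT,
#                optimizer=OptimizerType.SHOOTING,
#                integration_method=IntegrationMethod.RK4,
#                intervals=10, controls_per_interval=10)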
@dataclass(eq=True, frozen=False)
class Config:
"""Secondary configurations that should not change experiment results
and should be largely used for debugging"""
verbose: bool = True
"""Verbose mode; default to `True`"""
jit: bool = True
"""Enable [`@jit`](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html#using-jit-to-speed-up-functions) compilation; default to `True`"""
plot: bool = True
"""Plot progress during (and results after) the experiment; default to `True`"""
pretty_plotting: bool = True
"""Only plot the true trajectory, ignoring the solver state output"""
load_params_if_saved: bool = True
figsize: Tuple[float, float] = (8, 6)
file_extension: str = 'png' # pdf, pgf, png
| 4,564
| 34.115385
| 150
|
py
|
myriad
|
myriad-main/myriad/defaults.py
|
# (c) 2021 Nikolaus Howe
from myriad.systems import SystemType
learning_rates = {
SystemType.PENDULUM: {
'eta_x': 1e-1,
'eta_v': 1e-3
},
SystemType.CANCERTREATMENT: { # works for single shooting, 50 controls
'eta_x': 1e-1,
'eta_v': 1e-3
},
SystemType.CARTPOLE: {
'eta_x': 1e-2,
'eta_v': 1e-4
}
}
param_guesses = {
SystemType.BACTERIA: {
'r': 0.8, # 1.
'A': 1.2, # 1.
'B': 2. # 1.
},
SystemType.BEARPOPULATIONS: {
'r': .2, # .1 (true values)
'K': .6, # .75
'm_f': .3, # .5
'm_p': .6 # .5
},
SystemType.BIOREACTOR: {
'D': 0.8, # 1.
'G': 1.2 # 1.
},
SystemType.PENDULUM: {
'g': 15.,
'm': 3.,
'length': 0.5
},
SystemType.CARTPOLE: {
'g': 10., # 9.81
'm1': 1.5, # 1.0
'm2': 0.2, # 0.3
'length': 0.6 # 0.5
},
SystemType.CANCERTREATMENT: {
'r': 0.1, # 0.3
# 'a': 0.1, # NOTE: a is entirely for the cost, so we're not learning it for now
'delta': 0.8 # 0.45
},
SystemType.GLUCOSE: {
'a': 0.5, # 1.
'b': 0.4, # 1.
'c': 0.6 # 1.
},
SystemType.HIVTREATMENT: {
'k': .000044, # 0.000024
'm_1': .01, # 0.02
'm_2': .9, # 0.5
'm_3': 3.4, # 4.4
'N': 250., # 300.
'r': 0.02, # 0.03
's': 11., # 10.
'T_max': 1400. # 1500.
},
SystemType.MOULDFUNGICIDE: {
'r': 0.1, # 0.3
'M': 8. # 10.
},
SystemType.MOUNTAINCAR: {
'power': 0.001, # 0.0015
'gravity': 0.005 # 0.0025
},
SystemType.PREDATORPREY: {
'd_1': 0.15, # 0.1
'd_2': 0.07 # 0.1
},
SystemType.TIMBERHARVEST: {
'k': 0.7 # 1. # .4426
},
SystemType.TUMOUR: {
'xi': 0.06, # 0.084
'b': 4.5, # 5.85
'd': 0.01, # 0.00873
'G': 0.2, # 0.15
'mu': 0.01 # 0.02
},
SystemType.VANDERPOL: {
'a': 0.5 # 1.
}
}
| 1,883
| 19.258065
| 85
|
py
|
myriad
|
myriad-main/myriad/study_scripts.py
|
# (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import pickle as pkl
from jax.config import config
from pathlib import Path
from myriad.defaults import param_guesses
from myriad.neural_ode.create_node import NeuralODE
from myriad.experiments.mle_sysid import run_mle_sysid
from myriad.experiments.node_mle_sysid import run_node_mle_sysid
from myriad.trajectory_optimizers import get_optimizer
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.systems import get_name
from myriad.useful_scripts import run_trajectory_opt, run_node_trajectory_opt
from myriad.utils import get_state_trajectory_and_cost
config.update("jax_enable_x64", True)
###############
# Noise study #
###############
def study_noise(hp, cfg, experiment_string='mle_sysid'):
# Parametric, ML
noise_levels = [0.0, 0.001, 0.01, 0.1, 0.2, 0.5, 1., 2., 5.]
param_path = f'params/{hp.system.name}/{experiment_string}/'
plot_path = f'plots/{hp.system.name}/{experiment_string}/'
hp.num_experiments = 1
# Run the sysid
for noise_level in noise_levels:
hp.noise_level = noise_level
if experiment_string == 'mle_sysid':
run_mle_sysid(hp, cfg)
elif experiment_string == 'node_mle_sysid':
run_node_mle_sysid(hp, cfg)
else:
raise Exception("Didn't recognize experiment string")
# Make the loss vs noise plot
costs = []
defects = []
# cfg.plot_results = False
for noise_level in noise_levels:
param_name = f'noise_{noise_level}_smoothed_{hp.to_smooth}_10_3_3'
if experiment_string == 'mle_sysid':
c, d = run_trajectory_opt(hp, cfg, params_path=param_path + param_name + '.p')
elif experiment_string == 'node_mle_sysid':
c, d = run_node_trajectory_opt(hp, cfg, params_path=param_path + param_name + '_exp_0.p')
else:
raise Exception("Unknown experiment string")
costs.append(c)
defects.append(d)
cd_path = f'costs_and_defects/{hp.system.name}/{experiment_string}/'
Path(cd_path).mkdir(parents=True, exist_ok=True)
pkl.dump(noise_levels, open(cd_path + 'noise_levels', 'wb'))
pkl.dump(costs, open(cd_path + 'costs', 'wb'))
pkl.dump(defects, open(cd_path + 'defects', 'wb'))
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
plt.rcParams["figure.figsize"] = (3.7, 3.1)
# Get the cost of the truly optimal trajectory
system = hp.system()
optimizer = get_optimizer(hp, cfg, system)
solution = optimizer.solve()
_, optimal_cost = get_state_trajectory_and_cost(hp, system, system.x_0, solution['u'])
nl = pkl.load(open(cd_path + 'noise_levels', 'rb'))
c = pkl.load(open(cd_path + 'costs', 'rb'))
d = pkl.load(open(cd_path + 'defects', 'rb'))
plt.plot(nl, c)
plt.xlabel('noise level')
plt.ylabel('cost')
plt.axhline(optimal_cost, color='grey', linestyle='dashed')
plt.xlim(0, 5)
plt.grid()
if d[0] is not None:
plt.plot(nl, d)
# plt.title(hp.system.name)
title = get_name(hp)
if title is not None:
plt.suptitle(title)
plt.savefig(plot_path + 'aanoise_study.pgf', bbox_inches='tight')
plt.close()
params_path = f'params/{hp.system.name}/{experiment_string}/guess.p'
pkl.dump(param_guesses[hp.system], open(params_path, 'wb'))
c, d = run_trajectory_opt(hp, cfg, params_path=params_path)
print("c, d", c, d)
def load_system_and_us(hp, cfg, experiment_string, experiment_number):
system = hp.system()
optimizer = get_optimizer(hp, cfg, system)
solution = optimizer.solve()
us = solution['u']
if experiment_string is None:
    # The true system dynamics double as the "learned" dynamics in this case
    learned_dynamics = system.dynamics
elif experiment_string == 'mle' or experiment_string == 'e2e':
params_path = f'params/{hp.system.name}/node_{experiment_string}_sysid/'
if experiment_string == 'mle':
params_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.p'
else:
params_name = 'node_e2e.p'
node = NeuralODE(hp, cfg)
node.load_params(params_path + params_name)
print("loaded params", params_path + params_name)
system = NodeSystem(node, node.system)
# optimizer = get_optimizer(hp, cfg, system)
# solution = optimizer.solve_with_params(node.params)
# us = solution['u']
def learned_dynamics(x, u, t):
return system.parametrized_dynamics(node.params, x, u, t)
else:
raise Exception("Didn't recognize the experiment string")
return system, learned_dynamics, us
def study_vector_field(hp, cfg, experiment_string=None, experiment_number=0, title=''):
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
plt.rcParams["figure.figsize"] = (4, 3.3)
num_horizontal_arrows = 21
num_vertical_arrows = 15
system, true_dynamics, us = load_system_and_us(hp, cfg, None, 0)
_, learned_dynamics, _ = load_system_and_us(hp, cfg, experiment_string, 0)
opt_xs, c = get_state_trajectory_and_cost(hp, system, system.x_0, us)
# TODO: maybe want to also plot the integrated trajectory
ts_x = jnp.linspace(0, system.T, opt_xs.shape[0])
ts_u = jnp.linspace(0, system.T, us.shape[0])
state_bounds = system.bounds[:hp.state_size].squeeze()
plt.figure(figsize=(4, 4))
if experiment_string is None:
title = 'True Dynamics'
else:
title = 'Learned Dynamics'
  plus_title = get_name(hp)
  if plus_title is not None:
    plt.suptitle(title + ' – ' + plus_title)
  else:
    plt.suptitle(title)
# plt.subplot(2, 1, 1)
xs, ys = jnp.meshgrid(jnp.linspace(0, system.T, num_horizontal_arrows),
jnp.linspace(state_bounds[0], state_bounds[1], num_vertical_arrows))
xs = xs.flatten()
ys = ys.flatten()
times_to_evaluate_at = jnp.linspace(0, system.T, num_horizontal_arrows)
interpolated_us = jnp.interp(times_to_evaluate_at, ts_u, us.flatten())
all_true_dynamics = []
all_learned_dynamics = []
for i, y in enumerate(ys):
all_true_dynamics.append(true_dynamics(y, interpolated_us[i % num_horizontal_arrows],
0)) # dynamics are time independent, so can put 0 here
all_learned_dynamics.append(learned_dynamics(y, interpolated_us[i % num_horizontal_arrows], 0))
vec_true_y = jnp.array(all_true_dynamics)
vec_learned_y = jnp.array(all_learned_dynamics)
vec_x = jnp.ones_like(vec_true_y)
plt.quiver(xs, ys, vec_x, vec_true_y, angles='xy', width=0.003, alpha=0.9, color='blue', label='True Dynamics')
plt.quiver(xs, ys, vec_x, vec_learned_y, angles='xy', width=0.003, alpha=0.9, color='orange',
label='Learned Dynamics')
# Also plot the true dynamics
plt.plot(ts_x, opt_xs, label='True Trajectory', lw=1, ls='--', c='grey')
plt.grid()
plt.ylim(state_bounds)
plt.xlim((0., system.T))
# plt.plot(ts_x, opt_xs, label='State')
# arrow = plt.arrow(0, 0, 0.5, 0.6)
# plt.legend([arrow, ], ['My label', ])
plt.legend(loc='upper right', fontsize=8, title_fontsize=10)
plt.ylabel('state (x)')
# plt.subplot(2, 1, 2)
#
# plt.plot(ts_u, us, label='Control')
# plt.grid()
# plt.xlabel('time (s)')
# plt.ylabel('control (u)')
# # plt.ylim((0., max(us)))
# plt.xlim((0., system.T))
if experiment_string is None:
plot_path = f'plots/{hp.system.name}/true/'
else:
plot_path = f'plots/{hp.system.name}/node_{experiment_string}_sysid/'
Path(plot_path).mkdir(parents=True, exist_ok=True)
plt.tight_layout()
plt.savefig(plot_path + f'{hp.system.name}_{experiment_string}_vector_study.{cfg.file_extension}',
bbox_inches='tight')
plt.close()
| 7,920
| 32.706383
| 113
|
py
|
myriad
|
myriad-main/myriad/__init__.py
|
"""
This library implements in [JAX](https://github.com/google/jax) various real-world environments,
neural ODEs for system identification, and trajectory optimizers for solving the optimal control problem.
"""
# from .config import *
# from .nlp_solvers import *
# from .trajectory_optimizers import *
# from .plotting import *
# from .utils import *
# Exclude from documentation
__pdoc__ = {'trajectory_optimizers.IndirectMethodOptimizer.require_adj': False,
'trajectory_optimizers.TrajectoryOptimizer.require_adj': False,
'trajectory_optimizers.TrapezoidalCollocationOptimizer.require_adj': False,
'trajectory_optimizers.HermiteSimpsonCollocationOptimizer.require_adj': False,
'trajectory_optimizers.MultipleShootingOptimizer.require_adj': False,
'trajectory_optimizers.IndirectMethodOptimizer.solve': False,
'custom_types': False,
'defaults': False,
'probing_numerical_instability': False,
'study_scripts': False,
}
| 1,043
| 40.76
| 105
|
py
|
myriad
|
myriad-main/myriad/neural_ode/data_generators.py
|
# # (c) 2021 Nikolaus Howe
# from __future__ import annotations # for nicer typing
#
# import typing
#
# if typing.TYPE_CHECKING:
# pass
# import jax
# import jax.numpy as jnp
# import numpy as np
# import time
#
# from typing import Optional
#
# from myriad.config import Config, HParams, SamplingApproach
# from myriad.custom_types import Controls, Dataset
# from myriad.utils import integrate_time_independent_in_parallel, smooth
#
#
# generate_dataset, yield_minibatches, and sample_x_init were duplicated
# verbatim here; the live implementations are in myriad/utils.py.
#
#
# if __name__ == "__main__":
# hp = HParams()
# cfg = Config()
# dset = generate_dataset(hp, cfg)
# # dset = np.random.rand(100, 5)
# # hp = HParams()
# # for e in yield_minibatches(hp, 91, dset):
# # print(e.shape)
# # pass
# # print(SamplingApproach.__dict__['_member_names_'])
# # hp = HParams()
# # n_batch = 10
# # res = sample_x_init(hp, n_batch)
# # print(res.shape)
# #
# # s = hp.system()
# # lower = s.bounds[:, 0]
# # upper = s.bounds[:, 1]
# # res2 = np.random.uniform(s.bounds[:, 0],
# # s.bounds[:, 1],
# # (n_batch,
# # hp.state_size + hp.control_size)) # keeping as is, though doesn't match our cartpole limits
# # res2 = res2[:, :hp.state_size]
# # print(res2.shape)
#
# # TODO: make a data generator, but with the optimal trajectories instead of random controls
# # def populate_data(hp: HParams, cfg: Config, system_params,
# # n_train, n_val, n_test, seed=0) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
# # np.random.seed(seed)
# # n_data = n_train + n_val + n_test
# # x_init = sample_x_init(hp=hp, n_batch=n_data)
# #
# # system_params = {"x_0": x_init, **system_params}
# #
# # system = hp.system(system_params)
# # optimizer = get_optimizer(hp, cfg, system)
# # solution = optimizer.solve()
# #
# # x = solution['x']
# # u = solution['u']
# # if hp.order == IntegrationOrder.QUADRATIC and hp.optimizer == OptimizerType.COLLOCATION:
# # x_mid = solution['x_mid']
# # u_mid = solution['u_mid']
# # if optimizer.require_adj:
# # adj = solution['adj']
# #
# # num_steps = hp.intervals * hp.controls_per_interval
# # stepsize = system.T / num_steps
# #
# # print("the shapes of x and u are", x.shape, u.shape)
# #
# # #########
# #
# # tau = np.cat((x, u), dim=2).transpose(0, 1)
# # print("tau is", tau.shape)
# # print("now splitting into train, val, and test")
# #
# # train_data = tau[:n_train]
# # val_data = tau[n_train:n_train + n_val]
# # test_data = tau[n_train + n_val:]
# #
# # return train_data, val_data, test_data
| 8,950
| 39.502262
| 124
|
py
|
myriad
|
myriad-main/myriad/neural_ode/node_training.py
|
# (c) Nikolaus Howe 2021
from __future__ import annotations
import haiku as hk
import jax
import jax.numpy as jnp
import optax
import typing
if typing.TYPE_CHECKING:
from myriad.neural_ode.create_node import NeuralODE
from jax.flatten_util import ravel_pytree
from tqdm import trange
from typing import Callable, Optional, Tuple
from myriad.custom_types import Batch, Controls, Cost, Epoch
from myriad.useful_scripts import plan_and_plot, plot_trajectory
from myriad.utils import integrate_time_independent, integrate_time_independent_in_parallel, plan_with_node_model, \
yield_minibatches
# Perform node.hp.num_epochs of minibatched gradient descent.
# Store losses in the "losses" dict. Return the termination epoch.
def train(node: NeuralODE,
start_epoch: Epoch = 0,
also_record_planning_loss: bool = False,
save_as: Optional[str] = None,
extension: Optional[str] = 'png') -> Epoch:
@jax.jit
def loss(params: hk.Params, minibatch: Batch) -> Cost:
# assert jnp.isfinite(minibatch) # had to comment out because of jitting
@jax.jit
def apply_net(x, u):
net_input = jnp.append(x, u)
# print("net input", net_input)
return node.net.apply(params, net_input)
# Extract controls and true state trajectory
controls = minibatch[:, :, node.hp.state_size:]
true_states = minibatch[:, :, :node.hp.state_size]
# Extract starting states
# print("true states", true_states.shape)
start_states = true_states[:, 0, :]
# Use neural net to predict state trajectory
_, predicted_states = integrate_time_independent_in_parallel(apply_net, start_states,
controls, node.stepsize, node.num_steps,
node.hp.integration_method)
return jnp.mean((predicted_states - true_states) * (predicted_states - true_states)) # MSE
# Gradient descent on the loss function in scope
@jax.jit
def update(params: hk.Params, opt_state: optax.OptState, minibatch: Batch) -> Tuple[hk.Params, optax.OptState]:
grads = jax.grad(loss)(params, minibatch)
updates, opt_state = node.opt.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
  best_val_loss = float('inf')
best_params = None
epoch = None
count = 0
print("check loss frequency is", node.hp.loss_recording_frequency)
# train_loss, validation_loss = calculate_losses(node, loss, 0, also_record_planning_loss)
for epoch in trange(node.hp.num_epochs):
overall_epoch = start_epoch + epoch * node.hp.train_size
if epoch % node.hp.loss_recording_frequency == 0:
# As a side-effect, this function also fills the loss lists
# print('calculating losses')
train_loss, validation_loss = calculate_losses(node, loss, overall_epoch, also_record_planning_loss)
print(epoch, train_loss, validation_loss)
# Plot progress so far
if epoch % node.hp.plot_progress_frequency == 0:
if node.cfg.plot and save_as is not None:
print("saving progress plot :)")
plot_progress(node, overall_epoch, save_as, extension)
# Early stopping
if epoch % node.hp.early_stop_check_frequency == 0:
if count >= node.hp.early_stop_threshold:
print(f"Stopping early at epoch {epoch}. Threshold was {node.hp.early_stop_threshold} epochs.")
break
# Update early stopping counts/values
if validation_loss >= best_val_loss:
count += node.hp.early_stop_check_frequency
else:
best_val_loss = validation_loss
best_params = hk.data_structures.to_immutable_dict(node.params)
count = 0
# Descend on entire dataset, in minibatches
# NOTE: when we add new data to the train set, we still only use the same number of
# minibatches to count as an "epoch" (so after the first experiment, when
# we complete an epoch, there will still be unseen data each time)
for mb in yield_minibatches(node.hp, node.hp.train_size, node.train_data):
node.params, node.opt_state = update(node.params, node.opt_state, mb)
# Save the best params
node.params = best_params
if epoch and node.cfg.verbose:
print("Trained for {} epochs on dataset of size {}".format(epoch, node.hp.train_size))
return epoch
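# Hypothetical usage: fit a fresh NODE and keep the best-validation params
# (train returns the epoch at which it stopped):
#   node = NeuralODE(hp, cfg)
#   stopped_at = train(node, save_as='plots/progress_', extension='png')
#   node.save_params('params/node.p')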
def calculate_losses(node: NeuralODE,
loss_fn: Callable[[hk.Params, Batch], float],
overall_epoch: int,
also_record_planning_losses: bool = False) -> Tuple[Cost, Cost]:
# Record how many training points we've used
node.losses['ts'].append(overall_epoch)
# Calculate losses
cur_loss = loss_fn(node.params, next(yield_minibatches(node.hp, node.hp.train_size, node.train_data)))
node.losses['train_loss'].append(cur_loss)
validation_loss = loss_fn(node.params, next(yield_minibatches(node.hp, node.hp.val_size, node.validation_data)))
node.losses['validation_loss'].append(validation_loss)
node.losses['loss_on_opt'].append(loss_fn(node.params, node.true_x_and_u_opt[jnp.newaxis]))
if also_record_planning_losses:
planning_loss, planning_defect, u = calculate_planning_loss(node)
node.losses['control_costs'].append(planning_loss)
if planning_defect is not None:
node.losses['constraint_violation'].append(planning_defect)
# Calculate divergences from the optimal trajectories
node.losses['divergence_from_optimal_us'].append(divergence_from_optimal_us(node, u))
node.losses['divergence_from_optimal_xs'].append(divergence_from_optimal_xs(node, u))
return cur_loss, validation_loss
def calculate_planning_loss(node: NeuralODE) -> Tuple[Cost, Optional[Cost], Controls]:
# Get the optimal controls, and cost of applying them
_, u = plan_with_node_model(node)
_, xs = integrate_time_independent(node.system.dynamics, node.system.x_0, u, node.stepsize, # true dynamics
node.num_steps, node.hp.integration_method)
# We only want the states at boundaries of shooting intervals
xs_interval_start = xs[::node.hp.controls_per_interval]
xs_and_us, unused_unravel = ravel_pytree((xs_interval_start, u))
cost1 = node.optimizer.objective(xs_and_us)
# Calculate the final constraint violation, if present
if node.system.x_T is not None:
cv = node.system.x_T - xs[-1]
# if node.cfg.verbose:
# print("constraint violation", cv)
cost2 = jnp.linalg.norm(cv)
else:
cost2 = None
return cost1, cost2, u
# TODO: jit
# This is the "outer" loss of the problem, one of the main things we care about.
# Another "outer" loss, which gives a more RL flavour,
# is the integral cost of applying controls in the true dynamics,
# and the final constraint violation (if present) when applying those controls in the true dynamics.
def divergence_from_optimal_us(node: NeuralODE, us: Controls) -> Cost:
assert len(us) == len(node.true_opt_us)
  return jnp.mean((us - node.true_opt_us) * (us - node.true_opt_us))  # MSE
def divergence_from_optimal_xs(node: NeuralODE, us: Controls) -> Cost:
# Get true state trajectory from applying "optimal" controls
_, xs = integrate_time_independent(node.system.dynamics, node.system.x_0, us,
node.stepsize, node.num_steps, node.hp.integration_method)
assert len(xs) == len(node.true_opt_xs)
return jnp.mean((xs - node.true_opt_xs) * (xs - node.true_opt_xs)) # MSE
def plot_progress(node, trained_for, save_as, extension, also_plan=False):
plot_trajectory(node,
optimal=True,
title="Prediction on optimal trajectory after {} epochs".format(trained_for),
save_as=save_as + str(trained_for) + f"_im_opt.{extension}")
plot_trajectory(node,
optimal=False,
title="Prediction on train trajectory after {} epochs".format(trained_for),
save_as=save_as + str(trained_for) + f"_im_train_rand.{extension}")
plot_trajectory(node,
optimal=False,
validation=True,
title="Prediction on validation trajectory after {} epochs".format(trained_for),
save_as=save_as + str(trained_for) + f"_im_val_rand.{extension}")
# Use the network for planning
if also_plan:
plan_and_plot(node,
title="Planning after {} epochs".format(trained_for),
save_as=save_as + str(trained_for) + f"_plan.{extension}")
| 8,523
| 41.40796
| 116
|
py
|
myriad
|
myriad-main/myriad/neural_ode/__init__.py
| 0
| 0
| 0
|
py
|
|
myriad
|
myriad-main/myriad/neural_ode/create_node.py
|
# (c) 2021 Nikolaus Howe
from pathlib import Path
import haiku as hk
import jax
import jax.numpy as jnp
import optax
import pickle as pkl
from dataclasses import dataclass
from jax import config
from typing import Optional
from myriad.config import HParams, Config, SamplingApproach
from myriad.trajectory_optimizers import get_optimizer
from myriad.utils import get_state_trajectory_and_cost, generate_dataset, yield_minibatches
config.update("jax_enable_x64", True)
def make_empty_losses():
return {'ts': [],
'train_loss': [],
'validation_loss': [],
'loss_on_opt': [],
'control_costs': [],
'constraint_violation': [],
'divergence_from_optimal_us': [],
'divergence_from_optimal_xs': []}
##############################
# Neural ODE for opt control #
##############################
@dataclass
class NeuralODE(object):
hp: HParams
cfg: Config
key: jnp.ndarray = jax.random.PRNGKey(42)
mle: bool = True
dataset: Optional[jnp.ndarray] = None
def __post_init__(self) -> None:
self.system = self.hp.system()
self.num_steps = self.hp.intervals * self.hp.controls_per_interval
self.stepsize = self.system.T / self.num_steps # Segment length
# Get the true optimal controls and corresponding trajectory
# real_solver = self.hp.nlpsolver
# self.hp.nlpsolver = NLPSolverType.SLSQP
if self.mle:
# Try to load the optimal trajectories. If they don't exist, solve for them ourselves.
opt_path = f'datasets/{self.hp.system.name}/optimal_trajectories/'
Path(opt_path).mkdir(parents=True, exist_ok=True)
opt_name = f'{self.hp.intervals}_{self.hp.controls_per_interval}_{self.hp.optimizer.name}_' \
f'{self.hp.integration_method.name}_{self.hp.quadrature_rule.name}'
try:
self.true_opt_us = jnp.array(pkl.load(open(f'{opt_path + opt_name}_us', 'rb')))
self.true_opt_xs = jnp.array(pkl.load(open(f'{opt_path + opt_name}_xs', 'rb')))
except FileNotFoundError as e:
print("Didn't find pre-saved optimal trajectories, so calculating our own.")
self.optimizer = get_optimizer(self.hp, self.cfg, self.system)
self.optimal_solution = self.optimizer.solve()
self.true_opt_us = self.optimal_solution['u']
self.true_opt_xs = self.optimal_solution['x']
pkl.dump(self.true_opt_us, open(f'{opt_path + opt_name}_us', 'wb'))
pkl.dump(self.true_opt_xs, open(f'{opt_path + opt_name}_xs', 'wb'))
# TODO: think about quadratic case
# _, self.true_opt_xs = self.integrate(self.true_opt_us)
# print("getting state traj and cost")
self.true_opt_xs, self.true_opt_cost = get_state_trajectory_and_cost(
self.hp, self.system, self.system.x_0, self.true_opt_us)
self.true_x_and_u_opt = jnp.concatenate([self.true_opt_xs, self.true_opt_us], axis=1)
# self.hp.nlpsolver = real_solver
# Create a best guess which we'll update as we plan
self.best_guess_us = None
self.best_guess_us_cost = None
# Record the important info about this node
self.info = f"{self.hp.learning_rate}" \
f"_{self.hp.train_size}" \
f"_{self.hp.val_size}" \
f"_{self.hp.test_size}" \
f"_start_spread_{self.hp.start_spread}" \
f"_{self.hp.minibatch_size}" \
f"_({'_'.join(str(layer) for layer in self.hp.hidden_layers)})" \
f"_{self.hp.sample_spread}" \
f"_{self.hp.noise_level}"
# Generate the (initial) dataset
self.train_data, self.validation_data, self.test_data, self.full_data = self.make_datasets(first_time=True)
# Initialize the parameters and optimizer state
self.net = hk.without_apply_rng(hk.transform(self.net_fn))
mb = next(yield_minibatches(self.hp, self.hp.train_size, self.train_data))
print("node: params initialized with: ", mb[1, 1, :].shape)
self.key, subkey = jax.random.split(self.key) # Always update the NODE's key
self.params = self.net.init(subkey, mb[1, 1, :])
self.opt = optax.adam(self.hp.learning_rate)
self.opt_state = self.opt.init(self.params)
self.losses = make_empty_losses()
if self.cfg.verbose:
print("node: minibatches are of shape", mb.shape)
print("node: initialized network weights")
# The neural net for the neural ode: a small and simple MLP
  def net_fn(self, x_and_u: jnp.ndarray) -> jnp.ndarray:
the_layers = []
for layer_size in self.hp.hidden_layers:
the_layers.append(hk.Linear(layer_size))
the_layers.append(jax.nn.sigmoid)
the_layers.append(hk.Linear(len(self.system.x_0)))
mlp = hk.Sequential(the_layers)
return mlp(x_and_u) # will automatically broadcast over minibatches
def save_params(self, filename: str) -> None:
pkl.dump(self.params, open(filename, 'wb'))
def load_params(self, params_pickle: str) -> None:
try:
temp_params = hk.data_structures.to_mutable_dict(pkl.load(open(params_pickle, 'rb')))
print("loaded node params from file")
if 'linear/~/linear' in temp_params:
temp_params['linear_1'] = temp_params['linear/~/linear']
del temp_params['linear/~/linear']
if 'linear/~/linear/~/linear' in temp_params:
temp_params['linear_2'] = temp_params['linear/~/linear/~/linear']
del temp_params['linear/~/linear/~/linear']
self.params = hk.data_structures.to_immutable_dict(temp_params)
except FileNotFoundError as e:
raise e
def load_dataset(self, file_path: str) -> None:
try:
dataset = pkl.load(open(file_path, 'rb'))
dataset = jnp.array(dataset)
self.train_data = dataset[:self.hp.train_size]
self.validation_data = dataset[self.hp.train_size:self.hp.train_size + self.hp.val_size]
self.test_data = dataset[self.hp.train_size + self.hp.val_size:]
      self.full_data = dataset
except FileNotFoundError as e:
raise e
def make_datasets(self, first_time=False):
# Generate the new data
if self.hp.sampling_approach == SamplingApproach.CURRENT_OPTIMAL and self.best_guess_us is not None:
all_data = generate_dataset(self.hp, self.cfg, given_us=self.best_guess_us)
else:
all_data = generate_dataset(self.hp, self.cfg)
# Split the new data
train_data = all_data[:self.hp.train_size]
validation_data = all_data[self.hp.train_size:self.hp.train_size + self.hp.val_size]
test_data = all_data[self.hp.train_size + self.hp.val_size:]
# If not first time, add the new data to our existing dataset
if not first_time:
train_data = jnp.concatenate((self.train_data, train_data), axis=0)
validation_data = jnp.concatenate((self.validation_data, validation_data), axis=0)
test_data = jnp.concatenate((self.test_data, test_data), axis=0)
if self.cfg.verbose:
print("Generated training trajectories of shape", train_data.shape)
print("Generated validation trajectories of shape", validation_data.shape)
print("Generated test trajectories of shape", test_data.shape)
return train_data, validation_data, test_data, all_data
def augment_datasets(self):
    self.train_data, self.validation_data, self.test_data, self.full_data = self.make_datasets(first_time=False)
if __name__ == "__main__":
hp = HParams()
cfg = Config()
my_node = NeuralODE(hp, cfg)
print("my_node", my_node)
| 7,428
| 40.044199
| 115
|
py
|
myriad
|
myriad-main/myriad/nlp_solvers/__init__.py
|
# (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import time
from cyipopt import minimize_ipopt
from scipy.optimize import minimize
from typing import Dict
from myriad.config import Config, HParams, NLPSolverType
from myriad.defaults import learning_rates
from myriad.utils import get_state_trajectory_and_cost
### Import your new nlp solver here ###
from myriad.nlp_solvers.extra_gradient import extra_gradient
def solve(hp: HParams, cfg: Config, opt_dict: Dict) -> Dict[str, jnp.ndarray]:
"""
  Use the solver indicated in the hyperparameters to solve the constrained optimization problem.
Args:
hp: the hyperparameters
cfg: the extra hyperparameters
opt_dict: everything needed for the solve
Returns
A dictionary with the optimal controls and corresponding states
(and for quadratic interpolation schemes, the midpoints too)
"""
_t1 = time.time()
opt_inputs = {
'fun': jax.jit(opt_dict['objective']) if cfg.jit else opt_dict['objective'],
'x0': opt_dict['guess'],
'constraints': ({
'type': 'eq',
'fun': jax.jit(opt_dict['constraints']) if cfg.jit else opt_dict['constraints'],
'jac': jax.jit(jax.jacrev(opt_dict['constraints'])) if cfg.jit else jax.jacrev(opt_dict['constraints']),
}),
'bounds': opt_dict['bounds'],
'jac': jax.jit(jax.grad(opt_dict['objective'])) if cfg.jit else jax.grad(opt_dict['objective']),
'options': {"maxiter": hp.max_iter}
}
### Add new nlp solvers to this list ###
if hp.nlpsolver == NLPSolverType.EXTRAGRADIENT:
opt_inputs['method'] = 'exgd'
if hp.system in learning_rates:
opt_inputs['options'] = {**opt_inputs['options'], **learning_rates[hp.system]}
solution = extra_gradient(**opt_inputs)
elif hp.nlpsolver == NLPSolverType.SLSQP:
opt_inputs['method'] = 'SLSQP'
solution = minimize(**opt_inputs)
elif hp.nlpsolver == NLPSolverType.TRUST:
opt_inputs['method'] = 'trust-constr'
solution = minimize(**opt_inputs)
elif hp.nlpsolver == NLPSolverType.IPOPT:
opt_inputs['method'] = 'ipopt'
solution = minimize_ipopt(**opt_inputs)
else:
print("Unknown NLP solver. Please choose among", list(NLPSolverType.__members__.keys()))
raise ValueError
_t2 = time.time()
if cfg.verbose:
print('Solver exited with success:', solution['success'])
print(f'Completed in {_t2 - _t1} seconds.')
system = hp.system()
opt_x, c = get_state_trajectory_and_cost(hp, system, system.x_0, (opt_dict['unravel'](solution['x']))[1])
print('Cost given by solver:', solution['fun'])
print("Cost given by integrating the control trajectory:", c)
if system.x_T is not None:
achieved_last_state = opt_x[-1]
desired_last_state = system.x_T
defect = []
for i, el in enumerate(desired_last_state):
if el is not None:
defect.append(achieved_last_state[i] - el)
print("Defect:", defect)
lmbda = None
if hp.nlpsolver == NLPSolverType.IPOPT:
lmbda = solution.info['mult_g']
elif hp.nlpsolver == NLPSolverType.TRUST:
lmbda = solution['v']
elif hp.nlpsolver == NLPSolverType.EXTRAGRADIENT:
lmbda = solution['v']
# print("the full solution was", solution)
# raise SystemExit
results = {'x': (opt_dict['unravel'](solution['x']))[0],
'u': (opt_dict['unravel'](solution['x']))[1],
'xs_and_us': solution['x'],
'cost': solution['fun']}
if lmbda is not None:
results['lambda'] = lmbda
return results
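if __name__ == "__main__":
  # Hedged sketch of the `opt_dict` shape `solve` consumes, demonstrated on a
  # toy equality-constrained QP (min x0^2 + x1^2 s.t. x0 + x1 = 1) solved
  # directly with SLSQP. The `unravel` entry is an illustrative stand-in, not
  # the repo's trajectory packing.
  toy = {
      'objective': lambda z: jnp.sum(z ** 2),
      'constraints': lambda z: jnp.array([z[0] + z[1] - 1.0]),
      'guess': jnp.zeros(2),
      'bounds': [(-5., 5.), (-5., 5.)],
      'unravel': lambda z: (z[:1], z[1:]),
  }
  sol = minimize(fun=toy['objective'], x0=toy['guess'], method='SLSQP',
                 jac=jax.grad(toy['objective']),
                 constraints=({'type': 'eq', 'fun': toy['constraints'],
                               'jac': jax.jacrev(toy['constraints'])},),
                 bounds=toy['bounds'])
  print("toy solution:", sol.x)  # expect roughly [0.5, 0.5]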
| 3,500
| 34.363636
| 110
|
py
|
myriad
|
myriad-main/myriad/nlp_solvers/extra_gradient.py
|
# (c) 2021 Nikolaus Howe
import jax.numpy as jnp
from jax import jit, grad
from tensorboardX import SummaryWriter # for parameter tuning
writer = SummaryWriter()
def extra_gradient(fun, x0, method, constraints, bounds, jac, options):
del method, jac
print("we're trying exgd with steps:", options['maxiter'])
constraint_fun = constraints['fun']
max_iter = options['maxiter'] if 'maxiter' in options else 30_000
eta_x = options['eta_x'] if 'eta_x' in options else 1e-1 # primals
eta_v = options['eta_v'] if 'eta_v' in options else 1e-3 # duals
atol = options['atol'] if 'atol' in options else 1e-6 # convergence tolerance
@jit
def lagrangian(x, lmbda):
return fun(x) + lmbda @ constraint_fun(x)
@jit
# We address bounds by clipping
def step(x, lmbda):
x_bar = jnp.clip(x - eta_x * grad(lagrangian, argnums=0)(x, lmbda),
a_min=bounds[:, 0], a_max=bounds[:, 1])
x_new = jnp.clip(x - eta_x * grad(lagrangian, argnums=0)(x_bar, lmbda),
a_min=bounds[:, 0], a_max=bounds[:, 1])
lmbda_new = lmbda + eta_v * grad(lagrangian, argnums=1)(x_new, lmbda)
return x_new, lmbda_new
def solve(x, lmbda):
nonlocal eta_x, eta_v # so we can modify them during solve
success = False
x_old = x + 20 # just so we don't terminate immediately
for i in range(max_iter):
if i % 2000 == 0:
# Tensorboard recording here
writer.add_scalar('loss/fx', fun(x), i)
cur_lag = lagrangian(x, lmbda)
writer.add_scalar('lagrangian/lag', cur_lag, i)
for d, hi in enumerate(constraint_fun(x)):
writer.add_scalar('vars/lambda_{}'.format(d), lmbda[d], i)
writer.add_scalar('constraints/hx_{}'.format(d), hi, i)
# Success
if i % 1000 == 0 and jnp.allclose(x_old, x, rtol=0., atol=atol): # tune tolerance according to need
success = True
break
# Decrease step size
if i % 1000 == 0:
eta_x *= 0.999
eta_v *= 0.999
x_old = x
x, lmbda = step(x, lmbda)
      if i % 1000 == 0 and (jnp.isnan(x).any() or jnp.isnan(lmbda).any()):
print("WE GOT NANS")
print("cur x", x)
print("cur lmbda", lmbda)
raise SystemExit
writer.close()
return x, lmbda, success
lmbda_init = jnp.ones_like(constraint_fun(x0))
x, lmbda, success = solve(x0, lmbda_init)
# writer.export_scalars_to_json("./all_scalars.json")
return {
'x': x,
'v': lmbda,
'fun': fun(x),
'success': success
}
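if __name__ == "__main__":
  # Hedged sanity check on a toy QP: min x0^2 + x1^2  s.t.  x0 + x1 = 1, whose
  # optimum is (0.5, 0.5) with multiplier -1. This exercises the solver end to
  # end (including its TensorBoard logging); `success` may remain False if the
  # tolerance is not met within maxiter, but the iterate should land nearby.
  sol = extra_gradient(fun=lambda x: jnp.sum(x ** 2),
                       x0=jnp.zeros(2),
                       method='exgd',
                       constraints={'fun': lambda x: jnp.array([x[0] + x[1] - 1.0])},
                       bounds=jnp.array([[-5., 5.], [-5., 5.]]),
                       jac=None,
                       options={'maxiter': 20_000})
  print(sol['x'], sol['v'], sol['success'])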
| 2,531
| 29.878049
| 106
|
py
|
myriad
|
myriad-main/myriad/systems/base.py
|
# (c) 2021 Nikolaus Howe
from abc import ABC
from dataclasses import dataclass
from typing import Mapping, Optional
import jax.numpy as jnp
from myriad.custom_types import Control, Controls, Cost, DState, Params, State, States
@dataclass
class FiniteHorizonControlSystem(object):
"""
Abstract class describing a finite-horizon control system. Model a problem of the form:
.. math::
\\begin{align}
&\\min_u \\quad &&g_T(x_T,u_T,T) + \\int_0^T g(x,u,t) dt \\\\
& \\; \\mathrm{s.t.}\\quad && x'(t) = f(x,u,t) \\\\
& && x(0)=x_0
\\end{align}
"""
x_0: jnp.ndarray
""" State at time 0"""
x_T: Optional[jnp.ndarray]
"""State at time T"""
T: float
"""Duration of trajectory"""
bounds: jnp.ndarray
"""State and control bounds"""
terminal_cost: bool = False
"""Whether or not there is an additional cost added at the end of the trajectory"""
discrete: bool = False
"""Whether or not the system is discrete"""
# def __post_init__(self):
# self.x_0 = self.x_0.astype(jnp.float64)
# if self.x_T is not None:
# assert self.x_0.shape == self.x_T.shape
# self.x_T = self.x_T.astype(jnp.float64)
# assert self.bounds.shape == (self.x_0.shape[0]+1, 2)
# assert self.T > 0
def dynamics(self, x_t: State, u_t: Control) -> DState:
""" The set of equations defining the dynamics of the system. For continuous system, return the vector fields
of the state variables \\(x\\) under the influence of the controls \\(u\\), i.e.:
$$x'(t) = f(x,u,t)$$
Args:
x_t: (State) -- An array, representing the state variables at various time t
u_t: (Control) -- An array, representing the control variables at various time t
Returns:
dx_t: (DState) -- The derivative value of the state variables, x_t, at corresponding time t
"""
raise NotImplementedError
def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control):
"""
Run the system with custom parameters. Override in individual system definition
if you want to use this.
Args:
params: (Params)
x_t: (State)
u_t: (Control)
Returns:
dx_t: (DState)
"""
return self.dynamics(x_t, u_t)
def cost(self, x_t: State, u_t: Control, t: Optional[float]) -> Cost:
""" The instantaneous time function that the system seeks to minimize.
Args:
x_t: (State) -- State variables at time t
u_t: (Control) -- Control variables at time t
t: (float, optional) -- Time parameter
Returns:
cost: (Cost) -- The instantaneous cost \\( g(x_t,u_t,t) \\)
"""
raise NotImplementedError
def parametrized_cost(self, params: Params, x_t: State, u_t: Control, t: Optional[float]):
"""
Run the cost with custom parameters. Override in individual system definition
if you want to use this
Args:
params: (Mapping)
x_t: (State)
u_t: (Control)
t: (optional float)
Returns:
cost: (Cost)
"""
return self.cost(x_t, u_t, t)
# TODO: decide if this should also have a parametrized version
def terminal_cost_fn(self, x_T: State, u_T: Control, T: Optional[float] = None) -> Cost:
""" The cost function associated to the final state
Args:
x_T: (State) -- Final state
u_T: (Control) -- Final control
T: (float) -- The Horizon
Returns:
      cost_T: (Cost) -- The terminal cost \\(g_T(x_T,u_T,T)\\)
"""
return 0
# def plot_solution(self, x: States, u: Controls) -> None:
# """ The plotting tool for the current system
#
# Args:
# x: State array
# u: Control array
# """
#
# raise NotImplementedError
@dataclass
class IndirectFHCS(FiniteHorizonControlSystem, ABC):
"""
  Augment the base class for defining a control problem over a finite horizon so that indirect methods can be used.
Model a problem of the form:
.. math::
\\begin{align}
& \\min_u \\quad && g_T(x_T,u_T,T) + \\int_0^T g(x,u,t) dt\\\\
& \\; \\mathrm{s.t.}\\quad && x'(t) = f(x,u,t)\\\\
& &&x(0)=x_0
\\end{align}
  Taking into account the adjoint dynamics and the optimality characterization given by Pontryagin's maximum principle.
"""
adj_T: Optional[jnp.ndarray] = None
"""Adjoint at time T"""
guess_a: Optional[float] = None
"""Initial lower guess for secant method"""
guess_b: Optional[float] = None
"""Initial upper guess for secant method"""
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
"""
The adjoint dynamics, given by:
$$\\lambda '(t) = -\\frac{\\partial H}{\\partial x}$$
\\( H \\) being the system Hamiltonian
Args:
adj_t: (jnp.ndarray) -- An array, representing the adjoint variables at various time t
x_t: (jnp.ndarray) -- An array, representing the state variables at various time t
u_t: (jnp.ndarray, optional) -- An array, representing the control variables at various time t
t: (jnp.ndarray, optional) -- The time array, for time-dependent systems
Returns:
d_adj_t: (jnp.ndarray) -- The derivative value of the adjoint variables, \\(\\lambda\\), at corresponding time t
"""
raise NotImplementedError
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
"""
    The optimality characterization of the controls with respect to the state and adjoint variables. That is, the
    controls cannot be optimal unless they satisfy:
$$\\frac{\\partial H}{\\partial u} = 0 \\; \\mathrm{at} \\; u^*$$
This leads to the following condition, the optimality characterization, on \\(u^*\\) if \\(H\\) is quadratic in
\\(u\\):
$$u^* = h(x,t)$$
Args:
adj_t: (jnp.ndarray) -- An array, representing the adjoint variables at various time t
x_t: (jnp.ndarray, optional) -- An array, representing the state variables at various time t
t: (jnp.ndarray, optional) -- The time array, for time-dependent systems
Returns:
u_star: (jnp.ndarray) -- Control candidates at corresponding time t that meets the above condition
"""
raise NotImplementedError
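if __name__ == "__main__":
  # Hedged sketch of the minimal surface a concrete system must implement:
  # a scalar integrator x' = u with quadratic control cost, purely illustrative.
  class _Integrator(FiniteHorizonControlSystem):
    def __init__(self):
      super().__init__(x_0=jnp.array([0.]), x_T=jnp.array([1.]), T=1.,
                       bounds=jnp.array([[-2., 2.], [-1., 1.]]))
    def dynamics(self, x_t: State, u_t: Control) -> DState:
      return jnp.atleast_1d(u_t)
    def cost(self, x_t: State, u_t: Control, t: Optional[float] = None) -> Cost:
      return jnp.squeeze(u_t ** 2)
  toy = _Integrator()
  print(toy.dynamics(toy.x_0, 0.5), toy.cost(toy.x_0, 0.5, 0.))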
| 6,308
| 33.47541
| 121
|
py
|
myriad
|
myriad-main/myriad/systems/__init__.py
|
# (c) 2021 Nikolaus Howe
from enum import Enum
from typing import Union
from .base import FiniteHorizonControlSystem, IndirectFHCS
from myriad.systems.classical_control.cartpole import CartPole
from myriad.systems.classical_control.mountain_car import MountainCar
from myriad.systems.classical_control.pendulum import Pendulum
from myriad.systems.miscellaneous.rocket_landing import RocketLanding
from myriad.systems.miscellaneous.seir import SEIR
from myriad.systems.miscellaneous.tumour import Tumour
from myriad.systems.miscellaneous.van_der_pol import VanDerPol
from myriad.systems.lenhart.bacteria import Bacteria
from myriad.systems.lenhart.bear_populations import BearPopulations
from myriad.systems.lenhart.bioreactor import Bioreactor
from myriad.systems.lenhart.cancer_treatment import CancerTreatment
from myriad.systems.lenhart.epidemic_seirn import EpidemicSEIRN
from myriad.systems.lenhart.harvest import Harvest
from myriad.systems.lenhart.glucose import Glucose
from myriad.systems.lenhart.hiv_treatment import HIVTreatment
from myriad.systems.lenhart.invasive_plant import InvasivePlant
from myriad.systems.lenhart.mould_fungicide import MouldFungicide
from myriad.systems.lenhart.predator_prey import PredatorPrey
from myriad.systems.lenhart.simple_case import SimpleCase
from myriad.systems.lenhart.simple_case_with_bounds import SimpleCaseWithBounds
from myriad.systems.lenhart.timber_harvest import TimberHarvest
class SystemType(Enum):
CARTPOLE = CartPole
VANDERPOL = VanDerPol
SEIR = SEIR
TUMOUR = Tumour
MOUNTAINCAR = MountainCar
PENDULUM = Pendulum
SIMPLECASE = SimpleCase
MOULDFUNGICIDE = MouldFungicide
BACTERIA = Bacteria
SIMPLECASEWITHBOUNDS = SimpleCaseWithBounds
CANCERTREATMENT = CancerTreatment
EPIDEMICSEIRN = EpidemicSEIRN
HARVEST = Harvest
HIVTREATMENT = HIVTreatment
BEARPOPULATIONS = BearPopulations
GLUCOSE = Glucose
TIMBERHARVEST = TimberHarvest
BIOREACTOR = Bioreactor
PREDATORPREY = PredatorPrey
INVASIVEPLANT = InvasivePlant
ROCKETLANDING = RocketLanding
def __call__(self, *args, **kwargs) -> Union[FiniteHorizonControlSystem, IndirectFHCS]:
return self.value(*args, **kwargs)
state_descriptions = {
SystemType.CARTPOLE: [[0, 1, 2, 3], ["Position", "Angle", "Velocity", "Angular velocity"]],
SystemType.SEIR: [[0, 1, 2, 3], ["S", "E", "I", "N"]],
SystemType.TUMOUR: [[0, 1, 2], ["p", "q", "y"]],
SystemType.VANDERPOL: [[0, 1], ["x0", "x1"]],
SystemType.BACTERIA: [[0], ["Bacteria concentration"]],
SystemType.BEARPOPULATIONS: [[0, 1, 2], ["Park population", "Forest population", "Urban population"]],
SystemType.BIOREACTOR: [[0], ["Bacteria concentration"]],
SystemType.CANCERTREATMENT: [[0], ["Normalized tumour density"]],
SystemType.EPIDEMICSEIRN: [[2], ["Susceptible population", "Exposed population",
"Infectious population", "Total population"]],
SystemType.HARVEST: [[0], ["Population mass"]],
SystemType.GLUCOSE: [[0, 1], ["Blood glucose", "Net hormonal concentration"]],
SystemType.HIVTREATMENT: [[0], ["Healthy cells", "Infected cells", "Viral charge"]],
SystemType.INVASIVEPLANT: [[0, 1, 2, 3, 4], ["Focus 1", "Focus 2", "Focus 3", "Focus 4", "Focus 5"]],
SystemType.MOULDFUNGICIDE: [[0], ["Mould population"]],
SystemType.MOUNTAINCAR: [[0, 1], ["Position", "Velocity"]],
SystemType.PENDULUM: [[0, 1], ["Angle", "Angular velocity"]],
SystemType.PREDATORPREY: [[0, 1], ["Predator population", "Prey population"]],
SystemType.ROCKETLANDING: [[0, 1, 2, 3, 4, 5], ["x", "dot x", "y", "dot y", "theta", "dot theta"]],
SystemType.SIMPLECASE: [[0], ["State"]],
SystemType.SIMPLECASEWITHBOUNDS: [[0], ["State"]],
SystemType.TIMBERHARVEST: [[0], ["Cumulative timber harvested"]]
}
# NOTE: the control descriptions are currently not used for plotting
control_descriptions = {
SystemType.CARTPOLE: [[0], ["Force"]],
SystemType.SEIR: [[0], ["Response intensity"]],
SystemType.TUMOUR: [[0], ["Drug strength"]],
SystemType.VANDERPOL: [[0], ["Control"]],
SystemType.BACTERIA: [[0], ["Amount of chemical nutrient"]],
SystemType.BEARPOPULATIONS: [[0, 1], ["Harvesting rate in park", "Harvesting rate in forest"]],
SystemType.BIOREACTOR: [[0], ["Amount of chemical nutrient"]],
SystemType.CANCERTREATMENT: [[0], ["Drug strength"]],
SystemType.EPIDEMICSEIRN: [[0], ["Vaccination rate"]],
SystemType.HARVEST: [[0], ["Harvest rate"]],
SystemType.GLUCOSE: [[0], ["Insulin level"]],
SystemType.HIVTREATMENT: [[0], ["Drug intensity"]],
SystemType.MOULDFUNGICIDE: [[0], ["Fungicide level"]],
SystemType.MOUNTAINCAR: [[0], ["Force"]],
SystemType.PENDULUM: [[0], ["Force"]],
SystemType.PREDATORPREY: [[0], ["Pesticide level"]],
SystemType.ROCKETLANDING: [[0, 1], ["Thrust percent", "Angle"]],
SystemType.SIMPLECASE: [[0], ["Control"]],
SystemType.SIMPLECASEWITHBOUNDS: [[0], ["Control"]],
SystemType.TIMBERHARVEST: [[0], ["Reinvestment level"]]
}
def get_name(hp):
if hp.system == SystemType.CANCERTREATMENT:
title = "Cancer Treatment"
elif hp.system == SystemType.MOUNTAINCAR:
title = 'Mountain Car'
elif hp.system == SystemType.MOULDFUNGICIDE:
title = 'Mould Fungicide'
elif hp.system == SystemType.VANDERPOL:
title = 'Van Der Pol'
elif hp.system == SystemType.PREDATORPREY:
title = 'Predator Prey'
elif hp.system == SystemType.PENDULUM:
title = "Pendulum"
else:
title = None
return title
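if __name__ == "__main__":
  # Hedged usage sketch: SystemType members are callable and instantiate the
  # underlying system class with its default parameters.
  system = SystemType.VANDERPOL()
  print(type(system).__name__, system.x_0,
        state_descriptions[SystemType.VANDERPOL][1])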
| 5,439
| 43.590164
| 104
|
py
|
myriad
|
myriad-main/myriad/systems/neural_ode/node_system.py
|
# (c) 2021 Nikolaus Howe
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
from myriad.neural_ode.create_node import NeuralODE
import jax.numpy as jnp
from myriad.systems.base import FiniteHorizonControlSystem
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
class NodeSystem(FiniteHorizonControlSystem):
def __init__(self, node: NeuralODE, true_system: FiniteHorizonControlSystem) -> None:
"""
A generic system with NODE dynamics
"""
self.node = node
self.true_system = true_system
super().__init__(
x_0=true_system.x_0,
x_T=true_system.x_T,
T=true_system.T,
bounds=true_system.bounds,
terminal_cost=true_system.terminal_cost
)
# NOTE: for now, only the dynamics is learnable (not the cost)
# True dynamics
def dynamics(self, x_t: State, u_t: Control, t: Timestep = None) -> DState:
return self.true_system.dynamics(x_t, u_t) # TODO: we really should make the dynamics accept a t
# Neural ODE dynamics
def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control, t: Timestep = None) -> DState:
x_and_u = jnp.append(x_t, u_t)
return self.node.net.apply(params, x_and_u)
# True cost
def cost(self, x_t: State, u_t: Control, t: Timestep = None) -> Cost:
return self.true_system.cost(x_t, u_t, t)
| 1,363
| 30.72093
| 106
|
py
|
myriad
|
myriad-main/myriad/systems/neural_ode/__init__.py
| 0
| 0
| 0
|
py
|
|
myriad
|
myriad-main/myriad/systems/miscellaneous/tumour.py
|
import jax.numpy as jnp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import FiniteHorizonControlSystem
class Tumour(FiniteHorizonControlSystem):
"""
Tumour anti-angiogenesis model, from [Practical Methods for Optimal Control Using Nonlinear Programming (Third Edition, Chapter 10.70)](https://my.siam.org/Store/Product/viewproduct/?ProductId=31657301).
More details can be found in [Ledzewicz and Schattler](https://www.siue.edu/~uledzew/papers/angioMTNS.pdf)
The model describes the growth of a tumor that needs its own blood vessels to continue to grow. Endothelial cells
provide lining for newly forming blood vessels and as such, can be inhibited to reduce the tumor growth. The model can
be described as:
.. math::
\\begin{align}
& \\min_{u} \\quad && p(T) \\\\
& \\; \\mathrm{s.t.}\\quad && p'(t) = -\\xi p \\ln(\\frac{p}{q}) \\\\
& && q'(t) = bp - (\\mu + dp^{\\frac{2}{3}}) q - G u q \\\\
& && y'(t) = u \\\\
& && p(0) = p_0 ,\\; q(0) = q_0 ,\\; y(0) = 0 \\\\
& && 0 <= p(t) ,\\; 0 <= q(t) ,\\; 0 <= y(t) <= A ,\\; 0 <= u(t) <= a \\\\
\\end{align}
Notes
-----
\\(p(t)\\): The size of the tumor \n
\\(q(t)\\): The amount of vascular endothelial cells \n
  \\(u(t)\\): Angiogenic dose rate; an external inhibitor decreasing \\(q(t)\\) \n
  \\(y(t)\\): A measure of the total amount of external inhibitor used \n
\\(a\\): Instantaneous limit over the inhibitor that can be administered \n
\\(A\\): Total limit over the inhibitor that can be administered \n
\\(\\xi\\): Tumor growth parameter \n
\\(\\mu\\): Loss rate of endothelial cells from natural causes \n
\\(b\\): Birth rate of endothelial cells from stimulation by the tumor \n
\\(d\\): Death rate of endothelial cells from inhibition by the tumor
"""
def __init__(self, xi=0.084, b=5.85, d=0.00873, G=0.15, mu=0.02):
# Learnable parameters
self.xi = xi # per day (tumour growth)
self.b = b # per day (birth rate)
self.d = d # per mm^2 per day (death rate)
self.G = G # kg per mg of dose per day (antiangiogenic killing)
self.mu = mu # per day (loss of endothelial cells due to natural causes)
t_F = 1.2 # days
# State and Control Bounds
a = 75 # maximum instantaneous dosage
A = 15 # maximum cumulative dosage
p_ = q_ = ((self.b - self.mu) / self.d) ** (3 / 2) # asymptotically stable focus
# Initial State
p_0 = p_ / 2 # Initial tumour volume
q_0 = q_ / 4 # Initial vascular capacity
y_0 = 0 # Initial cumulative dosage
assert p_0 >= q_0 # condition for well-posed problem
super().__init__(
x_0=jnp.array([p_0, q_0, y_0]),
x_T=None,
T=t_F,
bounds=jnp.array([
[0., p_], # p
[0., q_], # q
[0., A], # y
[0., a], # control
]),
terminal_cost=True,
)
def dynamics(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
p, q, y = x_t
_p = jnp.squeeze(-self.xi * p * jnp.log(p / q))
_q = jnp.squeeze(q * (self.b - (self.mu + self.d * p ** (2 / 3) + self.G * u_t)))
_y = jnp.squeeze(u_t)
return jnp.asarray([_p, _q, _y])
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
xi = params['xi']
b = params['b']
d = params['d']
mu = params['mu']
G = params['G']
p, q, y = x_t
_p = jnp.squeeze(-xi * p * jnp.log(p / q))
_q = jnp.squeeze(q * (b - (mu + d * p ** (2 / 3) + G * u_t)))
_y = jnp.squeeze(u_t)
return jnp.asarray([_p, _q, _y])
def cost(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
# nh: I think this should be changed to u^2, otherwise there
# is no penalty for oscillating in u
# return u_t * u_t
return 0.
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
return 0. # nothing to learn here
def terminal_cost_fn(self, x_T: jnp.ndarray, u_T: jnp.ndarray, T: jnp.ndarray = None) -> float:
p, q, y = x_T
return p
# def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
# colnames = ['p', 'q', 'y']
# x = pd.DataFrame(x, columns=colnames)
#
# sns.set(style='darkgrid')
# plt.figure(figsize=(10, 3))
# ts_x = jnp.linspace(0, self.T, x.shape[0])
# ts_u = jnp.linspace(0, self.T, u.shape[0])
#
# for idx, title in enumerate(colnames):
# plt.subplot(1, 4, idx+1)
# plt.title(title)
# plt.plot(ts_x, x[title])
# plt.xlabel('time (days)')
#
# plt.subplot(1, 4, 4)
# plt.title('u')
# plt.step(ts_u, u, where="post")
# plt.xlabel('time (days)')
#
# plt.tight_layout()
# plt.show()
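if __name__ == "__main__":
  # Hedged sanity check: with u = 0, the drift vanishes at the asymptotically
  # stable focus p = q = ((b - mu) / d) ** (3 / 2) used above for the bounds.
  system = Tumour()
  p_bar = ((system.b - system.mu) / system.d) ** (3 / 2)
  d_state = system.dynamics(jnp.array([p_bar, p_bar, 0.]), 0.)
  print(d_state)  # expect ~[0, 0, 0]: log(p/q) = 0 and b - mu - d p^(2/3) = 0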
| 4,802
| 35.386364
| 205
|
py
|
myriad
|
myriad-main/myriad/systems/miscellaneous/seir.py
|
import jax.numpy as jnp
from myriad.systems import FiniteHorizonControlSystem
class SEIR(FiniteHorizonControlSystem):
"""
SEIR epidemic model for COVID-19, inspired by [Perkins and Espana, 2020](https://link.springer.com/article/10.1007/s11538-020-00795-y).
  This model is an adaptation of SEIR models, tailored to the COVID-19 epidemic, in which the spread is limited
  via non-pharmaceutical interventions (for example, reducing contacts between individuals). As such, the control
  variable ( \\(u(t)\\) ) is a reduction in the transmission coefficient ( \\( \\beta \\) ) resulting from all
  societal measures used to control the virus spread. The goal of the model is to help decision-makers quantify
  the impact of policies limiting the spread.
The formal model is given by:
.. math::
\\begin{align}
& \\min_{u} \\quad && \\int_0^T D(t)^2 + cu(t)^2 dt \\\\
& \\; \\mathrm{s.t.}\\quad && S'(t) = \\mu - (\\delta + \\beta(1-u(t))(\\alpha A(t) + I(t) + H(t))
+ \\iota + \\nu)S(t) \\\\
& && E'(t) = ( \\beta(1-u(t))(\\alpha A(t) + I(t) + H(t))) (S(t) + (1-\\epsilon)V(t)) + \\iota S(t)
- (\\delta + \\rho) E(t) \\\\
& && A'(t) = (1-\\sigma)\\rho E(t) - (\\delta + \\gamma) A(t) \\\\
& && I'(t) = \\sigma \\rho E(t) - (\\delta + \\gamma)I(t) \\\\
& && H'(t) = \\gamma \\kappa I(t) - (\\delta + \\eta) H(t) \\\\
& && V'(t) = \\nu S(t) - (\\delta + \\beta(1 -u(t))(\\alpha A(t) + I(t) + H(t)) (1-\\epsilon)) V(t) \\\\
& && S(0) = S_0 ,\\; E(0) = E_0 ,\\; A(0) = A_0 ,\\; I(0) = I_0 ,\\; H(0) = H_0 ,\\; V(0)=V_0 \\\\
\\end{align}
Notes
-----
  \\(D(t)\\): Population death from COVID-19, estimated as a ratio of the hospitalized population \\(H(t)\\) \n
\\(u(t)\\): Cumulative impact of societal measures (reduction) on the transmission coefficient \n
\\(c\\) : Parameter weighting the cost of societal measures relative to the death toll \n
\\(S(t)\\): Population susceptible to infection \n
\\(E(t)\\): Exposed population but not yet infectious \n
\\(A(t)\\): Infected population but asymptomatic \n
\\(I(t)\\): Infected population and symptomatic \n
\\(H(t)\\): Hospitalized population \n
\\(V(t)\\): Vaccinated population that has not been infected \n
Other constants: See table 2 page 4 of [Perkins and Espana, 2020](https://link.springer.com/content/pdf/10.1007/s11538-020-00795-y.pdf)
"""
def __init__(self):
self.b = 0.525
self.d = 0.5
self.c = 0.0001
self.e = 0.5
self.g = 0.1
self.a = 0.2
self.S_0 = 1000.0
self.E_0 = 100.0
self.I_0 = 50.0
self.R_0 = 15.0
self.N_0 = self.S_0 + self.E_0 + self.I_0 + self.R_0
self.A = 0.1
self.M = 1000
super().__init__(
x_0=jnp.array([self.S_0, self.E_0, self.I_0, self.N_0]),
x_T=None,
T=20,
bounds=jnp.array([
# [-jnp.inf, jnp.inf],
# [-jnp.inf, jnp.inf],
# [-jnp.inf, jnp.inf],
# [-jnp.inf, jnp.inf],
[0., 2000.], # Chosen by observation
[0., 250.], # "
[0., 250.], # "
[0., 3000.], # "
[0., 1.],
]),
terminal_cost=False,
)
def dynamics(self, y_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
S, E, I, N = y_t
s_dot = jnp.squeeze(self.b*N - self.d*S - self.c*S*I - u_t*S)
e_dot = jnp.squeeze(self.c*S*I - (self.e+self.d)*E)
i_dot = jnp.squeeze(self.e*E - (self.g+self.a+self.d)*I)
n_dot = jnp.squeeze((self.b-self.d)*N - self.a*I)
y_t_dot = jnp.array([s_dot, e_dot, i_dot, n_dot])
return y_t_dot
def cost(self, y_t: jnp.ndarray, u_t: float, t: float = None) -> float:
return self.A * y_t[2] + u_t ** 2
# def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
# sns.set()
# plt.figure(figsize=(12, 2.5))
# ts_x = jnp.linspace(0, self.T, x.shape[0])
# ts_u = jnp.linspace(0, self.T, u.shape[0])
#
# plt.subplot(151)
# plt.title('applied control')
# plt.plot(ts_u, u)
# plt.ylim(-0.1, 1.01)
#
# for idx, title in enumerate(['S', 'E', 'I', 'N']):
# plt.subplot(1, 5, idx+2)
# plt.title(title)
# plt.plot(ts_x, x[:, idx])
#
# plt.tight_layout()
# plt.show()
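if __name__ == "__main__":
  # Hedged sketch: a forward-Euler rollout of the uncontrolled system (u = 0),
  # just to exercise the dynamics; this is not the repo's integrator.
  system = SEIR()
  x, dt = system.x_0, system.T / 200
  for _ in range(200):
    x = x + dt * system.dynamics(x, 0.)
  print("state after T =", system.T, ":", x)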
| 4,207
| 35.591304
| 137
|
py
|
myriad
|
myriad-main/myriad/systems/miscellaneous/rocket_landing.py
|
# (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
from typing import Optional
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
from myriad.systems import FiniteHorizonControlSystem
class RocketLanding(FiniteHorizonControlSystem):
"""
Simulate a starship landing! Inspired by Thomas Godden's [medium post](https://thomas-godden.medium.com/how-spacex-lands-starship-sort-of-ee96cdde650b).
  This environment models a rocket trying to land vertically on a flat surface, in a similar fashion to how [SpaceX
  lands its reusable rockets](https://youtu.be/Aq7rDQx9jns?t=20). The rocket free-falls from an initial horizontal
  position and must use its thrusters ( \\(u_0(t), u_1(t)\\) ) to both rotate the craft and slow down the fall. The
  goal is to achieve the desired end state while minimizing fuel usage (minimizing thrust) and the angular velocity,
  in order to limit the strain on the vehicle.
A simplified version of this task form can be modeled as:
.. math::
\\begin{align}
      & \\min_{u} \\quad && \\int_0^T u_0(t)^2 + u_1(t)^2 + 2 \\phi'(t)^2 dt \\\\
      & \\; \\mathrm{s.t.}\\quad && x_0''(t) = \\frac{F_v * u_0(t) * \\sin(u_1(t) + \\phi)}{m} \\\\
      & && x_1''(t) = \\frac{F_v * u_0(t) * \\cos(u_1(t) + \\phi)}{m} - g \\\\
      & && \\phi''(t) = \\frac{-6 F_v * u_0(t) * \\sin(u_1(t))}{m * l} \\\\
& && x_0(0) = x_0'(0) = 0 ,\\; x_1(0) = h_i ,\\; x_1'(0) = v_i ,\\; \\phi(0) = -\\pi/2 ,\\; \\phi'(0)=0\\\\
& && x_0(T) = x_0'(T) = x_1(T) = x_1'(T) = \\phi(T) = \\phi'(T) = 0 \\\\
& && -1 <= u_0(t) <= 1 \\\\
& && -F_g <= u_1(t) <= F_g
\\end{align}
Notes
-----
\\(x_0\\): Horizontal position of the rocket \n
\\(x_0'\\): Horizontal velocity of the rocket \n
\\(x_1\\): Height of the rocket \n
\\(x_1'\\): Falling velocity of the rocket \n
\\(\\phi\\): Angle of the rocket \n
\\(\\phi'\\): Angular velocity of the rocket \n
\\(u_0\\): The vertical thrust, as a ratio of the maximal thrust \\(F_v\\) \n
\\(u_1\\): The [gimbaled thrust](https://en.wikipedia.org/wiki/Gimbaled_thrust) \n
\\(F_v\\): Maximal thrust \n
\\(F_g\\): Maximal gimbaled thrust \n
\\(g\\): Gravity force \n
\\(m\\): Total mass of the rocket \n
\\(l\\): Length of the rocket \n
\\(h_i, v_i\\): Initial height and falling speed \n
\\(T\\): The horizon
"""
# TODO: think about this http://larsblackmore.com/losslessconvexification.htm
def __init__(self, g: float = 9.8, m: float = 100_000, length: float = 50, width: float = 10) -> None:
self.g = g # m/s^2
self.m = m # kg
self.length = length # m
self.width = width # m
self.min_thrust = 880 * 1000 # N
    self.max_thrust = 1 * 2210 * 1000  # N (2210 kN)
# Inertia for a uniform density rod
self.I = 1 / 12 * m * length ** 2
deg_to_rad = 0.01745329
self.max_gimble = 20 * deg_to_rad
self.min_gimble = -self.max_gimble
self.min_percent_thrust = 0.4
self.max_percent_thrust = 1.
# x[0] = x position (m)
# x[1] = x velocity (m/s)
# x[2] = y position (m)
# x[3] = y velocity (m/s)
# x[4] = angle (rad)
# x[5] = angular velocity (rad/s)
# u[0] = thrust (percent)
# u[1] = thrust angle (rad)
super().__init__(
x_0=jnp.array([0., 0., 1000., -80., -jnp.pi / 2., 0.]),
x_T=jnp.array([0., 0., 0., 0., 0., 0.]),
T=16., # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1, ...) are given first,
[-250., 150.], # followed by bounds over controls (u_0, u_1, ...)
[-250., 150.],
[0., 1000.],
[-250., 150.],
[-2 * jnp.pi, 2 * jnp.pi],
[-250., 150.],
[self.min_percent_thrust, self.max_percent_thrust],
[self.min_gimble, self.max_gimble],
]),
terminal_cost=False,
)
def dynamics(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
theta = x_t[4]
thrust = u_t[0]
thrust_angle = u_t[1]
# Horizontal force
F_x = self.max_thrust * thrust * jnp.sin(thrust_angle + theta)
x_dot = x_t[1]
x_dotdot = F_x / self.m
# Vertical force
F_y = self.max_thrust * thrust * jnp.cos(thrust_angle + theta)
y_dot = x_t[3]
y_dotdot = F_y / self.m - self.g
# Torque
T = -self.length / 2 * self.max_thrust * thrust * jnp.sin(thrust_angle)
theta_dot = x_t[5]
theta_dotdot = T / self.I
return jnp.array([x_dot, x_dotdot, y_dot, y_dotdot, theta_dot, theta_dotdot])
def cost(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> Cost:
return u_t[0] ** 2 + u_t[1] ** 2 + 2 * x_t[5] ** 2
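if __name__ == "__main__":
  # Hedged sanity check of the angular dynamics: for a uniform rod,
  # theta'' = torque / I = -6 * F_v * u_0 * sin(u_1) / (m * l), matching the
  # phi'' equation in the class docstring.
  system = RocketLanding()
  u = jnp.array([1.0, system.max_gimble])
  d_state = system.dynamics(system.x_0, u)
  expected = -6 * system.max_thrust * u[0] * jnp.sin(u[1]) / (system.m * system.length)
  print(float(d_state[5]), float(expected))  # the two values should agree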
| 4,647
| 36.184
| 154
|
py
|
myriad
|
myriad-main/myriad/systems/miscellaneous/__init__.py
| 0
| 0
| 0
|
py
|
|
myriad
|
myriad-main/myriad/systems/miscellaneous/van_der_pol.py
|
import jax.numpy as jnp
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import FiniteHorizonControlSystem
class VanDerPol(FiniteHorizonControlSystem):
"""
Driven Van der Pol oscillator, from [CasADi](http://casadi.sourceforge.net/v1.8.0/users_guide/html/node8.html).
  This model tries to drive a [Van der Pol oscillator](https://arxiv.org/pdf/0803.1658.pdf) to the origin and can be
  formally described as:
.. math::
\\begin{align}
& \\min_{u} \\quad && \\int_0^{10} x_0(t)^2 + x_1(t)^2 + u(t)^2 dt \\\\
& \\; \\mathrm{s.t.}\\quad && x_0'(t) = a (1 - x_1(t)^2) x_0(t) - x_1(t) + u(t) \\\\
& && x_1'(t) = x_0(t) \\\\
& && x_0(0) = 0 ,\\; x_1(0) = 1 \\\\
& && x_0(10) = x_1(10) = 0 \\\\
      & && -0.75 <= u(t) <= 1.0 \\\\
\\end{align}
"""
def __init__(self, a=1.):
self.a = a
super().__init__(
x_0=jnp.array([0., 1.]),
x_T=jnp.zeros(2),
T=10.0,
bounds=jnp.array([
# [-jnp.inf, jnp.inf], # state 1
# [-jnp.inf, jnp.inf], # state 2
[-4., 4.], # state 1 (from observation)
[-4., 4.], # state 2 (from observation)
[-0.75, 1.0], # control
]),
terminal_cost=False,
)
def dynamics(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
x0, x1 = x_t
_x0 = jnp.squeeze(self.a * (1. - x1 ** 2) * x0 - x1 + u_t)
_x1 = jnp.squeeze(x0)
return jnp.asarray([_x0, _x1])
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> jnp.ndarray:
a = params['a']
x0, x1 = x_t
_x0 = jnp.squeeze(a * (1. - x1 ** 2) * x0 - x1 + u_t)
_x1 = jnp.squeeze(x0)
return jnp.asarray([_x0, _x1])
def cost(self, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
return x_t.T @ x_t + u_t ** 2
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: float, t: float = None) -> float:
return x_t.T @ x_t + u_t ** 2 # nothing to learn here!
# def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
# x = pd.DataFrame(x, columns=['x0', 'x1'])
#
# sns.set(style='darkgrid')
# plt.figure(figsize=(9, 4))
# ts_u = jnp.linspace(0, self.T, u.shape[0])
#
# plt.subplot(1, 2, 1)
# plt.plot(x['x0'], x['x1'])
#
# plt.subplot(1, 2, 2)
# plt.step(ts_u, u, where="post")
# plt.xlabel('time (s)')
#
# plt.tight_layout()
# plt.show()
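if __name__ == "__main__":
  # Hedged sketch: an Euler rollout of the unforced oscillator; trajectories
  # should approach the Van der Pol limit cycle rather than the origin.
  system = VanDerPol()
  x, dt = system.x_0, 0.01
  for _ in range(1000):
    x = x + dt * system.dynamics(x, 0.)
  print(x)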
| 2,496
| 29.82716
| 113
|
py
|
myriad
|
myriad-main/myriad/systems/classical_control/cartpole.py
|
# (c) 2021 Nikolaus Howe
import jax.numpy as jnp
from typing import Optional
from myriad.systems.base import FiniteHorizonControlSystem
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
class CartPole(FiniteHorizonControlSystem):
"""
Cart-pole swing-up, from [(Kelly, 2017)](https://epubs.siam.org/doi/10.1137/16M1062569).
This environment models a cart moving in a unidimensional direction with a pendulum hanging freely from it.
  The task usually associated with it is to move the cart in such a way that the pendulum swings up to the
  non-stationary equilibrium point above the cart.
  The goal is to find the force ( \\(u(t)\\) ) that drives the cart ( \\(q_1'(t)\\) ) so as to impart an angular
  velocity to the pendulum ( \\(q_2'(t)\\) ) that achieves the swing-up, while minimising the force needed to
  generate such a movement. The system to solve is:
.. math::
\\begin{align}
& \\min_{u} \\quad && \\int_0^T u(t)^2 dt \\\\
      & \\; \\mathrm{s.t.}\\quad && q_1''(t) = \\frac{l m_2 \\sin(q_2(t)) q_2'(t)^2 + u(t) + m_2 g \\cos(q_2(t)) \\sin(q_2(t))}
      {m_1 + m_2 (1-\\cos^2 (q_2(t)))}\\\\
      & && q_2''(t) = - \\frac{l m_2 \\cos(q_2(t))\\sin(q_2(t)) q_2'(t)^2 + u(t) \\cos(q_2(t))
      + (m_1 + m_2) g \\sin(q_2(t))}{l m_1 + l m_2 (1 - \\cos^2(q_2(t)))} \\\\
& && q_1(0)=q_2(0)=q_1'(0)=q_2'(0)=0 \\\\
& && q_1(T) = d ,\\; q_2(T) = \\pi ,\\; q_1'(T)=q_2'(T)=0 \\\\
& && -d_M <= q_1(t) <= d_M ,\\; -u_M <= u(t) <= u_M
\\end{align}
Notes
-----
\\(q_1\\): Position of the cart \n
\\(q_2\\): Angle of the pole \n
\\(q_1'\\): Velocity of the cart \n
\\(q_2'\\): Angular velocity of the pole \n
\\(m_1\\): Mass of the cart \n
\\(m_2\\): Mass of the pendulum \n
\\(l\\): Length of the pole \n
\\(g\\): Gravity force \n
\\(d_M\\): Maximal distance that can be traveled by the cart \n
\\(u_M\\): Maximal force that can be applied to the motor \n
\\(T\\): The horizon
"""
def __init__(self, g: float = 9.81, m1: float = 1., m2: float = .3, length: float = 0.5):
# Physical parameters for the cart-pole example (Table 3)
self.m1 = m1 # kg mass of cart
self.m2 = m2 # kg mass of pole
self.length = length # m pole length
self.g = g # m/s^2 gravity acceleration
self.u_max = 20 # N maximum actuator force
self.d_max = 2.0 # m extent of the rail that cart travels on
self.d = 1.0 # m distance traveled during swing-up
super().__init__(
x_0=jnp.array([0., 0., 0., 0.]), # Starting state (Eq. 6.9)
x_T=jnp.array([self.d, jnp.pi, 0., 0.]), # Ending state (Eq. 6.9)
T=2.0, # s duration of swing-up,
bounds=jnp.array([
[-self.d_max, self.d_max], # Eq. 6.7
[-2 * jnp.pi, 2 * jnp.pi],
[-5., 5.], # Observed from optimal plot, taken as reasonable
[-10., 10.],
[-self.u_max, self.u_max], # Control bounds (Eq. 6.8)
]),
terminal_cost=False,
)
# Cart-Pole Example: System Dynamics (Section 6.1)
def dynamics(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
x, theta, dx, dtheta = x_t
# Eq. 6.1
ddx = ((self.length * self.m2 * jnp.sin(theta) * dtheta ** 2 + u_t + self.m2 * self.g * jnp.cos(theta) * jnp.sin(theta))
/ (self.m1 + self.m2 * (1 - jnp.cos(theta) ** 2)))
ddx = jnp.squeeze(ddx)
# Eq. 6.2
    ddtheta = - ((self.length * self.m2 * jnp.cos(theta) * jnp.sin(theta) * dtheta ** 2 + u_t * jnp.cos(theta)
                  + (self.m1 + self.m2) * self.g * jnp.sin(theta))
                 / (self.length * self.m1 + self.length * self.m2 * (1 - jnp.cos(theta) ** 2)))
ddtheta = jnp.squeeze(ddtheta)
return jnp.array([dx, dtheta, ddx, ddtheta])
def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
g = jnp.abs(params['g']) # convert negative values to positive ones
m1 = jnp.abs(params['m1'])
m2 = jnp.abs(params['m2'])
length = jnp.abs(params['length'])
x, theta, dx, dtheta = x_t
# Eq. 6.1
ddx = ((length * m2 * jnp.sin(theta) * dtheta ** 2 + u_t + m2 * g * jnp.cos(theta) * jnp.sin(theta))
/ (m1 + m2 * (1 - jnp.cos(theta) ** 2)))
ddx = jnp.squeeze(ddx)
# Eq. 6.2
    ddtheta = - ((length * m2 * jnp.cos(theta) * jnp.sin(theta) * dtheta ** 2 + u_t * jnp.cos(theta)
                  + (m1 + m2) * g * jnp.sin(theta))
                 / (length * m1 + length * m2 * (1 - jnp.cos(theta) ** 2)))
ddtheta = jnp.squeeze(ddtheta)
return jnp.array([dx, dtheta, ddx, ddtheta])
def cost(self, x_t: State, u_t: Control, t: Timestep = None) -> Cost:
# Eq. 6.3
return u_t ** 2
def parametrized_cost(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep]) -> Cost:
return self.cost(x_t, u_t, t)
# def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
# x = pd.DataFrame(x, columns=['q1', 'q2', 'q̈1', 'q̈2'])
#
# # Plot optimal trajectory (Figure 10)
# sns.set(style='darkgrid')
# plt.figure(figsize=(9, 6))
# ts_x = jnp.linspace(0, self.T, x.shape[0])
# ts_u = jnp.linspace(0, self.T, u.shape[0])
#
# plt.subplot(3, 1, 1)
# plt.ylabel('position (m)')
# plt.xlim(0, 2.01)
# plt.ylim(0, 1.5)
# plt.plot(ts_x, x['q1'], '-bo', clip_on=False, zorder=10)
#
# plt.subplot(3, 1, 2)
# plt.ylabel('angle (rad)')
# plt.plot(ts_x, x['q2'], '-bo', clip_on=False, zorder=10)
# plt.xlim(0, 2.01)
# plt.ylim(-2, 4)
#
# plt.subplot(3, 1, 3)
# plt.ylabel('force (N)')
# # plt.plot(ts_u, u, '-bo', clip_on=False, zorder=10)
# plt.step(ts_u, u, where="post", clip_on=False)
# plt.xlim(0, 2.01)
# plt.ylim(-20, 11)
#
# plt.xlabel('time (s)')
# plt.tight_layout()
# plt.show()
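if __name__ == "__main__":
  # Hedged sketch: a short Euler rollout from a small pole perturbation with
  # u = 0; the pole should swing under gravity while the cart recoils.
  system = CartPole()
  x, dt = jnp.array([0., 0.1, 0., 0.]), 0.01
  for _ in range(100):
    x = x + dt * system.dynamics(x, 0.)
  print("state after 1 s:", x)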
| 5,799
| 39.277778
| 127
|
py
|
myriad
|
myriad-main/myriad/systems/classical_control/mountain_car.py
|
# (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
from typing import Optional
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep
from myriad.systems.base import FiniteHorizonControlSystem
def hill_function(x: float) -> float:
# return jnp.max(jnp.array([-3 * x - jnp.pi, -1/3 * jnp.cos(3 * x), 3 * x]))
return 0.5 * x * x
class MountainCar(FiniteHorizonControlSystem):
"""
Continuous Mountain Car environment, inspired by the [OpenAI gym environment](https://github.com/openai/gym/blob/master/gym/envs/classic_control/continuous_mountain_car.py).
Model was originally described in [Andrew Moore's PhD Thesis (1990)](https://www.cl.cam.ac.uk/techreports/UCAM-CL-TR-209.pdf).
  This environment models a unidimensional car located between two hills; the goal is typically to make it to the top
  of one of the hills. Usually, this environment is made challenging by limiting the force ( \\(u(t)\\) ) the car can
  generate, making it unable to climb directly up the desired steep hill. In this scenario, the solution
is to first climb the opposite hill in order to generate enough potential energy to make it on top of the desired hill.
The system can formally be described as:
.. math::
\\begin{align}
& \\min_{u} \\quad && \\int_0^T u(t)^2 dt \\\\
      & \\; \\mathrm{s.t.}\\quad && x''(t) = p u(t) - g h'(x(t)) \\\\
& && x(0) = x_i ,\\; x'(0) = v_i \\\\
& && x(T) = x_f ,\\; x'(T) = v_f \\\\
& && -1 <= u(t) <= 1
\\end{align}
Notes
-----
\\(x\\): Position of the car \n
\\(x'\\): Velocity of the car \n
\\(p\\): Maximal power that the car engine can output \n
\\(u\\): The force applied to the car, as a fraction of \\(p\\) \n
\\(g\\): Gravity force \n
\\(h(x)\\): Function describing the hill landscape \n
\\(x_i, v_i\\): Initial position and speed \n
\\(x_f, v_f\\): Goal position and speed \n
\\(T\\): The horizon
"""
def __init__(self, power=0.0015, gravity=0.0025) -> None:
self.min_action = -1.0
self.max_action = 1.0
self.min_position = -1.2
self.max_position = 0.6
self.max_speed = 0.07
self.start_position = -0.1
self.start_velocity = 0.
self.goal_position = 0.45 # was 0.5 in gym, 0.45 in Arnaud de Broissia's version
self.goal_velocity = 0
# self.power = 0.0015
# self.gravity = 0.0025
self.power = power
self.gravity = gravity
super().__init__(
# [self.np_random.uniform(low=-0.6, high=-0.4), 0]
x_0=jnp.array([self.start_position, self.start_velocity]), # Starting state: position, velocity
x_T=jnp.array([self.goal_position, self.goal_velocity]), # Ending state
T=300., # s duration (note, this is not in the original problem)
bounds=jnp.array([
[self.min_position, self.max_position], # Position bounds
[-self.max_speed, self.max_speed], # Velocity bounds
[self.min_action, self.max_action], # Control bounds
]),
terminal_cost=False,
)
# def _height(self, xs):
# return jnp.sin(3 * xs) * .45 + .55
def dynamics(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
position, velocity = x_t
force = jnp.clip(u_t, a_min=self.min_action, a_max=self.max_action)
d_position = velocity.squeeze()
d_velocity = (force * self.power - self.gravity * jax.grad(hill_function)(position)).squeeze()
return jnp.array([d_position, d_velocity])
def parametrized_dynamics(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> DState:
position, velocity = x_t
power = params['power']
gravity = params['gravity']
force = jnp.clip(u_t, a_min=self.min_action, a_max=self.max_action)
d_position = velocity.squeeze()
d_velocity = (force * power - gravity * jax.grad(hill_function)(position)).squeeze()
return jnp.array([d_position, d_velocity])
def cost(self, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> Cost:
return 10. * u_t ** 2
def parametrized_cost(self, params: Params, x_t: State, u_t: Control, t: Optional[Timestep] = None) -> Cost:
return self.cost(x_t, u_t, t)
# def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray) -> None:
# x = pd.DataFrame(x, columns=['q1', 'q2', 'q̈1', 'q̈2'])
#
# # Plot optimal trajectory (Figure 10)
# sns.set(style='darkgrid')
# plt.figure(figsize=(9, 6))
# ts_x = jnp.linspace(0, self.T, x.shape[0])
# ts_u = jnp.linspace(0, self.T, u.shape[0])
#
# plt.subplot(3, 1, 1)
# plt.ylabel('position (m)')
# plt.xlim(0, 2.01)
# plt.ylim(0, 1.5)
# plt.plot(ts_x, x['q1'], '-bo', clip_on=False, zorder=10)
#
# plt.subplot(3, 1, 2)
# plt.ylabel('angle (rad)')
# plt.plot(ts_x, x['q2'], '-bo', clip_on=False, zorder=10)
# plt.xlim(0, 2.01)
# plt.ylim(-2, 4)
#
# plt.subplot(3, 1, 3)
# plt.ylabel('force (N)')
# # plt.plot(ts_u, u, '-bo', clip_on=False, zorder=10)
# plt.step(ts_u, u, where="post", clip_on=False)
# plt.xlim(0, 2.01)
# plt.ylim(-20, 11)
#
# plt.xlabel('time (s)')
# plt.tight_layout()
# plt.show()
if __name__ == "__main__":
import numpy as np
import matplotlib.pyplot as plt
mc = MountainCar()
y1 = np.linspace(-1.5, 1.5, 20)
y2 = np.linspace(-0.1, 0.1, 20)
Y1, Y2 = np.meshgrid(y1, y2)
t = 0
u, v = np.zeros(Y1.shape), np.zeros(Y2.shape)
NI, NJ = Y1.shape
for i in range(NI):
for j in range(NJ):
x = Y1[i, j]
y = Y2[i, j]
yprime = mc.dynamics(jnp.array([x, y]), 0, t)
u[i, j] = yprime[0]
v[i, j] = yprime[1]
Q = plt.quiver(Y1, Y2, u, v, color='r')
plt.xlabel('position')
plt.ylabel('velocity')
plt.xlim([-1.5, 1])
plt.ylim([-.1, .1])
plt.show()
| 5,788
| 31.706215
| 175
|
py
|
myriad
|
myriad-main/myriad/systems/classical_control/pendulum.py
|
# (c) 2021 Nikolaus Howe
# inspired by https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py
# and https://github.com/locuslab/mpc.pytorch/blob/07f43da67581b783f4f230ca97b0efbc421773af/mpc/env_dx/pendulum.py
import jax
import jax.numpy as jnp
from typing import Optional
from myriad.systems.base import FiniteHorizonControlSystem
from myriad.custom_types import Control, DState, Params, State, Timestep
# https://github.com/openai/gym/blob/ee5ee3a4a5b9d09219ff4c932a45c4a661778cd7/gym/envs/classic_control/pendulum.py#L101
@jax.jit
def angle_normalize(x):
return ((x + jnp.pi) % (2 * jnp.pi)) - jnp.pi
class Pendulum(FiniteHorizonControlSystem):
"""
Continuous Pendulum environment, inspired by the [OpenAI gym environment](https://github.com/openai/gym/blob/master/gym/envs/classic_control/pendulum.py).
  This environment models the movement of a pendulum to which a torque ( \\(u(t)\\) ) is applied at the pivot, with
  the other extremity moving freely. The goal is to generate a movement such that the pendulum balances in an upright position.
It can be modeled as:
.. math::
\\begin{align}
      & \\min_{u} \\quad && \\int_0^T \\theta(t)^2 + 0.1 \\, \\theta'(t)^2 + C_p u(t)^2 dt \\\\
      & \\; \\mathrm{s.t.}\\quad && \\theta''(t) = -\\frac{3 g \\sin(\\theta(t))}{2 l}
                                                   + \\frac{3 u(t)}{m l^2}\\\\
      & && \\theta(0) = 0 ,\\; \\theta'(0)=0 \\\\
      & && \\theta(T) = \\pi ,\\; \\theta'(T)=0 \\\\
& && -u_M <= u(t) <= u_M
\\end{align}
Notes
-----
\\(\\theta\\): Pendulum's angle \n
\\(\\theta'\\): Angular velocity \n
\\(u\\): The torque applied to the pendulum \n
\\(l\\): Length of the rope holding the pendulum \n
\\(g\\): Gravity force \n
\\(u_M\\): Maximum torque that can be applied \n
\\(T\\): The horizon
"""
def __init__(self, g: float = 10., m: float = 1., length: float = 1.):
# Learnable parameters
self.g = g
self.m = m
self.length = length
# Fixed parameters
self.max_speed = 8.
self.max_torque = 2.
self.x_0 = jnp.array([0., 0.])
self.x_T = jnp.array([jnp.pi, 0.])
self.ctrl_penalty = 0.001
super().__init__(
x_0=self.x_0, # Starting state: position, velocity
x_T=self.x_T, # Ending state
T=15, # s duration (note, this is not in the original problem)
bounds=jnp.array([
[-jnp.pi, jnp.pi], # theta
[-self.max_speed, self.max_speed], # dtheta
[-self.max_torque, self.max_torque], # Control bounds
]),
terminal_cost=False,
)
def parametrized_dynamics(self, params: Params, x: State, u: Control, t: Optional[Timestep] = None) -> DState:
u = jnp.clip(u, a_min=-self.max_torque, a_max=self.max_torque)
g = params['g']
m = params['m']
length = params['length']
theta, dot_theta = x
theta = angle_normalize(theta)
dot_theta = jnp.clip(dot_theta, a_min=-self.max_speed, a_max=self.max_speed)
# print("theta, dot_theta", x)
dot_dot_theta = (-3. * g / (2. * length) * jnp.sin(theta)
+ 3. * u / (m * length ** 2)).squeeze() * 0.05
# print("dot theta", dot_theta)
# print("dot dot", dot_dot_theta)
return jnp.array([dot_theta, dot_dot_theta])
def dynamics(self, x: State, u: Control, t: Optional[Timestep] = None) -> DState:
u = jnp.clip(u, a_min=-self.max_torque, a_max=self.max_torque)
theta, dot_theta = x
theta = angle_normalize(theta)
dot_theta = jnp.clip(dot_theta, a_min=-self.max_speed, a_max=self.max_speed)
# print("theta, dot_theta", x)
dot_dot_theta = (-3. * self.g / (2. * self.length) * jnp.sin(theta)
+ 3. * u / (self.m * self.length ** 2)).squeeze() * 0.05
# print("dot theta", dot_theta)
# print("dot dot", dot_dot_theta)
return jnp.array([dot_theta, dot_dot_theta])
def parametrized_cost(self, params: Params, x: State, u: Control, t: Timestep):
# Do nothing, for now
return self.cost(x, u, t)
def cost(self, x: State, u: Control, t: Timestep) -> float:
# print("state is", x)
assert len(x) == 2
theta, dot_theta = x
return angle_normalize(theta) ** 2 + 0.1 * dot_theta ** 2 + self.ctrl_penalty * u ** 2
if __name__ == "__main__":
pass
# import numpy as np
# import matplotlib.pyplot as plt
#
# pd = Pendulum()
#
# y1 = np.linspace(-2*jnp.pi, 2*jnp.pi, 20)
# y2 = np.linspace(-pd.max_speed, pd.max_speed, 20)
#
# Y1, Y2 = np.meshgrid(y1, y2)
#
# t = 0
#
# u, v = np.zeros(Y1.shape), np.zeros(Y2.shape)
#
# NI, NJ = Y1.shape
#
# for i in range(NI):
# for j in range(NJ):
# x = Y1[i, j]
# y = Y2[i, j]
# yprime = pd.dynamics(jnp.array([x, y]), 0, t)
# u[i, j] = yprime[0]
# v[i, j] = yprime[1]
#
# Q = plt.quiver(Y1, Y2, u, v, color='r')
#
# plt.xlabel('angle')
# plt.ylabel('angular velocity')
# plt.xlim([-2*jnp.pi, 2*jnp.pi])
# plt.ylim([-pd.max_speed, pd.max_speed])
# plt.show()
# def get_frame(self, x, ax=None):
# x = util.get_data_maybe(x.view(-1))
# assert len(x) == 3
# l = self.params[2].item()
#
# cos_th, sin_th, dth = torch.unbind(x)
# th = np.arctan2(sin_th, cos_th)
# x = sin_th * l
# y = cos_th * l
#
# if ax is None:
# fig, ax = plt.subplots(figsize=(6, 6))
# else:
# fig = ax.get_figure()
#
# ax.plot((0, x), (0, y), color='k')
# ax.set_xlim((-l * 1.2, l * 1.2))
# ax.set_ylim((-l * 1.2, l * 1.2))
# return fig, ax
# def get_true_obj(self):
# q = torch.cat((
# self.goal_weights,
# self.ctrl_penalty * torch.ones(self.n_ctrl)
# ))
# assert not hasattr(self, 'mpc_lin')
# px = -torch.sqrt(self.goal_weights) * self.goal_state # + self.mpc_lin
# p = torch.cat((px, torch.zeros(self.n_ctrl)))
# return Variable(q), Variable(p)
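# Hedged illustration: angle_normalize wraps angles into [-pi, pi), which the
# cost relies on to measure distance from the target angle.
if __name__ == "__main__":
  for a in (0., jnp.pi, 3 * jnp.pi, -3 * jnp.pi):
    print(float(a), "->", float(angle_normalize(a)))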
| 5,848
| 30.446237
| 156
|
py
|
myriad
|
myriad-main/myriad/systems/classical_control/__init__.py
| 0
| 0
| 0
|
py
|
|
myriad
|
myriad-main/myriad/systems/lenhart/mould_fungicide.py
|
from typing import Union, Optional
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class MouldFungicide(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 6, Lab 2)
This environment models the concentration level of a mould population that we try to control by
applying a fungicide. The state ( \\(x\\) ) is the population concentration, while the control ( \\(u\\) ) is
the amount of fungicide added. We are trying to minimize:
.. math::
\\begin{align}
& \\min_u \\quad &&\\int_0^T Ax^2(t) + u^2(t) dt \\\\
&\\; \\mathrm{s.t.}\\quad && x'(t) = r(M - x(t)) - u(t)x(t) \\\\
& && x(0)=x_0 \\;
\\end{align}
"""
def __init__(self, r=0.3, M=10., A=10., x_0=1.0, T=5):
super().__init__(
x_0=jnp.array([x_0]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1, ...) are given first,
[0., 5.], # followed by bounds over controls (u_0, u_1, ...)
[0., 5.], # nh: I replaced [-inf, inf] with [0, 5] in both of the bounds
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.r = r
"""Growth rate"""
self.M = M
"""Carrying capacity"""
self.A = A
"""Weight parameter, balancing between controlling the population and limiting the fungicide use"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
d_x = self.r * (self.M - x_t) - u_t * x_t
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
r = params['r']
M = params['M']
d_x = r * (M - x_t) - u_t * x_t
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return self.A * x_t ** 2 + u_t ** 2
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
return self.A * x_t ** 2 + u_t ** 2 # don't learn the cost for now
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return adj_t * (self.r + u_t) - 2 * self.A * x_t
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = 0.5 * adj_t * x_t
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
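if __name__ == "__main__":
  # Hedged sketch of a forward-backward sweep in the style of Lenhart &
  # Workman, with Euler steps and control relaxation; illustrative only,
  # not the repo's indirect solver.
  system = MouldFungicide()
  N = 200
  dt = system.T / N
  u = jnp.zeros(N + 1)
  x = jnp.zeros(N + 1)
  lam = jnp.zeros(N + 1)
  for _ in range(30):
    # Forward sweep for the state
    x = x.at[0].set(system.x_0[0])
    for i in range(N):
      x = x.at[i + 1].set(x[i] + dt * system.dynamics(x[i], u[i]))
    # Backward sweep for the adjoint; lambda(T) = 0 since there is no terminal cost
    lam = lam.at[N].set(0.)
    for i in range(N, 0, -1):
      lam = lam.at[i - 1].set(lam[i] - dt * system.adj_ODE(lam[i], x[i], u[i], None))
    # Relaxed control update from the optimality characterization u* = lambda * x / 2
    u = 0.5 * u + 0.5 * system.optim_characterization(lam, x, None)
  print("control at t = 0 and t = T:", float(u[0]), float(u[-1]))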
| 3,084
| 37.5625
| 113
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/simple_case.py
|
from typing import Union, Optional, Dict
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.systems import IndirectFHCS
@gin.configurable
class SimpleCase(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 5, Lab 1)
A simple introductory environment example of the form:
.. math::
\\begin{align}
& \\max_u \\quad && \\int_0^1 Ax(t) - Bu^2(t) dt \\\\
& \\; \\mathrm{s.t.}\\quad && x'(t) = -\\frac{1}{2}x^2(t) + Cu(t) \\\\
& && x(0)=x_0>-2, \\; A \\geq 0, \\; B > 0
\\end{align}
"""
def __init__(self, A=1., B=1., C=4., x_0=1., T=1.):
super().__init__(
x_0=jnp.array([x_0]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1, ...) are given first,
[jnp.NINF, jnp.inf], # followed by bounds over controls (u_0, u_1, ...)
[jnp.NINF, jnp.inf],
]),
terminal_cost=False,
discrete=False,
)
self.A = A
"""Weight parameter"""
self.B = B
"""Weight parameter"""
self.C = C
"""Weight parameter"""
self.adj_T = None # Final condition over the adjoint, if any
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
d_x = -0.5*x_t**2 + self.C*u_t
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return -self.A*x_t + self.B*u_t**2 # Maximization problem converted to minimization
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return -self.A + x_t*adj_t
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = (self.C*adj_t)/(2*self.B)
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))  # clip to the control bounds row
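# Derivation sketch (maximization form, as in the docstring): with
#   H = A*x - B*u**2 + adj*(-x**2/2 + C*u),
#   adj' = -dH/dx = -A + adj*x                            (matches `adj_ODE`)
#   dH/du = -2*B*u + C*adj = 0  =>  u* = C*adj/(2*B)      (matches `optim_characterization`)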
| 2,217
| 34.206349
| 112
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/cancer_treatment.py
|
import gin
import jax.numpy as jnp
from typing import Optional, Union
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class CancerTreatment(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 10, Lab 5)
The model was originally described in K. Renee Fister and John Carl Panetta. Optimal control applied to
competing chemotherapeutic cell-kill strategies. SIAM Journal on Applied Mathematics, 63(6):1954–71, 2003.
The tumour is assumed to follow Gompertzian growth and the model follows Skipper's log-kill hypothesis, that is,
the cell-kill due to the chemotherapy treatment is proportional to the tumour population.
This environment models the normalized density of a cancerous tumour undergoing chemotherapy.
The state ( \\(x\\) ) is the normalized density of the tumour, while the control ( \\(u\\) ) is the strength
of the drug used for chemotherapy.
An important note about this system: because the dynamics take the log of the reciprocal of the state,
special care must be taken to ensure that the numerical integration scheme never steps into negative
values of x. It is therefore recommended to either take small steps, or to always clip the state to
[0., inf] during integration.
We are trying to minimize:
.. math::
\\begin{align}
&\\min_u \\quad && \\int_0^T ax^2(t) + u^2(t) dt \\\\
& \\; \\mathrm{s.t.}\\quad &&x'(t) = rx(t)\\ln \\big( \\frac{1}{x(t)} \\big) - u(t)\\delta x(t) \\\\
& && x(0)=x_0, \\; u(t) \\geq 0
\\end{align}
"""
def __init__(self, r=0.3, a=3., delta=0.45, x_0=0.975, T=20):
super().__init__(
x_0=jnp.array([x_0]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
[1e-3, 1.], # followed by bounds over controls (u_0, u_1,...)
# [0., jnp.inf] # Original bounds
[0., 2.] # Bounds based on optimal policy
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.r = r
"""Growth rate of the tumour"""
self.a = a
"""Positive weight parameter"""
self.delta = delta
"""Magnitude of the dose administered"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
d_x = self.r * x_t * jnp.log(1 / x_t) - u_t * self.delta * x_t
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
r = params['r']
delta = params['delta']
d_x = r * x_t * jnp.log(1 / x_t) - u_t * delta * x_t
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return self.a * x_t ** 2 + u_t ** 2
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
# a = params['a'] # TODO: change back if we want to learn the cost also
a = self.a
return a * x_t ** 2 + u_t ** 2
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return adj_t * (self.r + self.delta * u_t - self.r * jnp.log(1 / x_t)) - 2 * self.a * x_t
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = 0.5 * adj_t * self.delta * x_t
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
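# Derivation sketch: with H = a*x**2 + u**2 + adj*(r*x*log(1/x) - delta*u*x) and
# d/dx [r*x*log(1/x)] = r*log(1/x) - r,
#   adj' = -dH/dx = adj*(r + delta*u - r*log(1/x)) - 2*a*x   (matches `adj_ODE`)
#   dH/du = 2*u - adj*delta*x = 0  =>  u* = 0.5*adj*delta*x  (matches `optim_characterization`)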
| 4,085
| 43.413043
| 120
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/simple_case_with_bounds.py
|
from typing import Union, Optional
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.systems import IndirectFHCS
@gin.configurable
class SimpleCaseWithBounds(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 9, Lab 4). \n
A simple introductory environment example of the form:
.. math::
\\begin{align}
& \\max_u \\quad && \\int_0^1 Ax(t) - u^2(t) dt \\\\
& \\; \\mathrm{s.t.}\\quad && x'(t) = -\\frac{1}{2}x^2(t) + Cu(t) \\\\
& && x(0)=x_0>-2, \\; A \\geq 0, \\; M_1 \\leq u(t) \\leq M_2
\\end{align}
"""
def __init__(self, A=1., C=4., M_1=-1., M_2=2., x_0=1., T=1.):
super().__init__(
x_0=jnp.array([x_0]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
# [jnp.NINF, jnp.inf], # followed by bounds over controls (u_0,u_1,...)
[0., 3.], # changed based on observation of the true optimal trajectory using default M_1 and M_2
[M_1, M_2],
]),
terminal_cost=False,
discrete=False,
)
self.A = A
"""Weight parameter"""
self.C = C
"""Weight parameter"""
self.M_1 = M_1
"""Lower bound for the control"""
self.M_2 = M_2
"""Upper bound for the control"""
self.adj_T = None # Final condition over the adjoint, if any
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
d_x = -0.5*x_t**2 + self.C*u_t
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return -self.A*x_t + u_t**2 # Maximization problem converted to minimization
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return -self.A + x_t*adj_t
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = (self.C*adj_t)/2
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
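# Derivation sketch (maximization form): with H = A*x - u**2 + adj*(-x**2/2 + C*u),
# dH/du = -2*u + C*adj = 0 gives the interior optimum u* = C*adj/2, which is then
# clipped to the control bounds [M_1, M_2] above.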
| 2,409
| 35.515152
| 112
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/predator_prey.py
|
import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class PredatorPrey(IndirectFHCS):
# TODO: there is an error when trying to plot with PredatorPrey
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 22, Lab 13)
The state evolution is based on a standard Lotka-Volterra model.
This particular environment is inspired from Bean San Goh, George Leitmann, and Thomas L. Vincent.
Optimal control of a prey-predator system. Mathematical Biosciences, 19, 1974.
This environment models the evolution of a pest (prey) population ( \\(x_0(t)\\) ) and a predator population ( \\(x_1(t) \\)) in
the presence of a pesticide ( \\(u(t)\\) ) that affects both the pest and predator populations. The objective in mind is
to minimize the final pest population, while limiting the usage of the pesticide. Thus:
.. math::
\\begin{align}
& \\min_{u} \\quad && x_0(T) + \\frac{A}{2}\\int_0^T u(t)^2 dt \\\\
& \\; \\mathrm{s.t.}\\quad && x_0'(t) = (1 - x_1(t))x_0(t) - d_1x_0(t)u(t) \\\\
& && x_1'(t) = (x_0(t) - 1)x_1(t) - d_2x_1(t)u(t) \\\\
& && 0 \\leq u(t) \\leq M, \\quad \\int_0^T u(t) dt = B
\\end{align}
The particularity here is that the total amount of pesticide to be applied is fixed. To take into account this
constraint, a virtual state variable ( \\(z(t)\\) ) is added where:
.. math::
z'(t) = u(t), \\; z(0) = 0, \\; z(T) = B
Finally, note that `guess_a` and `guess_b` have been carefully chosen in the study cases to allow for fast iteration
and ensure convergence.
Notes
-----
x_0: Initial densities of the pest and predator populations, plus the virtual state \\( (x_0, x_1, z) \\)
"""
def __init__(self, d_1=.1, d_2=.1, A=1., B=5.,
guess_a=-.52, guess_b=.5, M=1.,
x_0=(10., 1., 0.), T=10.):
super().__init__(
x_0=jnp.array([
x_0[0],
x_0[1],
x_0[2]
]), # Starting state
x_T=[None, None, B], # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
[0., 11.], # followed by bounds over controls (u_0, u_1, ...)
[0., 11.],
[0., 5.],
[0, M]
]),
terminal_cost=True,
discrete=False,
)
self.adj_T = jnp.array([1, 0, 0]) # Final condition over the adjoint, if any
self.d_1 = d_1
"""Impact of the pesticide on the pest population"""
self.d_2 = d_2
"""Impact of the pesticide on the prey population"""
self.A = A
"""Weight parameter balancing the cost"""
self.guess_a = guess_a
"""First node at which the secant method begins its iterations"""
self.guess_b = guess_b
"""Second node at which the secant method begins its iterations"""
self.M = M
"""Bound on pesticide application at a given time"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
x_0, x_1, x_2 = x_t
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
(1 - x_1) * x_0 - self.d_1 * x_0 * u_t,
(x_0 - 1) * x_1 - self.d_2 * x_1 * u_t,
u_t,
])
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
d_1 = params['d_1']
d_2 = params['d_2']
x_0, x_1, x_2 = x_t
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
(1 - x_1) * x_0 - d_1 * x_0 * u_t,
(x_0 - 1) * x_1 - d_2 * x_1 * u_t,
u_t,
])
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return self.A * 0.5 * u_t ** 2
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
return self.A * 0.5 * u_t ** 2 # Not learning cost for now
def terminal_cost_fn(self, x_T: Optional[jnp.ndarray], u_T: Optional[jnp.ndarray],
T: Optional[jnp.ndarray] = None) -> float:
return x_T[0]
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return jnp.array([
adj_t[0] * (x_t[1] - 1 + self.d_1 * u_t[0]) - adj_t[1] * x_t[1],
adj_t[0] * x_t[0] + adj_t[1] * (1 - x_t[0] + self.d_2 * u_t[0]),
0
])
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = (adj_t[:, 0] * self.d_1 * x_t[:, 0] + adj_t[:, 1] * self.d_2 * x_t[:, 1] - adj_t[:, 2]) / self.A
char = char.reshape(-1, 1)
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
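# Derivation sketch for `optim_characterization`: with
#   H = (A/2)*u**2 + adj_0*((1 - x_1)*x_0 - d_1*x_0*u) + adj_1*((x_0 - 1)*x_1 - d_2*x_1*u) + adj_2*u,
# dH/du = A*u - adj_0*d_1*x_0 - adj_1*d_2*x_1 + adj_2 = 0 gives
# u* = (adj_0*d_1*x_0 + adj_1*d_2*x_1 - adj_2) / A, clipped to [0, M].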
| 5,190
| 36.615942
| 132
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/bacteria.py
|
from typing import Union, Optional
import gin
import jax.numpy as jnp
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class Bacteria(IndirectFHCS):
"""Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 7, Lab 3)
This environment models the concentration level of a bacteria population that we try to control by providing
a chemical nutrient that stimulates growth. However, the use of the chemical leads to the production of
a chemical byproduct by the bacteria that in turn hinders growth. The state ( \\(x\\) ) is the bacteria population
concentration, while the control ( \\(u\\) ) is the amount of chemical nutrient added. We are trying to maximize:
(Note: FBSM estimates a different trajectory here than the one you actually
get when you integrate the returned controls.)
Note that the state must always remain positive in this domain.
For this reason, the dynamics are halted if the state ever reaches 0
(or drops below it due to numerical integration issues).
.. math::
\\begin{align}
& \\max_u \\quad &&Cx(1) - \\int_0^1 u^2(t) dt \\\\
& \\; \\mathrm{s.t.} \\quad &&x'(t) = rx(t) + Au(t)x(t) - Bu^2(t)e^{-x(t)} \\\\
& && x(0)=x_0, \\\\
& && A,B,C \\geq 0
\\end{align}
"""
def __init__(self, r=1., A=1., B=12., C=1., x_0=1.):
super().__init__(
x_0=jnp.array([x_0]), # Starting state
x_T=None, # Terminal state, if any
T=1, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
# [jnp.NINF, jnp.inf], # followed by bounds over controls (u_0, u_1,...)
[0., 10.], # followed by bounds over controls (u_0, u_1,...)
# [jnp.NINF, jnp.inf],
[0., 2.], # set based on observation of optimal
]),
terminal_cost=True,
discrete=False,
)
self.adj_T = jnp.array([C]) # Final condition over the adjoint, if any
self.r = r
"""Growth rate"""
self.A = A
"""Relative strength of the chemical nutrient"""
self.B = B # used to be set at 12
"""Strength of the byproduct"""
self.C = C
"""Payoff associated to the final bacteria population concentration"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
# x_t += 0.1 # Niki added to avoid negatives
d_x = self.r * x_t + self.A * u_t * x_t - self.B * u_t ** 2 * jnp.exp(-x_t)
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
r = params['r']
A = params['A']
B = params['B']
# x_t += 0.1 # Niki added to avoid negatives
d_x = r * x_t + A * u_t * x_t - B * u_t ** 2 * jnp.exp(-x_t)
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return u_t ** 2 # Maximization problem converted to minimization
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
return u_t ** 2 # Maximization problem converted to minimization
def terminal_cost_fn(self, x_T: Optional[jnp.ndarray], u_T: Optional[jnp.ndarray],
T: Optional[jnp.ndarray] = None) -> float:
return -self.C * x_T.squeeze() # squeeze is necessary for using SHOOTING
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return -adj_t * (self.r + self.A * u_t + self.B * u_t ** 2 * jnp.exp(-x_t))
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = adj_t * self.A * x_t / (2 * (1 + self.B * adj_t * jnp.exp(-x_t)))
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
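# Derivation sketch (maximization form): with H = -u**2 + adj*(r*x + A*u*x - B*u**2*exp(-x)),
# dH/du = -2*u + adj*(A*x - 2*B*u*exp(-x)) = 0 solves to
#   u* = adj*A*x / (2*(1 + B*adj*exp(-x)))       (matches `optim_characterization`);
# the terminal payoff C*x(1) appears as the terminal adjoint condition adj(T) = C above.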
| 4,287
| 43.666667
| 120
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/harvest.py
|
import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.systems import IndirectFHCS
@gin.configurable
class Harvest(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 11, Lab 6)
The model was adapted from Wayne M. Getz. Optimal control and principles in population management.
Proceedings of Symposia in Applied Mathematics, 30:63–82, 1984.
This environment models the population level (scaled) of a population
(for example, of vegetables) to be harvested.
The time scale is too small for reproduction to occur, but the mass
of each member of the population will grow over time following
\\(\\frac{kt}{t+1}\\). The state ( \\(x\\) ) is the population level,
while the control ( \\(u\\) ) is the harvest rate.
We are trying to maximize:
.. math::
\\begin{align}
& \\max_u \\quad && \\int_0^T A \\frac{kt}{t+1}x(t)u(t) - u^2(t) dt \\\\
& \\; \\mathrm{s.t.}\\quad && x'(t) = -(m+u(t)) x(t) \\\\
& && x(0)=x_0, \\; 0\\leq u(t) \\leq M, \\; A > 0
\\end{align}
"""
def __init__(self, A=5., k=10., m=.2, M=1., x_0=.4, T=10.):
super().__init__(
x_0=jnp.array([x_0]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1, ...) are given first,
[jnp.NINF, jnp.inf], # followed by bounds over controls (u_0,u_1, ...)
[0, M],
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.A = A
"""Nonnegative weight parameter"""
self.k = k
"""Maximum mass of the species"""
self.m = m
"""Natural death rate of the species"""
self.M = M
"""Upper bound on harvesting that may represent physical limitations"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
d_x = -(self.m+u_t)*x_t
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return -1*self.A*(self.k*t/(t+1))*x_t*u_t + u_t**2 # Maximization problem converted to minimization
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return adj_t*(self.m+u_t) - self.A*(self.k*t/(t+1))*u_t
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = 0.5*x_t * (self.A*(self.k*t/(t+1)) - adj_t)
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
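# Derivation sketch (maximization form): with
#   H = A*(k*t/(t+1))*x*u - u**2 + adj*(-(m + u)*x),
# dH/du = A*(k*t/(t+1))*x - 2*u - adj*x = 0 gives
# u* = 0.5*x*(A*(k*t/(t+1)) - adj), clipped to [0, M].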
| 2,859
| 38.722222
| 112
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/bear_populations.py
|
import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class BearPopulations(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 15, Lab 9)
Additional reference can be found in R. A. Salinas, S. Lenhart, and L. J. Gross. Control of a metapopulation
harvesting model for black bears. Natural Resource Modeling, 18:307–21, 2005.
The model represents the metapopulation of black bears, i.e. a population consisting of multiple local
populations, which can interact with each other. In this particular scenario, the author models the
bear population density in a park (protected) area ( \\(x_0\\)), a forest area ( \\(x_1\\)) and an urban area
( \\(x_2\\)). Natural reproduction happens only inside the park and forest area, and the goal is to limit the bear
population that migrates to the urban area.
The control is a harvesting rate (hunting) that occurs inside the forest area and, at a higher cost, in the
park area. The goal is thus to minimize:
.. math::
\\begin{align}
&\\min_{u_p,u_f} \\quad &&\\int_0^T x_2(t) + c_p u_p(t)^2 + c_f u_f(t)^2 dt \\\\
& \\; \\mathrm{s.t.}\\quad && x_0'(t) = rx_0(t) - \\frac{r}{K}x_0(t)^2 + \\frac{m_f r}{K}\\big( 1 - \\frac{x_0(t)}{K} \\big)x_1(t)^2 - u_p(t)x_0(t),\\; x_0(0)\\geq 0 \\\\
& && x_1'(t) = rx_1(t) - \\frac{r}{K}x_1(t)^2 + \\frac{m_p r}{K}\\big( 1 - \\frac{x_1(t)}{K} \\big)x_0(t)^2 - u_f(t)x_1(t),\\; x_1(0)\\geq 0 \\\\
& && x_2'(t) = r(1-m_p)\\frac{x_0(t)^2}{K} + r(1-m_f)\\frac{x_1(t)^2}{K} + \\frac{m_f r}{K^2}x_0(t)x_1(t)^2 + \\frac{m_p r}{K^2}x_0(t)^2x_1(t),\\; x_2(0)\\geq 0 \\\\
& && 0\\leq u_p(t) \\leq 1, \\; 0\\leq u_f(t) \\leq 1
\\end{align}
"""
def __init__(self, r=.1, K=.75, m_p=.5, m_f=.5, c_p=10_000,
c_f=10, x_0=(.4, .2, 0.), T=25):
super().__init__(
x_0=jnp.array([
x_0[0],
x_0[1],
x_0[2],
]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1, ...) are given first,
[0., 2.], # followed by bounds over controls (u_0, u_1, ...)
[0., 2.],
[0., 2.], # nh: I changed the bounds to be reasonable amounts
[0., .2],
[0., .2],
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.r = r
"""Population growth rate"""
self.K = K
"""Carrying capacity of the areas (density wise)"""
self.m_p = m_p
"""Proportion of the park boundary connected to the forest areas"""
self.m_f = m_f
"""Proportion of the forest areas connected to the park area"""
self.c_p = c_p
"""Cost associated with harvesting in the park"""
self.c_f = c_f
"""Cost associated with harvesting in the forest"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
k = self.r / self.K
k2 = self.r / self.K ** 2
x_0, x_1, x_2 = x_t
u_0, u_1 = u_t
d_x = jnp.array([
self.r * x_0 - k * x_0 ** 2 + k * self.m_f * (1 - x_0 / self.K) * x_1 ** 2 - u_0 * x_0,
self.r * x_1 - k * x_1 ** 2 + k * self.m_p * (1 - x_1 / self.K) * x_0 ** 2 - u_1 * x_1,
k * (1 - self.m_p) * x_0 ** 2 + k * (1 - self.m_f) * x_1 ** 2 + k2 * self.m_f * x_0 * x_1 ** 2 + k2 * self.m_p * (
x_0 ** 2) * x_1,
])
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
r = params['r']
K = params['K']
m_f = params['m_f']
m_p = params['m_p']
k = r / K
k2 = r / K ** 2
x_0, x_1, x_2 = x_t
u_0, u_1 = u_t
d_x = jnp.array([
r * x_0 - k * x_0 ** 2 + k * m_f * (1 - x_0 / K) * x_1 ** 2 - u_0 * x_0,
r * x_1 - k * x_1 ** 2 + k * m_p * (1 - x_1 / K) * x_0 ** 2 - u_1 * x_1,
k * (1 - m_p) * x_0 ** 2 + k * (1 - m_f) * x_1 ** 2 + k2 * m_f * x_0 * x_1 ** 2 + k2 * m_p * (
x_0 ** 2) * x_1,
])
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return x_t[2] + self.c_p * u_t[0] ** 2 + self.c_f * u_t[1] ** 2
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
# Not learning the cost function for now
return x_t[2] + self.c_p * u_t[0] ** 2 + self.c_f * u_t[1] ** 2
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
k = self.r / self.K
k2 = self.r / self.K ** 2
return jnp.array([
adj_t[0] * (2 * k * x_t[0] + k2 * self.m_f * x_t[1] ** 2 + u_t[0] - self.r)
- adj_t[1] * (2 * k * self.m_p * (1 - x_t[1] / self.K) * x_t[0])
+ adj_t[2] * (
2 * k * (self.m_p - 1) * x_t[0] - k2 * self.m_f * x_t[1] ** 2 - 2 * k2 * self.m_p * x_t[0] * x_t[1]),
adj_t[1] * (2 * k * x_t[1] + k2 * self.m_p * x_t[0] ** 2 + u_t[1] - self.r)
- adj_t[0] * (2 * k * self.m_f * (1 - x_t[0] / self.K) * x_t[1])
+ adj_t[2] * (
2 * k * (self.m_f - 1) * x_t[1] - 2 * k2 * self.m_f * x_t[0] * x_t[1] - k2 * self.m_p * x_t[0] ** 2),
-1,
])
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char_0 = adj_t[:, 0] * x_t[:, 0] / (2 * self.c_p)
char_0 = char_0.reshape(-1, 1)
char_0 = jnp.minimum(self.bounds[-2, 1], jnp.maximum(self.bounds[-2, 0], char_0))
char_1 = adj_t[:, 1] * x_t[:, 1] / (2 * self.c_f)
char_1 = char_1.reshape(-1, 1)
char_1 = jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char_1))
return jnp.hstack((char_0, char_1))
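# Derivation sketch for the two controls: with
#   H = x_2 + c_p*u_p**2 + c_f*u_f**2 + adj . f(x, u) and the terms -u_p*x_0, -u_f*x_1 in f,
#   dH/du_p = 2*c_p*u_p - adj_0*x_0 = 0  =>  u_p* = adj_0*x_0 / (2*c_p)
#   dH/du_f = 2*c_f*u_f - adj_1*x_1 = 0  =>  u_f* = adj_1*x_1 / (2*c_f)
# each clipped to its own control bounds, as done above.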
| 6,332
| 42.675862
| 178
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/invasive_plant.py
|
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
from typing import Union, Optional
from myriad.systems import IndirectFHCS
@gin.configurable
class InvasivePlant(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 24, Lab 14)
This problem was first looked at in M. E. Moody and R. N. Mack. Controlling the spread of plant invasions:
the importance of nascent foci. Journal of Applied Ecology, 25:1009–21, 1988.
The general formulation that we look at in this environment was presented in A. J. Whittle, S. Lenhart, and
L. J. Gross. Optimal control for management of an invasive plant species. Mathematical Biosciences and
Engineering, to appear, 2007.
The scenario considered in this environment has been modified from its original formulation
so that the terminal state cost term is linear instead of quadratic. The optimal solutions therefore
differ from those of the original problem, but the model behavior is similar.
In this environment, we look at the growth of an invasive species that has a main focus population and
4 smaller satellite populations ( \\(x_j\\), \\(j = 0, \\dots, 4\\) ). The areas occupied by the different
populations are assumed to be circular, with growth that can be represented via the total radius of the
population area. Annual interventions are made after the growth period, removing a ratio of the population
radius ( \\(u_{j,t}\\) ). Since the interventions are annual, this is a discrete-time model. We aim to:
.. math::
\\begin{align}
& \\min_{u} \\quad &&\\sum_{j=0}^4 \\bigg[x_{j,T} + B\\sum_{t=0}^{T-1} u_{j,t}^2 \\bigg] \\\\
& \\; \\mathrm{s.t.}\\quad && x_{j,t+1} = \\bigg( x_{j,t} + \\frac{k x_{j,t}}{\\epsilon + x_{j,t}}\\bigg) (1-u_{j,t}) ,\\; x_{j,0} = \\rho_j \\\\
& && 0 \\leq u_{j,t} \\leq 1
\\end{align}
"""
def __init__(self, B=1., k=1., eps=.01,
x_0=(.5, 1., 1.5, 2., 10.), T=10.):
super().__init__(
x_0=jnp.array(x_0), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
[jnp.NINF, jnp.inf], # followed by bounds over controls (u_0,u_1,...)
[jnp.NINF, jnp.inf],
[jnp.NINF, jnp.inf],
[jnp.NINF, jnp.inf],
[jnp.NINF, jnp.inf],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
]),
terminal_cost=False,
discrete=True,
)
self.adj_T = jnp.ones(5) # Final condition over the adjoint, if any
self.B = B
"""Positive weight parameter"""
self.k = k
"""Spread rate of the population"""
self.eps = eps
"""Small constant, used to scale the spread by \\(\\frac{r}{\\epsilon+r}\\) so eradication is possible"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
next_x = (x_t + x_t*self.k/(self.eps + x_t)) * (1 - u_t)
return next_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return self.B*(u_t**2).sum()
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
prev_adj = adj_t * (1-u_t) * (1 + self.eps*self.k/(self.eps + x_t)**2)
return prev_adj
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
shifted_adj = adj_t[1:, :]
shifted_x_t = x_t[:-1, :]
char = 0.5*shifted_adj/self.B * (shifted_x_t + shifted_x_t*self.k/(self.eps + shifted_x_t))
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char)) # same bounds for all control
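# Derivation sketch (discrete-time Pontryagin): with
#   H_t = B*sum_j u_{j,t}**2 + adj_{t+1} . x_{t+1} and
#   x_{j,t+1} = (x_{j,t} + k*x_{j,t}/(eps + x_{j,t}))*(1 - u_{j,t}),
# dH_t/du_{j,t} = 2*B*u_{j,t} - adj_{j,t+1}*(x_{j,t} + k*x_{j,t}/(eps + x_{j,t})) = 0,
# so u* pairs the adjoint shifted one step forward with the state one step back,
# which is exactly what the array slicing above implements.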
def plot_solution(self, x: jnp.ndarray, u: jnp.ndarray,
adj: Optional[jnp.ndarray] = None,
other_x: Optional[jnp.ndarray] = None) -> None:
# sns.set(style='darkgrid')
plt.figure(figsize=(9, 9))
x, u, adj = x.T, u.T, adj.T
ts_x = jnp.linspace(0, self.T, x[0].shape[0])
ts_u = jnp.linspace(0, self.T - 1, u[0].shape[0])
ts_adj = jnp.linspace(0, self.T, adj[0].shape[0])
labels = ["Focus 1", "Focus 2", "Focus 3", "Focus 4", "Focus 5"]
to_print = [0, 1, 2, 3, 4] # curves we want to print out
plt.subplot(3, 1, 1)
for idx, x_i in enumerate(x):
if idx in to_print:
plt.plot(ts_x, x_i, 'o', label=labels[idx])
if other_x is not None:
for idx, x_i in enumerate(other_x.T):
plt.plot(ts_u, x_i, 'o', label="integrated "+labels[idx])
plt.legend()
plt.title("Optimal state of dynamic system via forward-backward sweep")
plt.ylabel("state (x)")
plt.subplot(3, 1, 2)
for idx, u_i in enumerate(u):
plt.plot(ts_u, u_i, 'o', label='Focus ratio cropped')
plt.legend()
plt.title("Optimal control of dynamic system via forward-backward sweep")
plt.ylabel("control (u)")
plt.subplot(3, 1, 3)
for idx, adj_i in enumerate(adj):
if idx in to_print:
plt.plot(ts_adj, adj_i, "o")
plt.title("Optimal adjoint of dynamic system via forward-backward sweep")
plt.ylabel("adjoint (lambda)")
plt.xlabel('time (s)')
plt.tight_layout()
plt.show()
| 5,532
| 39.985185
| 151
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/epidemic_seirn.py
|
import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.systems import IndirectFHCS
@gin.configurable
class EpidemicSEIRN(IndirectFHCS): # TODO : Add R calculation at the end
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 13, Lab 7)
A typical SEIRN (or SEIR) model is considered here in order to find an optimal schedule for a vaccination
campaign. Additional information about this model and some of its variations can be found in H. R. Joshi,
S. Lenhart, M. Y. Li, and L. Wang. Optimal control methods applied to disease models. AMS Volume on Mathematical
Studies on Human Disease Dynamics: Emerging Paradigms and Challenges, 410:187–207, 2006.
The model contains multiple state variables; \\(S(t)\\) (i.e. \\(x_0\\)) is the number of individuals susceptible to
contracting the disease at time t, while \\(I(t)\\) (i.e. \\(x_2\\)) and \\(R(t)\\) (i.e. \\(x_3\\)) are respectively the
numbers of infectious and recovered (and immune) individuals. \\(E(t)\\) (i.e. \\(x_1\\)) is the number of individuals
who have been exposed to the disease and are now in a latent state: they may develop the disease later on and
become infectious, or they may simply become immune. \\(N(t)\\) (i.e. \\(x_4\\)) is the total population, i.e., the
sum of all other states.
The control is the vaccination rate among the susceptible individuals.
Finally, note that all individuals are considered to be born susceptible. We want to minimize:
.. math::
\\begin{align}
&\\min_u \\quad &&\\int_0^T A x_0(t) + u^2(t) dt \\\\
& \\; \\mathrm{s.t.}\\quad && x_0'(t) = bx_4(t) - dx_0(t) - cx_0(t)x_2(t) - u(t)x_0(t),\\; x_0(0)\\geq 0 \\\\
& && x_1'(t) = cx_0(t)x_2(t) - (e+d)x_1(t),\\; x_1(0)\\geq 0 \\\\
& && x_2'(t) = ex_1(t) - (g+a+d)x_2(t),\\; x_2(0)\\geq 0 \\\\
& && x_3'(t) = gx_2(t) - dx_3(t) + u(t)x_0(t),\\; x_3(0)\\geq 0 \\\\
& && x_4'(t) = (b-d)x_4(t) - ax_2(t),\\; x_4(0)\\geq 0 \\\\
& && 0\\leq u(t) \\leq 0.9, \\; A > 0
\\end{align}
Notes
-----
x_0: The initial values are given here as \\( (S(t_0), E(t_0), I(t_0), R(t_0)) \\). Internally, the integrated
state is \\( (S, E, I, N) \\), with \\(N(t_0)\\) set to the sum of the four initial values; \\(R\\) can be
recovered afterwards as \\(N - S - E - I\\).
"""
def __init__(self, A=.1, b=.525, d=.5, c=.0001,
e=.5, g=.1, a=.2, x_0=(1000., 100., 50., 15.), T=20.):
super().__init__(
x_0=jnp.array([
x_0[0],
x_0[1],
x_0[2],
jnp.sum(jnp.asarray(x_0)),
]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
[jnp.NINF, jnp.inf], # followed by bounds over controls (u_0,u_1,...)
[jnp.NINF, jnp.inf],
[jnp.NINF, jnp.inf],
[jnp.NINF, jnp.inf],
[0, 0.9],
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.b = b
"""The exponential birth rate of the population"""
self.d = d
"""The exponential death rate of the population"""
self.c = c
"""The incidence rate of contamination"""
self.e = e
"""The rate at which exposed individuals become contagious (1/e is the mean latent period)"""
self.g = g
"""The recovery rate among infectious individuals (1/g is the mean infectious period)"""
self.a = a
"The death rate due to the disease"
self.A = A
"""Weight parameter balancing between the reduction of the infectious population and the vaccination cost"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
x_0, x_1, x_2, x_3 = x_t
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
self.b*x_3 - self.d*x_0 - self.c*x_0*x_2 - u_t*x_0,
self.c*x_0*x_2 - (self.e+self.d)*x_1,
self.e*x_1 - (self.g+self.a+self.d)*x_2,
(self.b-self.d)*x_3 - self.a*x_2
])
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return self.A*x_t[2] + u_t**2
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return jnp.array([
adj_t[0]*(self.d+self.c*x_t[2]+u_t[0]) - adj_t[1]*self.c*x_t[2],
adj_t[1]*(self.e+self.d) - adj_t[2]*self.e,
-self.A + adj_t[0]*self.c*x_t[0] - adj_t[1]*self.c*x_t[0] + adj_t[2]*(self.g+self.a+self.d)
+ adj_t[3]*self.a,
-self.b*adj_t[0] + adj_t[3]*(self.d-self.b)  # -dH/dN = -b*adj_S + adj_N*(d - b)
])
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = adj_t[:, 0]*x_t[:, 0]/2
char = char.reshape(-1, 1)
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
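# Derivation sketch for `optim_characterization`: the control only enters H through
# u**2 + adj_S*(-u*S), so dH/du = 2*u - adj_S*S = 0 gives u* = adj_S*S/2
# (adj_t[:, 0]*x_t[:, 0]/2 above), clipped to [0, 0.9].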
| 5,092
| 44.473214
| 149
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/hiv_treatment.py
|
import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class HIVTreatment(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 14, Lab 8)
Model adapted from: S. Butler, D. Kirschner, and S. Lenhart. Optimal control of chemotherapy affecting the
infectivity of HIV. Advances in Mathematical Population Dynamics - Molecules, Cells and Man, 6:557–69, 1997.
This model describes the evolution of uninfected and infected (respectively \\(x_0\\) and \\(x_1\\) ) CD4⁺T cells, in the
presence of free virus particles ( \\(x_2\\) ). The control is the administration of a chemotherapy drug that affects
the infectivity of the virus. The goal is to maximize the number of uninfected CD4⁺T cells.
Note that \\(u(t) = 0\\) represents maximum therapy, while \\(u(t) = 1\\) is no therapy. We want to maximize:
.. math::
\\begin{align}
& \\max_u \\quad && \\int_0^T A x_0(t) - (1-u(t))^2 dt \\\\
& \\; \\mathrm{s.t.}\\quad && x_0'(t) = \\frac{s}{1+x_2(t)} - m_1x_0(t) + rx_0(t)\\big[1 - \\frac{x_0(t)+x_1(t)}{T_{\\mathrm{max}}} \\big],\\; x_0(0)> 0 \\\\
& && x_1'(t) = u(t)kx_2(t)x_0(t) - m_2x_1(t),\\; x_1(0)> 0 \\\\
& && x_2'(t) = Nm_2x_1(t) - m_3x_2(t),\\; x_2(0)> 0 \\\\
& && 0\\leq u(t) \\leq 1, \\; A > 0
\\end{align}
"""
def __init__(self, s=10., m_1=.02, m_2=.5, m_3=4.4, r=.03,
T_max=1500., k=.000024, N=300., x_0=(800., .04, 1.5),
A=.05, T=20.):
super().__init__(
x_0=jnp.array([
x_0[0],
x_0[1],
x_0[2],
]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
[0., 1600.], # followed by bounds over controls (u_0, u_1,...)
[0., 100.],
[0., 100.], # all were inf before, except control
[0., 1.],
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.s = s
"""Parameter varying the rate of generation of new CD4⁺T cells"""
self.m_1 = m_1
"""Natural death rate of uninfected CD4⁺T cells"""
self.m_2 = m_2
"""Natural death rate of infected CD4⁺T cells"""
self.m_3 = m_3
"""Natural death rate of free virus particles"""
self.r = r
"""Growth rate of CD4⁺T cells per day"""
self.T_max = T_max
"""Maximum growth of CD4⁺T cells"""
self.k = k
"""Rate of infection among CD4⁺T cells from free virus particles"""
self.N = N
"""Average number of virus particles produced before the CD4⁺T host cell dies."""
self.A = A
"""Weight parameter balancing the cost"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
x_0, x_1, x_2 = x_t
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
self.s / (1 + x_2) - self.m_1 * x_0 + self.r * x_0 * (1 - (x_0 + x_1) / self.T_max) - u_t * self.k * x_0 * x_2,
u_t * self.k * x_0 * x_2 - self.m_2 * x_1,
self.N * self.m_2 * x_1 - self.m_3 * x_2,
])
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
k = params['k']
m_1 = params['m_1']
m_2 = params['m_2']
m_3 = params['m_3']
N = params['N']
r = params['r']
s = params['s']
T_max = params['T_max']
x_0, x_1, x_2 = x_t
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
s / (1 + x_2) - m_1 * x_0 + r * x_0 * (1 - (x_0 + x_1) / T_max) - u_t * k * x_0 * x_2,
u_t * k * x_0 * x_2 - m_2 * x_1,
N * m_2 * x_1 - m_3 * x_2,
])
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return -self.A * x_t[0] + (1 - u_t) ** 2 # Maximization problem converted to minimization
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
return -self.A * x_t[0] + (1 - u_t) ** 2 # No cost learning for now
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return jnp.array([
-self.A + adj_t[0] * (self.m_1 - self.r * (1 - (x_t[0] + x_t[1]) / self.T_max) + self.r * x_t[0] / self.T_max
+ u_t[0] * self.k * x_t[2]) - adj_t[1] * u_t[0] * self.k * x_t[2],
adj_t[0] * self.r * x_t[0] / self.T_max + adj_t[1] * self.m_2 - adj_t[2] * self.N * self.m_2,
adj_t[0] * (self.s / (1 + x_t[2]) ** 2 + u_t[0] * self.k * x_t[0]) - adj_t[1] * u_t[0] * self.k * x_t[0] + adj_t[
2] * self.m_3,
])
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char = 1 + 0.5 * self.k * x_t[:, 0] * x_t[:, 2] * (adj_t[:, 1] - adj_t[:, 0])
char = char.reshape(-1, 1)
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
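# Derivation sketch (maximization form): with H = A*x_0 - (1 - u)**2 + adj . f and the
# control entering f through -u*k*x_0*x_2 (uninfected) and +u*k*x_0*x_2 (infected),
# dH/du = 2*(1 - u) + (adj_1 - adj_0)*k*x_0*x_2 = 0 gives
# u* = 1 + 0.5*k*x_0*x_2*(adj_1 - adj_0), clipped to [0, 1].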
| 5,535
| 40.939394
| 163
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/__init__.py
| 0
| 0
| 0
|
py
|
|
myriad
|
myriad-main/myriad/systems/lenhart/bioreactor.py
|
import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class Bioreactor(IndirectFHCS): # TODO: Add resolution for z state after optimization
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 19, Lab 12)
Additional information about this kind of model can be found in A. Heinricher, S. Lenhart, and A. Solomon.
The application of optimal control methodology to a well-stirred bioreactor. Natural Resource Modeling, 9:61–80,
1995.
This environment is an example of a model where the cost is linear with respect to the control.
It can still be solved by the FBSM algorithm since the optimal control is of the "bang-bang" type,
i.e. it jumps from one boundary value to the other.
This environment models the evolution of a bacteria population ( \\(x(t)\\) ) that helps in the degradation of a
contaminant ( \\(z(t)\\) ) in the presence of a chemical nutrient ( \\(u(t)\\) ) that is added to boost the bacteria population
growth. In this particular problem, the fact that only a terminal cost is associated with the state variable \\(z(t)\\)
allows for the simplification of the problem into:
.. math::
\\begin{align}
&\\max_{u} \\quad &&\\int_0^T Kx(t) - u(t) dt \\\\
& \\; \\mathrm{s.t.}\\quad &&x'(t) = Gu(t)x(t) - Dx^2(t) ,\\; x(0) = x_0 \\\\
& && 0 \\leq u(t) \\leq M
\\end{align}
"""
def __init__(self, K=2., G=1., D=1., M=1., x_0=(.5, .1), T=2.):
super().__init__(
x_0=jnp.array([
x_0[0],
]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1, ...) are given first,
[0., 1.], # followed by bounds over controls (u_0, u_1, ...)
[0., M],
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.K = K
"""Weight parameter"""
self.G = G
"""Maximum growth rate of the bacteria population"""
self.D = D
"""Natural death rate of the bacteria population"""
self.M = M
"""Physical limitation into the application of the chemical nutrient"""
def dynamics(self, x_t: jnp.ndarray,
u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([self.G * u_t * x_t[0] - self.D * x_t[0] ** 2])
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
G = params['G']
D = params['D']
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([G * u_t * x_t[0] - D * x_t[0] ** 2])
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return -self.K * x_t[0] + u_t # Maximization problem converted to minimization
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
return -self.K * x_t[0] + u_t # not learning cost for now
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return jnp.array([
-self.K - self.G * u_t[0] * adj_t[0] + 2 * self.D * x_t[0] * adj_t[0]
])
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
# bang-bang scenario
temp = -1 + self.G * adj_t[:, 0] * x_t[:, 0]
char = jnp.sign(temp.reshape(-1, 1)) * 2 * jnp.max(jnp.abs(self.bounds[-1])) + jnp.max(jnp.abs(self.bounds[-1]))
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
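# Note on the bang-bang trick above: H is linear in u, with switching function
# psi = dH/du = -1 + G*adj*x (the `temp` variable). The optimal control is
# u* = M when psi > 0 and u* = 0 when psi < 0; multiplying sign(psi) by a value
# larger than any bound and then clipping reproduces exactly this switching rule.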
| 4,278
| 40.95098
| 133
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/timber_harvest.py
|
from typing import Union, Optional
import gin
import jax.numpy as jnp
import matplotlib.pyplot as plt
import seaborn as sns
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class TimberHarvest(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 18, Lab 11)
Additional information can be found in Morton I. Kamien and Nancy L. Schwartz. Dynamic Optimization:
The Calculus of Variations and Optimal Control in Economics and Management. North-Holland, New York, 1991.
This environment is an example of a model where the cost is linear with respect to the control.
It can still be solved by the FBSM algorithm since the optimal control is of the "bang-bang" type,
i.e., it jumps from one boundary value to the other.
In this problem we are trying to optimize tree harvesting in a timber farm, resulting in the production of
raw timber ( \\(x(t)\\) ). The harvest percentage over the land
is low enough that we can assume that there will always
be sufficiently many mature trees ready for harvest. The timber is sold immediately after production,
generating an income proportional to the production at every time t. The operators then have the choice of
reinvesting a fraction of this revenue directly into the plant ( \\(u(t)\\) ), thus stimulating future production.
This reinvestment, however, comes at the price of forgoing the interest the revenue would have earned
over the period T had it been saved instead. The control problem is therefore:
.. math::
\\begin{align}
& \\max_{u} \\quad && \\int_0^T e^{-rt}x(t)[1 - u(t)] dt \\\\
& \\mathrm{s.t.}\\quad && x'(t) = kx(t)u(t) ,\\; x(0) > 0 \\\\
& && 0 \\leq u(t) \\leq 1
\\end{align}
"""
def __init__(self, r=0., k=1., x_0=100., T=5.):
super().__init__(
x_0=jnp.array([
x_0,
]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1 ...) are given first,
[0., 20_000], # followed by bounds over controls (u_0,u_1,...)
[0., 1.], # nh added the bounds
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.r = r
"""Discount rate encouraging investment early on"""
self.k = k
"""Return constant of reinvesting into the plant, taking into account cost of labor and land"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
self.k * x_t[0] * u_t
])
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
k = params['k']
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
k * x_t[0] * u_t
])
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return -jnp.exp(-self.r * t) * x_t[0] * (1 - u_t) # Maximization problem converted to minimization
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
return -jnp.exp(-self.r * t) * x_t[0] * (1 - u_t) # not learning cost function for now
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return jnp.array([
u_t[0] * (jnp.exp(-self.r * t[0]) - self.k * adj_t[0]) - jnp.exp(-self.r * t[0])
])
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
# bang-bang scenario
temp = x_t[:, 0] * (self.k * adj_t[:, 0] - jnp.exp(-self.r * t[:, 0]))
char = jnp.sign(temp.reshape(-1, 1)) * 2 * jnp.max(jnp.abs(self.bounds[-1])) + jnp.max(jnp.abs(self.bounds[-1]))
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char))
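# Note on the bang-bang rule above: H = exp(-r*t)*x*(1 - u) + adj*k*x*u is linear in u,
# with switching function psi = x*(k*adj - exp(-r*t)) (the `temp` variable), so the
# optimal control jumps to u* = 1 when psi > 0 and to u* = 0 when psi < 0; the
# sign/clip construction implements this switching rule.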
| 4,409
| 41.403846
| 118
|
py
|
myriad
|
myriad-main/myriad/systems/lenhart/glucose.py
|
import gin
import jax.numpy as jnp
from typing import Union, Optional
from myriad.custom_types import Params
from myriad.systems import IndirectFHCS
@gin.configurable
class Glucose(IndirectFHCS):
"""
Taken from: Optimal Control Applied to Biological Models, Lenhart & Workman (Chapter 16, Lab 10)
The model is presented in more detail in Martin Eisen. Mathematical Methods and Models in the Biological Sciences.
Prentice Hall, Englewood Cliffs, New Jersey, 1988.
This environment models the blood glucose ( \\(x_0(t)\\) ) level of a diabetic person in the presence of injected
insulin ( \\(u(t)\\) ), along with the net hormonal concentration ( \\(x_1(t)\\) ) of insulin in the person's system.
In this model, the diabetic person is assumed to be unable to produce natural insulin via their pancreas.
Note that the model was developed for regulating blood glucose levels over a short window of time. As such, \\(T\\)
should be kept under 0.45 in order for the model to make sense.
( \\(T\\) is measured in days, so 0.45 corresponds to ~11 hours)
The goal of the control is to maintain the blood glucose level close to a desired level, \\(l\\), while also taking
into account that there is a cost associated to the treatment. Thus the objective is:
.. math::
\\begin{align}
& \\min_{u} \\quad && \\int_0^T A(x_0(t)-l)^2 + u(t)^2 dt \\\\
& \\; \\mathrm{s.t.}\\quad && x_0'(t) = -ax_0(t) - bx_1(t) ,\\; x_0(0) > 0 \\\\
& && x_1'(t) = -cx_1(t) + u(t) ,\\; x_1(0)=0 \\\\
& && a,b,c > 0, \\; A \\geq 0
\\end{align}
Notes
-----
x(0): Initial blood glucose level and insulin level \\((x_0(0),x_1(0))\\) \n
T: The horizon should be kept under 0.45
"""
def __init__(self, a=1., b=1., c=1., A=2., l=.5, x_0=(.75, 0.), T=.2):
super().__init__(
x_0=jnp.array([
x_0[0],
x_0[1],
]), # Starting state
x_T=None, # Terminal state, if any
T=T, # Duration of experiment
bounds=jnp.array([ # Bounds over the states (x_0, x_1, ...) are given first,
[0., 1.], # followed by bounds over controls (u_0, u_1, ...)
[0., 1.],
[0., 0.01],
]),
terminal_cost=False,
discrete=False,
)
self.adj_T = None # Final condition over the adjoint, if any
self.a = a
"""Rate of decrease in glucose level resulting of its use by the body"""
self.b = b
"""Rate of decrease in glucose level resulting from its degradation provoked by insulin"""
self.c = c
"""Rate of degradation of the insulin"""
self.A = A
"""Weight parameter balancing the objective"""
self.l = l
"""Desired level of blood glucose"""
def dynamics(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None, t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
x_0, x_1 = x_t
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
-self.a * x_0 - self.b * x_1,
-self.c * x_1 + u_t
])
return d_x
def parametrized_dynamics(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
v_t: Optional[Union[float, jnp.ndarray]] = None,
t: Optional[jnp.ndarray] = None) -> jnp.ndarray:
a = params['a']
b = params['b']
c = params['c']
x_0, x_1 = x_t
if u_t.ndim > 0:
u_t, = u_t
d_x = jnp.array([
-a * x_0 - b * x_1,
-c * x_1 + u_t
])
return d_x
def cost(self, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray], t: Optional[jnp.ndarray] = None) -> float:
return 100_000 * (self.A * (x_t[0] - self.l) ** 2 + u_t ** 2) # multiplying by 100_000 so we can actually see it
def parametrized_cost(self, params: Params, x_t: jnp.ndarray, u_t: Union[float, jnp.ndarray],
t: Optional[jnp.ndarray] = None) -> float:
# A = params['A'] # Uncomment these and recomment the others
# l = params['l'] # if we want to also learn the cost
A = self.A
l = self.l
return 100_000 * (A * (x_t[0] - l) ** 2 + u_t ** 2) # multiplying by 100_000 so we can actually see it
def adj_ODE(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray], u_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
return jnp.array([
-2 * self.A * (x_t[0] - self.l) + adj_t[0] * self.a,
adj_t[0] * self.b + adj_t[1] * self.c
])
def optim_characterization(self, adj_t: jnp.ndarray, x_t: Optional[jnp.ndarray],
t: Optional[jnp.ndarray]) -> jnp.ndarray:
char_0 = -adj_t[:, 1] / 2
char_0 = char_0.reshape(-1, 1)
# Clip to the control bounds, as in the other systems of this module
return jnp.minimum(self.bounds[-1, 1], jnp.maximum(self.bounds[-1, 0], char_0))
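# Derivation sketch: with the (unscaled) Hamiltonian
#   H = A*(x_0 - l)**2 + u**2 + adj_0*(-a*x_0 - b*x_1) + adj_1*(-c*x_1 + u),
# dH/du = 2*u + adj_1 = 0 gives u* = -adj_1/2, as above. Note that the 100_000
# factor in `cost` is display scaling only; it is not reflected in `adj_ODE` or
# in this characterization, which correspond to the unscaled objective.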
| 4,719
| 36.165354
| 121
|
py
|
myriad
|
myriad-main/myriad/trajectory_optimizers/forward_backward_sweep.py
|
# (c) 2021 Nikolaus Howe
import jax.numpy as jnp
from jax.flatten_util import ravel_pytree
# from jax.ops import index_update
# from ipopt import minimize_ipopt
from scipy.optimize import minimize
from dataclasses import dataclass
from typing import Callable, Dict, Tuple, Union
from myriad.config import Config, HParams, OptimizerType, SystemType, IntegrationMethod, QuadratureRule
from myriad.custom_types import Solution
from myriad.nlp_solvers import solve
from myriad.systems import FiniteHorizonControlSystem, IndirectFHCS
from myriad.utils import integrate_in_parallel, integrate_time_independent, \
integrate_time_independent_in_parallel, integrate_fbsm
from myriad.trajectory_optimizers.base import IndirectMethodOptimizer
class FBSM(IndirectMethodOptimizer): # Forward-Backward Sweep Method
"""
The Forward-Backward Sweep Method, as described in Optimal Control Applied to Biological Models, Lenhart & Workman
An iterative solver that, given an initial guess over the controls, performs a forward pass to retrieve the
state trajectory, followed by a backward pass to retrieve the adjoint trajectory. The optimality
characterization is then used to update the control values.
The process is repeated until convergence over the controls.
"""
def __init__(self, hp: HParams, cfg: Config, system: IndirectFHCS):
self.system = system
self.N = hp.fbsm_intervals
self.h = system.T / self.N
if system.discrete:
self.N = int(system.T)
self.h = 1
state_shape = system.x_0.shape[0]
control_shape = system.bounds.shape[0] - state_shape
x_guess = jnp.vstack((system.x_0, jnp.zeros((self.N, state_shape))))
if system.discrete:
u_guess = jnp.zeros((self.N, control_shape))
else:
u_guess = jnp.zeros((self.N + 1, control_shape))
if system.adj_T is not None:
adj_guess = jnp.vstack((jnp.zeros((self.N, state_shape)), system.adj_T))
else:
adj_guess = jnp.zeros((self.N + 1, state_shape))
self.t_interval = jnp.linspace(0, system.T, num=self.N + 1).reshape(-1, 1)
guess, unravel = ravel_pytree((x_guess, u_guess, adj_guess))
self.x_guess, self.u_guess, self.adj_guess = x_guess, u_guess, adj_guess
x_bounds = system.bounds[:-1]
u_bounds = system.bounds[-1:]
bounds = jnp.vstack((x_bounds, u_bounds))
self.x_bounds, self.u_bounds = x_bounds, u_bounds
# Additional handling if a terminal condition is present
self.terminal_cdtion = False
if self.system.x_T is not None:
num_term_state = 0
for idx, x_Ti in enumerate(self.system.x_T):
if x_Ti is not None:
self.terminal_cdtion = True
self.term_cdtion_state = idx
self.term_value = x_Ti
num_term_state += 1
if num_term_state > 1:
raise NotImplementedError("Multiple states with terminal condition not supported yet")
super().__init__(hp, cfg, bounds, guess, unravel)
def reinitiate(self, a):
"""Helper function for `sequencesolver`
"""
state_shape = self.system.x_0.shape[0]
control_shape = self.system.bounds.shape[0] - state_shape
self.x_guess = jnp.vstack((self.system.x_0, jnp.zeros((self.N, state_shape))))
self.u_guess = jnp.zeros((self.N + 1, control_shape))
if self.system.adj_T is not None:
adj_guess = jnp.vstack((jnp.zeros((self.N, state_shape)), self.system.adj_T))
else:
adj_guess = jnp.zeros((self.N + 1, state_shape))
# self.adj_guess = index_update(adj_guess, (-1, self.term_cdtion_state), a)
self.adj_guess = adj_guess.at[(-1, self.term_cdtion_state)].set(a)
def solve(self) -> Solution:
"""Solve the continuous optimal problem with the Forward-Backward Sweep Method"""
if self.terminal_cdtion:
return self.sequencesolver()
n = 0
while n == 0 or self.stopping_criterion((self.x_guess, old_x), (self.u_guess, old_u), (self.adj_guess, old_adj)):
old_u = self.u_guess.copy()
old_x = self.x_guess.copy()
old_adj = self.adj_guess.copy()
self.x_guess = integrate_fbsm(self.system.dynamics, self.x_guess[0], self.u_guess, self.h, self.N,
t=self.t_interval, discrete=self.system.discrete)[-1]
self.adj_guess = integrate_fbsm(self.system.adj_ODE, self.adj_guess[-1], self.x_guess, -1 * self.h, self.N,
self.u_guess, t=self.t_interval, discrete=self.system.discrete)[-1]
u_estimate = self.system.optim_characterization(self.adj_guess, self.x_guess, self.t_interval)
# Use a convex combination of the old and newly characterized controls to update the guess on u
self.u_guess = 0.5 * (u_estimate + old_u)
n = n + 1
solution = {
'x': self.x_guess,
'u': self.u_guess,
'adj': self.adj_guess
}
return solution
def sequencesolver(self) -> Solution:
"""Implement the secant method for the special case where there is a terminal value on some state variables in
addition to the initial values.
"""
self.terminal_cdtion = False
count = 0
# Adjust lambda to the initial guess
a = self.system.guess_a
self.reinitiate(a)
tmp_solution = self.solve()
x_a = tmp_solution['x']
# x_a, _, _ = self.solve()
Va = x_a[-1, self.term_cdtion_state] - self.term_value
b = self.system.guess_b
self.reinitiate(b)
tmp_solution = self.solve()
x_b = tmp_solution['x']
# x_b, _, _ = self.solve()
Vb = x_b[-1, self.term_cdtion_state] - self.term_value
while jnp.abs(Va) > 1e-10:
if jnp.abs(Va) > jnp.abs(Vb):
a, b = b, a
Va, Vb = Vb, Va
d = Va * (b - a) / (Vb - Va)
b = a
Vb = Va
a = a - d
self.reinitiate(a)
tmp_solution = self.solve()
x_a = tmp_solution['x']
# x_a, _, _ = self.solve()
Va = x_a[-1, self.term_cdtion_state] - self.term_value
count += 1
solution = {
'x': self.x_guess,
'u': self.u_guess,
'adj': self.adj_guess
}
return solution
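# Note: `sequencesolver` is a secant root-finder on the map a -> x_a[-1, state] - target,
# where `a` is the guessed terminal adjoint value injected by `reinitiate`; the update
# d = Va*(b - a)/(Vb - Va) is the standard secant step, iterated until |Va| <= 1e-10.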
| 6,006
| 36.779874
| 117
|
py
|
myriad
|
myriad-main/myriad/trajectory_optimizers/base.py
|
# (c) 2021 Nikolaus Howe
from __future__ import annotations
import typing
if typing.TYPE_CHECKING:
from myriad.config import Config, HParams
import jax
import jax.numpy as jnp
import numpy as np
from jax import vmap
from jax.flatten_util import ravel_pytree
# from ipopt import minimize_ipopt
from scipy.optimize import minimize
from dataclasses import dataclass
from typing import Callable, Dict, Optional, Tuple
from myriad.config import SystemType
from myriad.nlp_solvers import solve
from myriad.systems import FiniteHorizonControlSystem, IndirectFHCS
from myriad.utils import integrate_in_parallel, integrate_time_independent, \
integrate_time_independent_in_parallel, integrate_fbsm
from myriad.custom_types import Params
@dataclass
class TrajectoryOptimizer(object):
"""
An abstract class representing an "optimizer" which can find the solution
(an optimal trajectory) to a given "system", using a direct approach.
"""
hp: HParams
"""The hyperparameters"""
cfg: Config
"""Additional hyperparemeters"""
objective: Callable[[jnp.ndarray], float]
"""Given a sequence of controls and states, calculates how "good" they are"""
parametrized_objective: Callable[[Params, jnp.ndarray], float]
constraints: Callable[[jnp.ndarray], jnp.ndarray]
"""Given a sequence of controls and states, calculates the magnitude of violations of dynamics"""
parametrized_constraints: Callable[[Params, jnp.ndarray], float]
bounds: jnp.ndarray
"""Bounds for the states and controls"""
guess: jnp.ndarray
"""An initial guess for the states and controls"""
unravel: Callable[[jnp.ndarray], Tuple]
"""Use to separate decision variable array into states and controls"""
require_adj: bool = False
"""Does this trajectory optimizer require adjoint dynamics in order to work?"""
def __post_init__(self):
if self.cfg.verbose:
# print("optimizer type", self._type)
print("hp opt type", self.hp.optimizer)
print("hp quadrature rule", self.hp.quadrature_rule)
# print(f"x_guess.shape = {self.x_guess.shape}")
# print(f"u_guess.shape = {self.u_guess.shape}")
print(f"guess.shape = {self.guess.shape}")
# print(f"x_bounds.shape = {self.x_bounds.shape}")
# print(f"u_bounds.shape = {self.u_bounds.shape}")
print(f"bounds.shape = {self.bounds.shape}")
if self.hp.system == SystemType.INVASIVEPLANT:
raise NotImplementedError("Discrete systems are not compatible with Trajectory trajectory_optimizers")
def solve(self) -> Dict[str, jnp.ndarray]:
opt_inputs = {
'objective': self.objective,
'guess': self.guess,
'constraints': self.constraints,
'bounds': self.bounds,
'unravel': self.unravel
}
return solve(self.hp, self.cfg, opt_inputs)
# TODO: fix solve of FBSM
def solve_with_params(self, params: Params, guess: Optional[jnp.ndarray] = None) -> Dict[str, jnp.ndarray]:
opt_inputs = {
'objective': (lambda xs_and_us: self.parametrized_objective(params, xs_and_us)),
'guess': self.guess,
'constraints': (lambda xs_and_us: self.parametrized_constraints(params, xs_and_us)),
'bounds': self.bounds,
'unravel': self.unravel
}
if guess is not None:
opt_inputs['guess'] = guess
return solve(self.hp, self.cfg, opt_inputs)
# NOTE: I believe FBSM doesn't work here either
# You can override these if you want to enable end-to-end planning and model learning
# def parametrized_objective(self, xs_and_us, params):
# raise NotImplementedError
# return self.objective(xs_and_us)
# def parametrized_constraints(self, xs_and_us, params):
# raise NotImplementedError
# return self.constraints(xs_and_us)
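# Illustrative call pattern for a parametrized, warm-started solve (the structure
# of `params` is system-specific and hypothetical here):
#   sol = optimizer.solve_with_params(params, guess=previous_solution_variables)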
@dataclass
class IndirectMethodOptimizer(object):
"""
Abstract class for implementing indirect-method trajectory optimizers, i.e. optimizers that rely on Pontryagin's maximum principle
"""
hp: HParams
"""The collection of hyperparameters for the experiment"""
cfg: Config
"""Configuration options that should not impact results"""
bounds: jnp.ndarray
"""Bounds (lower, upper) over the state variables, followed by the bounds over the controls"""
guess: jnp.ndarray # Initial guess on x_t, u_t and adj_t
"""Initial guess for the state, control and adjoint variables"""
unravel: Callable[[jnp.ndarray], Tuple[jnp.ndarray, jnp.ndarray]]
"""Callable to unravel the pytree -- separate decision variable array into states and controls"""
require_adj: bool = True
"""(bool, optional) -- Does this trajectory optimizer require adjoint dynamics in order to work?"""
def solve(self) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:
"""Solve method"""
raise NotImplementedError
def stopping_criterion(self, x_iter: Tuple[jnp.ndarray, jnp.ndarray], u_iter: Tuple[jnp.ndarray, jnp.ndarray],
adj_iter: Tuple[jnp.ndarray, jnp.ndarray], delta: float = 0.001) -> bool:
"""
Criterion for stopping the optimization iterations: keep iterating while, for any
state, control, or adjoint variable, the change between successive iterates
exceeds the fraction `delta` of that variable's accumulated magnitude.
"""
x, old_x = x_iter
u, old_u = u_iter
adj, old_adj = adj_iter
stop_x = jnp.abs(x).sum(axis=0) * delta - jnp.abs(x - old_x).sum(axis=0)
stop_u = jnp.abs(u).sum(axis=0) * delta - jnp.abs(u - old_u).sum(axis=0)
stop_adj = jnp.abs(adj).sum(axis=0) * delta - jnp.abs(adj - old_adj).sum(axis=0)
return jnp.min(jnp.hstack((stop_u, stop_x, stop_adj))) < 0
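# Worked sketch of the criterion above (assumed numbers, for illustration only):
# with delta = 0.001, a state whose accumulated magnitude is 100 and whose change
# between iterates is 1 gives stop_x = 100 * 0.001 - 1 = -0.9 < 0, so iteration
# continues; once every state, control and adjoint entry changes by less than
# 0.1% of its magnitude, the minimum is non-negative and the loop stops.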
| 5,442
| 37.330986
| 149
|
py
|
myriad
|
myriad-main/myriad/trajectory_optimizers/__init__.py
|
# (c) 2021 Nikolaus Howe
from typing import Union
from myriad.trajectory_optimizers.base import TrajectoryOptimizer, IndirectMethodOptimizer
from myriad.trajectory_optimizers.collocation.trapezoidal import TrapezoidalCollocationOptimizer
from myriad.trajectory_optimizers.collocation.hermite_simpson import HermiteSimpsonCollocationOptimizer
from myriad.trajectory_optimizers.forward_backward_sweep import FBSM
from myriad.trajectory_optimizers.shooting import MultipleShootingOptimizer
from myriad.config import Config, HParams, QuadratureRule, OptimizerType
from myriad.systems import FiniteHorizonControlSystem, IndirectFHCS
def get_optimizer(hp: HParams, cfg: Config, system: Union[FiniteHorizonControlSystem, IndirectFHCS]
) -> Union[TrajectoryOptimizer, IndirectMethodOptimizer]:
""" Helper function to fetch the desired optimizer for system resolution"""
if hp.optimizer == OptimizerType.COLLOCATION:
if hp.quadrature_rule == QuadratureRule.TRAPEZOIDAL:
optimizer = TrapezoidalCollocationOptimizer(hp, cfg, system)
elif hp.quadrature_rule == QuadratureRule.HERMITE_SIMPSON:
optimizer = HermiteSimpsonCollocationOptimizer(hp, cfg, system)
else:
raise KeyError
elif hp.optimizer == OptimizerType.SHOOTING:
optimizer = MultipleShootingOptimizer(hp, cfg, system)
elif hp.optimizer == OptimizerType.FBSM:
optimizer = FBSM(hp, cfg, system)
else:
raise KeyError
return optimizer
# def __call__(self, *args, **kwargs) -> Union[FiniteHorizonControlSystem, IndirectFHCS]:
# return self.value(*args, **kwargs)
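# Minimal usage sketch (illustrative; assumes the default hyperparameters are
# valid for the configured system):
#   hp, cfg = HParams(), Config()
#   system = hp.system()                        # instantiate the configured system
#   optimizer = get_optimizer(hp, cfg, system)  # dispatch on hp.optimizer
#   solution = optimizer.solve()                # e.g. solution['u'] holds the planned controls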
| 1,523
| 46.625
| 103
|
py
|
myriad
|
myriad-main/myriad/trajectory_optimizers/shooting.py
|
# (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import numpy as np
from jax.flatten_util import ravel_pytree
from myriad.config import Config, HParams, IntegrationMethod
from myriad.custom_types import Control, Params, Timestep
from myriad.systems import FiniteHorizonControlSystem
from myriad.utils import integrate_in_parallel, integrate_time_independent, integrate_time_independent_in_parallel
from myriad.trajectory_optimizers.base import TrajectoryOptimizer
class MultipleShootingOptimizer(TrajectoryOptimizer):
def __init__(self, hp: HParams, cfg: Config, system: FiniteHorizonControlSystem, key: jax.random.PRNGKey = None):
# TODO: make the key live in the hparams
"""
An optimizer that performs direct multiple shooting.
For reference, see https://epubs.siam.org/doi/book/10.1137/1.9780898718577
Args:
hp: Hyperparameters
cfg: Additional hyperparameters
system: The system on which to perform the optimization
"""
num_steps = hp.intervals * hp.controls_per_interval
step_size = system.T / num_steps
interval_size = system.T / hp.intervals
state_shape = system.x_0.shape[0]
control_shape = system.bounds.shape[0] - state_shape
midpoints_const = 2 if hp.integration_method == IntegrationMethod.RK4 else 1
if key is None:
self.key = jax.random.PRNGKey(hp.seed)
else:
self.key = key
#################
# Initial Guess #
#################
# Controls
# TODO: decide if we like this way of guessing controls. If yes, then add it to the other trajectory_optimizers too.
self.key, subkey = jax.random.split(self.key)
u_lower = system.bounds[-1, 0]
u_upper = system.bounds[-1, 1]
controls_guess = jnp.zeros((midpoints_const * num_steps + 1, control_shape))
# if jnp.isfinite(u_lower) and jnp.isfinite(u_upper):
# controls_guess += jax.random.normal(subkey, (midpoints_const * num_steps + 1, control_shape)) * (
# u_upper - u_lower) * 0.05
print("the controls guess is", controls_guess.shape)
# States
if system.x_T is not None:
row_guesses = []
# For the state variables which have a required end state, interpolate between start and end;
# otherwise, use rk4 with initial controls as a first guess at intermediate and end state values
for i in range(0, len(system.x_T)):
if system.x_T[i] is not None:
row_guess = jnp.linspace(system.x_0[i], system.x_T[i], num=hp.intervals + 1).reshape(-1, 1)
else:
_, row_guess = integrate_time_independent(system.dynamics, system.x_0,
controls_guess[::midpoints_const * hp.controls_per_interval],
interval_size,
hp.intervals, hp.integration_method)
row_guess = row_guess[:, i].reshape(-1, 1)
row_guesses.append(row_guess)
x_guess = jnp.hstack(row_guesses)
else:
_, x_guess = integrate_time_independent(system.dynamics, system.x_0,
controls_guess[::midpoints_const * hp.controls_per_interval],
interval_size, hp.intervals, hp.integration_method)
guess, unravel = ravel_pytree((x_guess, controls_guess))
assert len(x_guess) == hp.intervals + 1 # we have one state decision var for each node, including start and end
self.x_guess, self.u_guess = x_guess, controls_guess
# Augment the dynamics so we can integrate cost the same way we do state
def augmented_dynamics(x_and_c: jnp.ndarray, u: float, t: float) -> jnp.ndarray:
"""
Augments the dynamics with the cost function, so that all can be integrated together
Args:
x_and_c: State and current cost (current cost doesn't affect the cost calculation)
u: Control
t: Time
Returns:
The state derivative augmented with the running cost of applying control u to state x at time t
"""
x, c = x_and_c[:-1], x_and_c[-1]
return jnp.append(system.dynamics(x, u), system.cost(x, u, t))
# Augment the dynamics so we can integrate cost the same way we do state
def parametrized_augmented_dynamics(params: Params, x_and_c: jnp.ndarray, u: Control, t: Timestep) -> jnp.ndarray:
"""Like `augmented_dynamics` above, but evaluated with the parametrized dynamics and cost."""
x, c = x_and_c[:-1], x_and_c[-1]
return jnp.append(system.parametrized_dynamics(params, x, u), system.parametrized_cost(params, x, u, t))
def reorganize_controls(us): # This still works, even for higher-order control shape
"""
Reorganize controls into per-interval arrays
Go from controls of shape (num_controls + 1, control_shape):
[ 1. ,  1.1]
[ 2. ,  2.1]
[ 3. ,  3.1]
[ 4. ,  4.1]
[ 5. ,  5.1]
[ 6. ,  6.1]
[ 7. ,  7.1]
[ 8. ,  8.1]
[ 9. ,  9.1]
[10. , 10.1]
to shape (hp.intervals, num_controls_per_interval + 1, control_shape), where
consecutive intervals share their boundary control:
[[ 1. ,  1.1], [ 2. ,  2.1], [ 3. ,  3.1], [ 4. ,  4.1]]
[[ 4. ,  4.1], [ 5. ,  5.1], [ 6. ,  6.1], [ 7. ,  7.1]]
[[ 7. ,  7.1], [ 8. ,  8.1], [ 9. ,  9.1], [10. , 10.1]]
Args:
us: Controls
Returns:
Controls organized into per-interval arrays
"""
new_controls = jnp.hstack(
[us[:-1].reshape(hp.intervals, midpoints_const * hp.controls_per_interval, control_shape),
us[::midpoints_const * hp.controls_per_interval][1:][:, jnp.newaxis]])
# Needed for single shooting
if len(new_controls.shape) == 3 and new_controls.shape[2] == 1:
new_controls = new_controls.squeeze(axis=2)
return new_controls
def reorganize_times(ts):
"""
Reorganize times into per-interval arrays
Args:
ts: Times
Returns:
Times organized into per-interval arrays
"""
new_times = jnp.hstack([ts[:-1].reshape(hp.intervals, hp.controls_per_interval),
ts[::hp.controls_per_interval][1:][:, jnp.newaxis]])
return new_times
def parametrized_objective(params: Params, variables: jnp.ndarray) -> float:
"""Like `objective` below, but evaluates the parametrized cost and dynamics with `params`."""
xs, us = unravel(variables)
reshaped_controls = reorganize_controls(us)
t = jnp.linspace(0., system.T, num=num_steps + 1)
t = reorganize_times(t)
starting_xs_and_costs = jnp.hstack([xs[:-1], jnp.zeros(len(xs[:-1])).reshape(-1, 1)])
def dynamics(x_and_c: jnp.ndarray, u: Control, t: Timestep):
return parametrized_augmented_dynamics(params, x_and_c, u, t)
# Integrate cost in parallel
states_and_costs, _ = integrate_in_parallel(
dynamics, starting_xs_and_costs, reshaped_controls,
step_size, hp.controls_per_interval, t, hp.integration_method)
costs = jnp.sum(states_and_costs[:, -1])
if system.terminal_cost:
last_augmented_state = states_and_costs[-1]
costs += system.terminal_cost_fn(last_augmented_state[:-1], us[-1])
return costs
def objective(variables: jnp.ndarray) -> float:
"""
Calculate the objective of a trajectory
Args:
variables: Raveled states and controls
Returns:
The objective of the trajectory
"""
# print("dynamics are", system.dynamics)
# The commented code runs faster, but only does a linear interpolation for cost.
# Better to have the interpolation match the integration scheme,
# and just use Euler / Heun if we need shooting to be faster
# xs, us = unravel(variables)
# t = jnp.linspace(0, system.T, num=N_x+1)[:-1] # Support cost function with dependency on t
# t = jnp.repeat(t, hp.controls_per_interval)
# _, x = integrate(system.dynamics, system.x_0, u, h_u, N_u)
# x = x[:-1]
# if system.terminal_cost:
# return jnp.sum(system.terminal_cost_fn(x[-1], u[-1])) + h_u * jnp.sum(vmap(system.cost)(x, u, t))
# else:
# return h_u * jnp.sum(vmap(system.cost)(x, u, t))
# ---
xs, us = unravel(variables)
reshaped_controls = reorganize_controls(us)
t = jnp.linspace(0., system.T, num=num_steps + 1)
t = reorganize_times(t)
starting_xs_and_costs = jnp.hstack([xs[:-1], jnp.zeros(len(xs[:-1])).reshape(-1, 1)])
# Integrate cost in parallel
states_and_costs, _ = integrate_in_parallel(
augmented_dynamics, starting_xs_and_costs, reshaped_controls,
step_size, hp.controls_per_interval, t, hp.integration_method)
costs = jnp.sum(states_and_costs[:, -1])
if system.terminal_cost:
last_augmented_state = states_and_costs[-1]
costs += system.terminal_cost_fn(last_augmented_state[:-1], us[-1])
return costs
def parametrized_constraints(params: Params, variables: jnp.ndarray) -> jnp.ndarray:
"""
Calculate the constraint violations of a trajectory
Args:
variables: Raveled states and controls
params: Dict of parameters for the model
Returns:
Constraint violations of trajectory
"""
def dynamics(x_t: jnp.ndarray, u_t: jnp.ndarray):
return system.parametrized_dynamics(params, x_t, u_t)
xs, us = unravel(variables)
px, _ = integrate_time_independent_in_parallel(dynamics, xs[:-1], reorganize_controls(us), step_size,
hp.controls_per_interval, hp.integration_method)
return jnp.ravel(px - xs[1:])
def constraints(variables: jnp.ndarray) -> jnp.ndarray:
"""
Calculate the constraint violations of a trajectory
Args:
variables: Raveled states and controls
Returns:
Constraint violations of trajectory
"""
xs, us = unravel(variables)
px, _ = integrate_time_independent_in_parallel(system.dynamics, xs[:-1], reorganize_controls(us), step_size,
hp.controls_per_interval, hp.integration_method)
return jnp.ravel(px - xs[1:])
############################
# State and Control Bounds #
############################
# State decision variables at every node
x_bounds = np.zeros((hp.intervals + 1, system.bounds.shape[0] - control_shape, 2))
x_bounds[:, :, :] = system.bounds[:-control_shape]
# Starting state
x_bounds[0, :, :] = jnp.expand_dims(system.x_0, 1)
# Ending state
if system.x_T is not None:
for i in range(len(system.x_T)):
if system.x_T[i] is not None:
x_bounds[-1, i, :] = system.x_T[i]
# Reshape for call to 'minimize'
x_bounds = x_bounds.reshape((-1, 2))
# Control decision variables at every node, and if RK4, also at midpoints
u_bounds = np.empty(((midpoints_const * num_steps + 1) * control_shape, 2)) # Include midpoints too
for i in range(control_shape, 0, -1):
u_bounds[(control_shape - i) * (midpoints_const * num_steps + 1):(control_shape - i + 1) * (
midpoints_const * num_steps + 1)] = system.bounds[-i]
# Reshape for call to 'minimize'
u_bounds = u_bounds.reshape((-1, 2))
# print("u bounds", u_bounds)
# Stack all bounds together for the NLP solver
bounds = jnp.vstack((x_bounds, u_bounds))
self.x_bounds, self.u_bounds = x_bounds, u_bounds
super().__init__(hp, cfg, objective, parametrized_objective, constraints, parametrized_constraints,
bounds, guess, unravel)
| 11,796
| 41.283154
| 120
|
py
|
myriad
|
myriad-main/myriad/trajectory_optimizers/collocation/hermite_simpson.py
|
# (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import numpy as np
from jax import vmap
from jax.flatten_util import ravel_pytree
from typing import Tuple
from myriad.config import Config, HParams
from myriad.custom_types import Control, Controls, Cost, DState, DStates, Params, State, States, Timestep
from myriad.systems import FiniteHorizonControlSystem
from myriad.trajectory_optimizers.base import TrajectoryOptimizer
class HermiteSimpsonCollocationOptimizer(TrajectoryOptimizer):
def __init__(self, hp: HParams, cfg: Config, system: FiniteHorizonControlSystem) -> None:
"""
An optimizer that uses direct Hermite-Simpson collocation.
For reference, see https://epubs.siam.org/doi/10.1137/16M1062569.
Note that we are keeping the knot points and the midpoints together
in one big array, instead of separating them. This improves compatibility
with the other trajectory_optimizers.
Args:
hp: Hyperparameters
cfg: Additional hyperparameters
system: The system on which to perform the optimization
"""
interval_duration = system.T / hp.intervals
state_shape = system.x_0.shape[0]
control_shape = system.bounds.shape[0] - state_shape
###########################
# State and Control Guess #
###########################
# Initial guess for controls
u_guess = jnp.zeros((2 * hp.intervals + 1, control_shape))
# Initial guess for state
if system.x_T is not None:
x_guess = jnp.linspace(system.x_0, system.x_T, num=2 * hp.intervals + 1)
else:
x_guess = jnp.ones(shape=(2 * hp.intervals + 1, state_shape)) * 0.1
initial_variables = (x_guess, u_guess)
guess, unravel_decision_variables = ravel_pytree(initial_variables)
self.x_guess, self.u_guess = x_guess, u_guess
############################
# State and Control Bounds #
############################
# Bounds for states
x_bounds = np.zeros((2 * hp.intervals + 1, system.bounds.shape[0] - control_shape, 2))
x_bounds[:, :, :] = system.bounds[:-control_shape]
# Starting state
x_bounds[0, :, :] = jnp.expand_dims(system.x_0, 1)
# Ending state
if system.x_T is not None:
for i in range(len(system.x_T)):
if system.x_T[i] is not None:
x_bounds[-1, i, :] = system.x_T[i]
# Reshape for call to 'minimize'
x_bounds = x_bounds.reshape((-1, 2))
# Bounds for controls
u_bounds = np.empty(((2 * hp.intervals + 1) * control_shape, 2)) # Include midpoints too
for i in range(control_shape, 0, -1):
u_bounds[(control_shape - i) * (2 * hp.intervals + 1):(control_shape - i + 1) * (
2 * hp.intervals + 1)] = system.bounds[-i]
# Reshape for call to 'minimize'
u_bounds = u_bounds.reshape((-1, 2))
# Stack all bounds together for the NLP solver
bounds = jnp.vstack((x_bounds, u_bounds))
self.x_bounds, self.u_bounds = x_bounds, u_bounds
# Helper function
def get_start_and_next_states_and_controls(variables: jnp.ndarray) -> Tuple[States, States, States,
Controls, Controls, Controls]:
"""
Extracts start, mid, and ending arrays of decision variables
Args:
variables: Raveled state and control variables
Returns:
(start xs, mid xs, end xs, start us, mid us, end us)
"""
xs, us = unravel_decision_variables(variables)
# States
knot_point_xs = xs[::2]
start_xs = knot_point_xs[:-1]
end_xs = knot_point_xs[1:]
mid_point_xs = xs[1::2]
# Controls
knot_point_us = us[::2]
start_us = knot_point_us[:-1]
end_us = knot_point_us[1:]
mid_point_us = us[1::2]
return start_xs, mid_point_xs, end_xs, start_us, mid_point_us, end_us
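# Shape sketch (illustrative): with hp.intervals = 2 the decision variables hold
# five states [x0, xm0, x1, xm1, x2]; knot points are xs[::2] = [x0, x1, x2] and
# midpoints are xs[1::2] = [xm0, xm1], and likewise for the controls.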
# Calculates midpoint constraint on-the-fly
def hs_defect(state: State, mid_state: State, next_state: State,
control: Control, mid_control: Control, next_control: Control) -> DState:
"""
Hermite-Simpson collocation constraints
Args:
state: State at start of interval
mid_state: State at midpoint of interval
next_state: State at end of interval
control: Control at start of interval
mid_control: Control at midpoint of interval
next_control: Control at end of interval
Returns:
Hermite-Simpson defect of the interval
"""
rhs = next_state - state
lhs = (interval_duration / 6) * (system.dynamics(state, control)
+ 4 * system.dynamics(mid_state, mid_control)
+ system.dynamics(next_state, next_control))
return rhs - lhs
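# The defect above is the residual of the Simpson quadrature relation
#   x_{k+1} - x_k ≈ (h/6) * (f(x_k, u_k) + 4 f(x_mid, u_mid) + f(x_{k+1}, u_{k+1})),
# which the NLP solver drives to zero on every interval.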
# Calculates midpoint constraint on-the-fly
def parametrized_hs_defect(params: Params,
state: State, mid_state: State, next_state: State,
control: Control, mid_control: Control, next_control: Control) -> DState:
"""
Hermite-Simpson collocation constraints
Args:
state: State at start of interval
mid_state: State at midpoint of interval
next_state: State at end of interval
control: Control at start of interval
mid_control: Control at midpoint of interval
next_control: Control at end of interval
params: Custom model parameters
Returns:
Hermite-Simpson defect of the interval
"""
rhs = next_state - state
lhs = (interval_duration / 6) * (system.parametrized_dynamics(params, state, control)
+ 4 * system.parametrized_dynamics(params, mid_state, mid_control)
+ system.parametrized_dynamics(params, next_state, next_control))
return rhs - lhs
def hs_interpolation(state: State, mid_state: State, next_state: State,
control: Control, mid_control: Control, next_control: Control) -> DState:
"""
Calculate Hermite-Simpson interpolation constraints
Args:
state: State at start of interval
mid_state: State at midpoint of interval
next_state: State at end of interval
control: Control at start of interval
mid_control: Control at midpoint of interval (unused)
next_control: Control at end of interval
Returns:
Interpolation constraint
"""
return (mid_state
- (1 / 2) * (state + next_state)
- (interval_duration / 8) * (system.dynamics(state, control)
- system.dynamics(next_state, next_control)))
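# This is the Hermite interpolation condition: the midpoint state must equal
#   (x_k + x_{k+1}) / 2 + (h/8) * (f(x_k, u_k) - f(x_{k+1}, u_{k+1})),
# so the midpoint is consistent with the cubic segment implied by the endpoints.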
def parametrized_hs_interpolation(params: Params,
state: State, mid_state: State, next_state: State,
control: Control, mid_control: Control, next_control: Control) -> DState:
"""
Calculate Hermite-Simpson interpolation constraints
Args:
state: State at start of interval
mid_state: State at midpoint of interval
next_state: State at end of interval
control: Control at start of interval
mid_control: Control at midpoint of interval (unused)
next_control: Control at end of interval
params: Custom model parameters
Returns:
Interpolation constraint
"""
return (mid_state
- (1 / 2) * (state + next_state)
- (interval_duration / 8) * (system.parametrized_dynamics(params, state, control)
- system.parametrized_dynamics(params, next_state, next_control)))
# This is the "J" from the tutorial (6.5)
def hs_cost(state: State, mid_state: State, next_state: State,
control: Control, mid_control: Control, next_control: Control,
start_time: Timestep, mid_time: Timestep, next_time: Timestep) -> Cost:
"""
Calculate the Hermite-Simpson cost.
Args:
state: State at start of interval
mid_state: State at midpoint of interval
next_state: State at end of interval
control: Control at start of interval
mid_control: Control at midpoint of interval
next_control: Control at end of interval
start_time: Time at start of interval
mid_time: Time at midpoint of interval
next_time: Time at end of interval
Returns:
Hermite-Simpson cost of interval
"""
return (interval_duration / 6) * (system.cost(state, control, start_time)
+ 4 * system.cost(mid_state, mid_control, mid_time)
+ system.cost(next_state, next_control, next_time))
def parametrized_hs_cost(params: Params,
state: State, mid_state: State, next_state: State,
control: Control, mid_control: Control, next_control: Control,
start_time: Timestep, mid_time: Timestep, next_time: Timestep) -> Cost:
"""
Calculate the Hermite-Simpson cost.
Args:
state: State at start of interval
mid_state: State at midpoint of interval
next_state: State at end of interval
control: Control at start of interval
mid_control: Control at midpoint of interval
next_control: Control at end of interval
start_time: Time at start of interval
mid_time: Time at midpoint of interval
next_time: Time at end of interval
params: Custom model parameters
Returns:
Hermite-Simpson cost of interval
"""
return (interval_duration / 6) * (system.parametrized_cost(params, state, control, start_time)
+ 4 * system.parametrized_cost(params, mid_state, mid_control, mid_time)
+ system.parametrized_cost(params, next_state, next_control, next_time))
#######################
# Cost and Constraint #
#######################
def objective(variables: jnp.ndarray) -> Cost:
"""
Calculate the Hermite-Simpson objective for this trajectory
Args:
variables: Raveled states and controls
Returns:
Objective of trajectory
"""
unraveled_vars = get_start_and_next_states_and_controls(variables)
all_times = jnp.linspace(0, system.T, num=2 * hp.intervals + 1) # Support cost function with dependency on t
start_and_end_times = all_times[::2]
start_times = start_and_end_times[:-1]
end_times = start_and_end_times[1:]
mid_times = all_times[1::2]
return jnp.sum(vmap(hs_cost)(*unraveled_vars, start_times, mid_times, end_times))
def parametrized_objective(params: Params, variables: jnp.ndarray) -> Cost:
"""
Calculate the Hermite-Simpson objective for this trajectory
Args:
variables: Raveled states and controls
params: Custom model parameters
Returns:
Objective of trajectory
"""
unraveled_vars = get_start_and_next_states_and_controls(variables)
all_times = jnp.linspace(0, system.T, num=2 * hp.intervals + 1) # Support cost function with dependency on t
start_and_end_times = all_times[::2]
start_times = start_and_end_times[:-1]
end_times = start_and_end_times[1:]
mid_times = all_times[1::2]
return jnp.sum(vmap(parametrized_hs_cost, in_axes=(None, 0, 0, 0, 0, 0, 0, 0, 0, 0))(params,
*unraveled_vars, start_times,
mid_times, end_times))
# TODO: test to make sure this actually works ^
def hs_equality_constraints(variables: jnp.ndarray) -> DStates:
"""
Calculate the equality constraint violations for this trajectory (does not include midpoint constraints)
Args:
variables: Raveled states and controls
Returns:
Equality constraint violations of trajectory
"""
unraveled_vars = get_start_and_next_states_and_controls(variables)
return jnp.ravel(vmap(hs_defect)(*unraveled_vars))
def parametrized_hs_equality_constraints(params: Params, variables: jnp.ndarray) -> DStates:
"""
Calculate the equality constraint violations for this trajectory (does not include midpoint constraints)
Args:
variables: Raveled states and controls
params: Custom model parameters
Returns:
Equality constraint violations of trajectory
"""
unraveled_vars = get_start_and_next_states_and_controls(variables)
return jnp.ravel(vmap(parametrized_hs_defect, in_axes=(None, 0, 0, 0, 0, 0, 0))(params, *unraveled_vars))
def hs_interpolation_constraints(variables: jnp.ndarray) -> DStates:
"""
Calculate the midpoint constraint violations for this trajectory
Args:
variables: Raveled states and controls
Returns:
Midpoint constraint violations of trajectory
"""
unraveled_vars = get_start_and_next_states_and_controls(variables)
return jnp.ravel(vmap(hs_interpolation)(*unraveled_vars))
def parametrized_hs_interpolation_constraints(params: Params, variables: jnp.ndarray) -> DStates:
"""
Calculate the midpoint constraint violations for this trajectory
Args:
variables: Raveled states and controls
params: Custom model parameters
Returns:
Midpoint constraint violations of trajectory
"""
unraveled_vars = get_start_and_next_states_and_controls(variables)
return jnp.ravel(vmap(parametrized_hs_interpolation, in_axes=(None, 0, 0, 0, 0, 0, 0))(params, *unraveled_vars))
def constraints(variables: jnp.ndarray) -> DStates:
"""
Calculate all constraint violations for this trajectory
Args:
variables: Raveled states and controls
Returns:
All constraint violations of trajectory
"""
equality_defects = hs_equality_constraints(variables)
interpolation_defects = hs_interpolation_constraints(variables)
return jnp.hstack((equality_defects, interpolation_defects))
def parametrized_constraints(params: Params, variables: jnp.ndarray) -> DStates:
"""
Calculate all constraint violations for this trajectory
Args:
variables: Raveled states and controls
params: Custom model parameters
Returns:
All constraint violations of trajectory
"""
equality_defects = parametrized_hs_equality_constraints(params, variables)
interpolation_defects = parametrized_hs_interpolation_constraints(params, variables)
return jnp.hstack((equality_defects, interpolation_defects))
super().__init__(hp, cfg, objective, parametrized_objective, constraints, parametrized_constraints,
bounds, guess, unravel_decision_variables)
| 15,045
| 41.744318
| 118
|
py
|
myriad
|
myriad-main/myriad/trajectory_optimizers/collocation/__init__.py
| 0
| 0
| 0
|
py
|
|
myriad
|
myriad-main/myriad/trajectory_optimizers/collocation/trapezoidal.py
|
# (c) 2021 Nikolaus Howe
import jax.numpy as jnp
import numpy as np
from jax import vmap
from jax.flatten_util import ravel_pytree
from myriad.config import Config, HParams
from myriad.custom_types import Control, Cost, DState, Params, State, Timestep, DStates
from myriad.trajectory_optimizers.base import TrajectoryOptimizer
from myriad.systems import FiniteHorizonControlSystem
from myriad.utils import integrate_time_independent
class TrapezoidalCollocationOptimizer(TrajectoryOptimizer):
def __init__(self, hp: HParams, cfg: Config, system: FiniteHorizonControlSystem) -> None:
"""
An optimizer that uses direct trapezoidal collocation.
For reference, see https://epubs.siam.org/doi/10.1137/16M1062569
Args:
hp: Hyperparameters
cfg: Additional hyperparameters
system: The system on which to perform the optimization
"""
num_intervals = hp.intervals # Segments
h = system.T / num_intervals # Segment length
state_shape = system.x_0.shape[0]
control_shape = system.bounds.shape[0] - state_shape
# print("the control shape is", control_shape)
###########################
# State and Control Guess #
###########################
u_guess = jnp.zeros((num_intervals + 1, control_shape))
if system.x_T is not None:
# We need to handle the cases where a terminal bound is specified only for some state variables, not all
row_guesses = []
for i in range(0, len(system.x_T)):
if system.x_T[i] is not None:
row_guess = jnp.linspace(system.x_0[i], system.x_T[i], num=num_intervals + 1).reshape(-1, 1)
else:
_, row_guess = integrate_time_independent(system.dynamics, system.x_0,
u_guess, h, num_intervals, hp.integration_method)
row_guess = row_guess[:, i].reshape(-1, 1)
row_guesses.append(row_guess)
x_guess = jnp.hstack(row_guesses)
else: # no final state requirement
_, x_guess = integrate_time_independent(system.dynamics, system.x_0,
u_guess, h, num_intervals, hp.integration_method)
guess, unravel_decision_variables = ravel_pytree((x_guess, u_guess))
self.x_guess, self.u_guess = x_guess, u_guess
############################
# State and Control Bounds #
############################
# Control bounds
u_bounds = np.empty(((num_intervals + 1) * control_shape, 2))
for i in range(control_shape, 0, -1):
u_bounds[(control_shape - i) * (num_intervals + 1)
:(control_shape - i + 1) * (num_intervals + 1)] = system.bounds[-i]
# Reshape to work with NLP solver
u_bounds = u_bounds.reshape((-1, 2))
# State bounds
x_bounds = np.empty((num_intervals + 1, system.bounds.shape[0] - control_shape, 2))
x_bounds[:, :, :] = system.bounds[:-control_shape]
x_bounds[0, :, :] = np.expand_dims(system.x_0, 1)
if system.x_T is not None:
# Pin only the components with a specified terminal value, at the final node
for i in range(len(system.x_T)):
if system.x_T[i] is not None:
x_bounds[-1, i, :] = system.x_T[i]
# Reshape to work with NLP solver
x_bounds = x_bounds.reshape((-1, 2))
# Put control and state bounds together
bounds = jnp.vstack((x_bounds, u_bounds))
self.x_bounds, self.u_bounds = x_bounds, u_bounds
def trapezoid_cost(x_t1: State, x_t2: State,
u_t1: Control, u_t2: Control,
t1: Timestep, t2: Timestep) -> Cost:
"""
Args:
x_t1: State at start of interval
x_t2: State at end of interval
u_t1: Control at start of interval
u_t2: Control at end of interval
t1: Time at start of interval
t2: Time at end of interval
Returns:
Trapezoid cost of the interval
"""
return (h / 2) * (system.cost(x_t1, u_t1, t1) + system.cost(x_t2, u_t2, t2))
def parametrized_trapezoid_cost(params: Params,
x_t1: State, x_t2: State,
u_t1: Control, u_t2: Control,
t1: Timestep, t2: Timestep) -> Cost:
"""
Args:
x_t1: State at start of interval
x_t2: State at end of interval
u_t1: Control at start of interval
u_t2: Control at end of interval
t1: Time at start of interval
t2: Time at end of interval
params: Custom model parameters
Returns:
Trapezoid cost of the interval
"""
return (h / 2) * (system.parametrized_cost(params, x_t1, u_t1, t1)
+ system.parametrized_cost(params, x_t2, u_t2, t2))
def objective(variables: jnp.ndarray) -> Cost:
"""
The objective function.
Args:
variables: Raveled state and decision variables
Returns:
The sum of the trapezoid costs across the whole trajectory
"""
x, u = unravel_decision_variables(variables)
t = jnp.linspace(0, system.T, num=num_intervals + 1) # Support cost function with dependency on t
cost = jnp.sum(vmap(trapezoid_cost)(x[:-1], x[1:], u[:-1], u[1:], t[:-1], t[1:]))
if system.terminal_cost:
cost += jnp.sum(system.terminal_cost_fn(x[-1], u[-1]))
return cost
def parametrized_objective(params: Params, variables: jnp.ndarray) -> Cost:
"""
The objective function.
Args:
variables: Raveled state and decision variables
params: Custom model parameters
Returns:
The sum of the trapezoid costs across the whole trajectory
"""
x, u = unravel_decision_variables(variables)
t = jnp.linspace(0, system.T, num=num_intervals + 1) # Support cost function with dependency on t
cost = jnp.sum(vmap(parametrized_trapezoid_cost, in_axes=(None, 0, 0, 0, 0, 0, 0))(params,
x[:-1], x[1:],
u[:-1], u[1:],
t[:-1], t[1:]))
if system.terminal_cost:
cost += jnp.sum(system.terminal_cost_fn(x[-1], u[-1]))
return cost
# TODO: should the terminal cost function also take parameters?
# probably yes... (will need to fix this in shooting and hs too then)
def trapezoid_defect(x_t1: State, x_t2: State, u_t1: Control, u_t2: Control) -> DState:
"""
Args:
x_t1: State at start of interval
x_t2: State at end of interval
u_t1: Control at start of interval
u_t2: Control at end of interval
Returns:
Trapezoid defect of the interval
"""
left = (h / 2) * (system.dynamics(x_t1, u_t1) + system.dynamics(x_t2, u_t2))
right = x_t2 - x_t1
return left - right
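# The defect above is the residual of the trapezoid quadrature relation
#   x_{k+1} - x_k ≈ (h/2) * (f(x_k, u_k) + f(x_{k+1}, u_{k+1})),
# which the NLP solver drives to zero on every interval.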
def parametrized_trapezoid_defect(params: Params,
x_t1: State, x_t2: State,
u_t1: Control, u_t2: Control) -> DState:
"""
Args:
x_t1: State at start of interval
x_t2: State at end of interval
u_t1: Control at start of interval
u_t2: Control at end of interval
params: Custom model parameters
Returns:
Trapezoid defect of the interval
"""
left = (h / 2) * (system.parametrized_dynamics(params, x_t1, u_t1)
+ system.parametrized_dynamics(params, x_t2, u_t2))
right = x_t2 - x_t1
return left - right
def constraints(variables: jnp.ndarray) -> DStates:
"""
The constraints function.
Args:
variables: Raveled state and decision variables
Returns:
An array of the defects of the whole trajectory
"""
x, u = unravel_decision_variables(variables)
return jnp.ravel(vmap(trapezoid_defect)(x[:-1], x[1:], u[:-1], u[1:]))
def parametrized_constraints(params: Params, variables: jnp.ndarray) -> DStates:
"""
The constraints function.
Args:
variables: Raveled state and decision variables
params: Custom model parameters
Returns:
An array of the defects of the whole trajectory
"""
x, u = unravel_decision_variables(variables)
return jnp.ravel(vmap(parametrized_trapezoid_defect, in_axes=(None, 0, 0, 0, 0))(params,
x[:-1], x[1:],
u[:-1], u[1:]))
super().__init__(hp, cfg, objective, parametrized_objective, constraints, parametrized_constraints,
bounds, guess, unravel_decision_variables)
| 8,760
| 40.719048
| 110
|
py
|
myriad
|
myriad-main/myriad/experiments/e2e_sysid.py
|
# (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import optax
import pickle as pkl
from pathlib import Path
from typing import Tuple
from myriad.config import HParams, Config, SystemType, NLPSolverType
from myriad.custom_types import Params, DParams
from myriad.defaults import learning_rates, param_guesses
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot
from myriad.systems import get_name
from myriad.utils import integrate_time_independent, get_state_trajectory_and_cost, get_defect
NUM_UNROLLED = 10
# NOTE: we have a choice to make about whether we consider only
# a single trajectory at a time, or if we have a whole
# batch of trajectories (each with its own start state, and
# each with its own optimal controls) from which we sample
# at each iteration of the algorithm.
def run_endtoend(hp, cfg, num_epochs=10_000):
if hp.system not in param_guesses:
print("We do not currently support that kind of system for sysid. Exiting...")
return
data_path = f'datasets/{hp.system.name}/e2e_sysid/'
Path(data_path).mkdir(parents=True, exist_ok=True)
true_us_name = 'true_opt_us'
true_xs_name = 'true_opt_xs'
params_path = f'params/{hp.system.name}/e2e_sysid/'
Path(params_path).mkdir(parents=True, exist_ok=True)
params_name = 'e2e_parametric.p'
plots_path = f'plots/{hp.system.name}/e2e_sysid/'
Path(plots_path).mkdir(parents=True, exist_ok=True)
guesses_path = f'intermediate_guesses/{hp.system.name}/e2e_sysid/'
Path(guesses_path).mkdir(parents=True, exist_ok=True)
losses_path = f'losses/{hp.system.name}/e2e_sysid/'
Path(losses_path).mkdir(parents=True, exist_ok=True)
true_system = hp.system()
optimizer = get_optimizer(hp, cfg, true_system)
# Get the true optimal controls (and state),
# which we will try to imitate
try:
with open(data_path + true_us_name, 'rb') as myfile:
true_opt_us = jnp.array(pkl.load(myfile))
with open(data_path + true_xs_name, 'rb') as myfile:
true_opt_xs = jnp.array(pkl.load(myfile))
print("successfully loaded the saved optimal trajectory")
# plt.plot(true_opt_us)
# plt.plot(true_opt_xs)
# plt.show()
except Exception as e:
print("We haven't saved the optimal trajectory for this system yet, so we'll do that now")
true_solution = optimizer.solve()
true_opt_us = true_solution['u']
print("true opt us", true_opt_us.shape)
_, true_opt_xs = integrate_time_independent(
true_system.dynamics, true_system.x_0, true_opt_us, hp.stepsize, hp.num_steps, hp.integration_method)
print("true opt xs", true_opt_xs.shape)
with open(data_path + true_us_name, 'wb') as myfile:
pkl.dump(true_opt_us, myfile)
with open(data_path + true_xs_name, 'wb') as myfile:
pkl.dump(true_opt_xs, myfile)
try:
params = pkl.load(open(params_path + params_name, 'rb'))
print("It seems we've already trained for this system, so we'll go straight to evaluation.")
except FileNotFoundError as e:
print("unable to find the params, so we'll guess "
"and then optimize and save")
# Make a guess for our parameters
params = param_guesses[hp.system]
# solution_guess = optimizer.solve_with_params(params)
# xs_and_us = solution_guess['xs_and_us']
# lmbdas = solution_guess['lambda']
xs_and_us = optimizer.guess
lmbdas = jnp.zeros_like(optimizer.constraints(optimizer.guess))
# Save the initial parameter guess so we can reset to it later
original_xs_and_us = jnp.array(xs_and_us)
original_lmbdas = jnp.array(lmbdas)
# Parameter optimizer
opt = optax.adam(hp.adam_lr) # 1e-4
opt_state = opt.init(params)
# Control/state/duals optimizer
eta_x = hp.eta_x
eta_v = hp.eta_lmbda
if hp.system in learning_rates:
eta_x = learning_rates[hp.system]['eta_x']
eta_v = learning_rates[hp.system]['eta_v']
bounds = optimizer.bounds
@jax.jit
def lagrangian(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> float:
return (optimizer.parametrized_objective(params, xs_and_us)
+ lmbdas @ optimizer.parametrized_constraints(params, xs_and_us))
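# Standard Lagrangian of the constrained trajectory problem: the objective plus
# the dual variables `lmbdas` contracted with the equality (dynamics-defect)
# constraints.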
@jax.jit
def step(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> Tuple[jnp.ndarray, jnp.ndarray]:
x_bar = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
x_new = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x_bar, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
lmbda_new = lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x_new, lmbda, params)
return x_new, lmbda_new
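# The two clipped gradient evaluations above form a projected extragradient
# update: a lookahead step to x_bar, then the actual step taken with the
# gradient evaluated at x_bar, which stabilizes the primal-dual saddle dynamics.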
@jax.jit
def step_x(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
x_bar = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
x_new = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x_bar, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
return x_new
@jax.jit
def step_lmbda(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
lmbda_new = lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x, lmbda, params)
return lmbda_new
jac_x = jax.jit(jax.jacobian(step_x, argnums=(0, 1, 2)))
jac_lmbda = jax.jit(jax.jacobian(step_lmbda, argnums=(0, 1, 2)))
jac_x_p = jax.jit(jax.jacobian(step_x, argnums=2))
jac_lmbda_p = jax.jit(jax.jacobian(step_lmbda, argnums=2))
# Update the primals and duals using the current model,
# and also return the Jacobians of them with respect to the parameters.
@jax.jit
def many_steps_grad(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> DParams:
zx = jac_x_p(xs_and_us, lmbdas, params)
zx = jax.tree_util.tree_map(lambda x: x * 0., zx)
zlmbda = jac_lmbda_p(xs_and_us, lmbdas, params)
zlmbda = jax.tree_util.tree_map(lambda x: x * 0., zlmbda)
@jax.jit
def body_fun(i, vars):
xs_and_us, lmbdas, zx, zlmbda = vars
dx, dlmbda, dp = jac_x(xs_and_us, lmbdas, params)
x_part = jax.tree_util.tree_map(lambda el: dx @ el, zx)
lmbda_part = jax.tree_util.tree_map(lambda el: dlmbda @ el, zlmbda)
zx = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
# zx = jax.tree_util.tree_multimap(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
xs_and_us = step_x(xs_and_us, lmbdas, params)
dx, dlmbda, dp = jac_lmbda(xs_and_us, lmbdas, params)
x_part = jax.tree_util.tree_map(lambda el: dx @ el, zx)
lmbda_part = jax.tree_util.tree_map(lambda el: dlmbda @ el, zlmbda)
zlmbda = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
# zlmbda = jax.tree_util.tree_multimap(lambda a, b, c: a + b + c, dp, x_part, lmbda_part)
lmbdas = step_lmbda(xs_and_us, lmbdas, params)
return xs_and_us, lmbdas, zx, zlmbda
xs_and_us, lmbdas, zx, zlmbda = jax.lax.fori_loop(0, NUM_UNROLLED, body_fun, (xs_and_us, lmbdas, zx, zlmbda))
return zx
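# many_steps_grad forward-accumulates d(xs_and_us)/d(params) through NUM_UNROLLED
# primal-dual updates: at each step the new sensitivity is the direct Jacobian
# w.r.t. params (dp) plus the step Jacobians (dx, dlmbda) applied to the
# accumulated sensitivities zx and zlmbda.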
# Imitation loss for the optimal controls
# def control_imitation_loss(params: Params, xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, epoch: int):
# for _ in range(NUM_UNROLLED):
# xs_and_us, lmbdas = step(xs_and_us, lmbdas, params)
# xs, us = optimizer.unravel(xs_and_us)
#
# diff = us - true_opt_us
# sq_diff = diff * diff
# long = jnp.mean(sq_diff, axis=1) # average all axes except time
# discount = (1 - 1 / (1 + jnp.exp(2 + 0.00001 * epoch))) ** jnp.arange(len(long))
# if hp.system in [SystemType.MOUNTAINCAR, SystemType.PENDULUM]:
# print("min discount", discount[-1])
# else:
# discount = 1.
# return jnp.mean(long * discount)
@jax.jit
def simple_imitation_loss(xs_and_us: jnp.ndarray, epoch):
xs, us = optimizer.unravel(xs_and_us)
diff = us - true_opt_us
sq_diff = diff * diff
long = jnp.mean(sq_diff, axis=1) # average all axes except time
discount = (1 - 1 / (1 + jnp.exp(2 + 0.000001 * epoch))) ** jnp.arange(len(long))
if hp.system in [SystemType.BACTERIA, SystemType.MOUNTAINCAR, SystemType.CARTPOLE, SystemType.PENDULUM]:
print("min discount", discount[-1])
else:
discount = 1.
return jnp.mean(long * discount)
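# Note: under jax.jit the print above fires only at trace time and shows a tracer,
# not a runtime value; the `hp.system in [...]` branch is likewise resolved once,
# at trace time, since hp is a static Python object here.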
@jax.jit
def lookahead_update(params: Params, opt_state: optax.OptState,
xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, epoch: int) -> Tuple[Params, optax.OptState]:
dx_dp = many_steps_grad(xs_and_us, lmbdas, params)
dJ_dx = jax.grad(simple_imitation_loss)(xs_and_us, epoch)
dJdp = jax.tree_util.tree_map(lambda x: dJ_dx @ x, dx_dp)
updates, opt_state = opt.update(dJdp, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
# Use these to record the guesses
ts = []
primal_guesses = []
dual_guesses = []
# Use this to record the losses
imitation_losses = []
print("starting guess of params", params)
save_and_reset_time = 1_000
record_things_time = 10
# Use a while-loop rather than a for-loop: reassigning a for-loop variable has no
# effect in Python, and the resume logic below needs `epoch += save_and_reset_time`.
epoch = 0
while epoch < num_epochs:
if epoch % save_and_reset_time == 0:
# Check if the next params already exist (in which case we go straight to them)
try:
cur_params_name = f'{epoch + save_and_reset_time}e2e_parametric.p'
params = pkl.load(open(params_path + cur_params_name, 'rb'))
print("It seems we've already trained up to the next epoch, so we'll go straight there")
epoch += save_and_reset_time
continue
except FileNotFoundError as e:
pass
# Record more around the very start
record_things_time = 1
print("saving current params")
pkl.dump(params, open(params_path + str(epoch) + params_name, 'wb'))
print("saving guesses so far")
pkl.dump(ts, open(guesses_path + str(epoch) + 'ts', 'wb'))
pkl.dump(primal_guesses, open(guesses_path + str(epoch) + 'primals', 'wb'))
pkl.dump(dual_guesses, open(guesses_path + str(epoch) + 'duals', 'wb'))
print("saving imitation losses")
pkl.dump(imitation_losses, open(losses_path + str(epoch) + '_losses', 'wb'))
print("resetting guess")
# Reset the guess to a different random small amount
hp.key, subkey = jax.random.split(hp.key)
optimizer = get_optimizer(hp, cfg, true_system)
xs_and_us = optimizer.guess
lmbdas = original_lmbdas
if epoch % record_things_time == 0:
# Only have high-density recording around the start of each guess
if epoch >= 10:
record_things_time = 50
# Save the current params
ts.append(epoch)
primal_guesses.append(np.array(xs_and_us))
dual_guesses.append(np.array(lmbdas))
# Save the current imitation loss
cur_loss = simple_imitation_loss(xs_and_us, epoch)
imitation_losses.append(cur_loss)
print("loss", cur_loss)
print("params", params)
# Take step(s) with the model
for _ in range(NUM_UNROLLED):
xs_and_us, lmbdas = step(xs_and_us, lmbdas, params)
# Now update to prepare for next steps
params, opt_state = lookahead_update(params, opt_state, xs_and_us, lmbdas, epoch)
epoch += 1
print("Saving the final params", params)
pkl.dump(params, open(params_path + params_name, 'wb'))
print("Saving the final guesses")
pkl.dump(ts, open(guesses_path + str(num_epochs - 1) + 'ts', 'wb'))
pkl.dump(primal_guesses, open(guesses_path + str(num_epochs - 1) + 'primals', 'wb'))
pkl.dump(dual_guesses, open(guesses_path + str(num_epochs - 1) + 'duals', 'wb'))
print("Saving the final losses")
pkl.dump(imitation_losses, open(losses_path + str(num_epochs - 1) + 'losses', 'wb'))
#######################
# Imitation loss plot #
#######################
b = matplotlib.get_backend()
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
plt.rcParams["figure.figsize"] = (4, 3.3)
# Plot the imitation loss over time (params are already open, but putting this here for clarity)
params = pkl.load(open(params_path + params_name, 'rb'))
print("the params are", params)
losses = pkl.load(open(losses_path + str(num_epochs - 1) + 'losses', 'rb'))
ts = pkl.load(open(guesses_path + str(num_epochs - 1) + 'ts', 'rb'))
primal_guesses = pkl.load(open(guesses_path + str(num_epochs - 1) + 'primals', 'rb'))
# Plot the imitation loss over time
plt.plot(ts, losses)
plt.grid()
plt.xlabel('iteration')
plt.ylabel('imitation loss')
plt.title("Imitation Loss")
plt.tight_layout()
plt.savefig(plots_path + f'imitation_loss.{cfg.file_extension}', bbox_inches='tight')
plt.close()
#####################
# Control loss plot #
#####################
# Plot the control performance over time
print("Plotting control performance over time")
true_state_trajectory, optimal_cost = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
parallel_get_state_trajectory_and_cost = jax.vmap(get_state_trajectory_and_cost, in_axes=(None, None, None, 0))
ar_primal_guesses = jnp.array(primal_guesses)
# parallel_unravel = jax.vmap(optimizer.unravel, in_axes=0)
# long_uus = jnp.array(long_uus)
# _, uus = parallel_unravel(ar_primal_guesses)
# NOTE: for some reason, the above approach stopped working with a jax update.
# Manually going through the loop works fine.
long_uus = []
for uu in ar_primal_guesses:
long_uus.append(optimizer.unravel(uu)[1])
uus = jnp.array(long_uus)
xxs, cs = parallel_get_state_trajectory_and_cost(hp, true_system, true_system.x_0, uus)
plt.axhline(optimal_cost, color='grey', linestyle='dashed')
plt.plot(ts, cs)
plt.grid()
plt.xlabel('iteration')
plt.ylabel('cost')
plt.title("Trajectory Cost")
plt.tight_layout()
plt.savefig(plots_path + f'control_performance.{cfg.file_extension}', bbox_inches='tight')
plt.close()
#######################
# Final planning plot #
#######################
# Plot the performance of planning with the final model
print("Plotting final planning performance")
hp = HParams(nlpsolver=NLPSolverType.EXTRAGRADIENT)
cfg = Config()
true_system = hp.system()
optimizer = get_optimizer(hp, cfg, true_system)
learned_solution = optimizer.solve_with_params(params)
learned_x, learned_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, learned_solution['u'])
learned_defect = get_defect(true_system, learned_x)
true_x, true_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
true_defect = get_defect(true_system, true_x)
plot(hp, true_system,
data={'x': true_opt_xs,
'other_x': learned_x,
'u': true_opt_us,
'other_u': learned_solution['u'],
'cost': true_c,
'other_cost': learned_c,
'defect': true_defect,
'other_defect': learned_defect},
labels={'x': ' (true state from controls planned with true model)',
'other_x': ' (true state from controls planned with learned model)',
'u': ' (planned with true model)',
'other_u': ' (planned with learned model)'},
styles={'x': '-',
'other_x': 'x-',
'u': '-',
'other_u': 'x-'},
widths={'x': 3,
'other_x': 1,
'u': 3,
'other_u': 1},
save_as=plots_path + f'planning_with_model.{cfg.file_extension}',
figsize=cfg.figsize)
#####################
# Decision var plot #
#####################
# Plot showing how the guess converges to the optimal trajectory
matplotlib.use(b)
plt.rcParams["figure.figsize"] = (7, 5.6)
print("Plotting convergence")
title = get_name(hp)
if title is not None:
plt.suptitle(r"Intermediate Trajectories" + r" $-$ " + title)
else:
plt.suptitle(r"Intermediate Trajectories")
plt.subplot(2, 1, 1)
plt.grid()
# Plot intermediate controls with transparency
for xs in xxs:
plt.plot(xs, color='orange', alpha=0.01)
plt.ylabel('state (x)')
plt.plot(true_opt_xs, label="true state from controls planned with true model", lw=3)
# Plot the final state curve
plt.plot(xxs[-1], 'x-', label="true state from final controls")
plt.legend(loc='upper right')
# Plot controls
plt.subplot(2, 1, 2)
plt.plot(true_opt_us, label="planned with true model", lw=3)
# Plot intermediate controls with transparency
for us in uus:
plt.plot(us, color='orange', alpha=0.01)
# Plot the final control curve
plt.ylabel('control (u)')
plt.xlabel('time (s)')
plt.plot(uus[-1], 'x-', label="controls at the end of training")
plt.legend(loc='upper right')
plt.grid()
plt.tight_layout()
plt.savefig(plots_path + 'e2e_cool_plot.png', dpi=300, bbox_inches='tight')
if __name__ == "__main__":
hp, cfg = HParams(), Config()
run_endtoend(hp, cfg)
| 17,345
| 37.892377
| 116
|
py
|
myriad
|
myriad-main/myriad/experiments/node_mle_sysid.py
|
# (c) Nikolaus Howe 2021
from __future__ import annotations
import csv
import jax
import jax.numpy as jnp
import numpy as np
import pickle as pkl
from pathlib import Path
from myriad.config import Config, HParams, IntegrationMethod
from myriad.neural_ode.create_node import NeuralODE
from myriad.neural_ode.node_training import train
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot, plot_losses
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.utils import get_state_trajectory_and_cost, integrate_time_independent, sample_x_init
def run_node_mle_sysid(hp: HParams, cfg: Config) -> None:
# Instantiate the neural ode. We'll keep updating its parameters
# (either by training or by loading from save)
node = NeuralODE(hp, cfg)
true_opt = get_optimizer(hp, cfg, node.system)
true_solution = true_opt.solve()
learned_system = NodeSystem(node, node.system)
learned_opt = get_optimizer(hp, cfg, learned_system)
official_trained_for = 0
for experiment_number in range(0, hp.num_experiments):
print(f"### EXPERIMENT {experiment_number} ###")
official_trained_for += hp.num_epochs
actual_trained_for = official_trained_for # overwrite if not exact (if we have the info)
losses_path = f'losses/{hp.system.name}/node_mle_sysid/'
Path(losses_path).mkdir(parents=True, exist_ok=True)
losses_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.l'
params_path = f'params/{hp.system.name}/node_mle_sysid/'
Path(params_path).mkdir(parents=True, exist_ok=True) # create the directory if it doesn't already exist
params_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.p'
plots_path = f'plots/{hp.system.name}/node_mle_sysid/'
progress_plots_path = f'plots/{hp.system.name}/node_mle_sysid/progress_plots/'
Path(progress_plots_path).mkdir(parents=True, exist_ok=True)
plots_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}'
data_path = f'datasets/{hp.system.name}/node_mle_sysid/'
Path(data_path).mkdir(parents=True, exist_ok=True) # create the directory if it doesn't already exist
data_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}.d'
try:
node.load_params(params_path + params_name)
except FileNotFoundError as e:
print("unable to find the params file, so we'll train our"
"model to learn some, and then save them")
# If the datasets already exist, then load it.
# If it doesn't then we augment the dataset
try:
node.load_dataset(data_path + data_name)
except FileNotFoundError as e:
print("unable to find dataset for this experiment, so we'll make our own")
# (unless it's the first one, in which case we use the
# dataset which is already there)
if experiment_number > 0:
print("We will now augment the dataset. Currently, the train data are", node.train_data.shape)
node.augment_datasets()
print("After augmenting, the train data are", node.train_data.shape)
# Save the dataset for the next time
pkl.dump(node.full_data, open(data_path + data_name, 'wb'))
# Now, we train on this dataset, until early stopping
node.key, subkey = jax.random.split(node.key)
# Perform the training
end_epoch = train(node, save_as=progress_plots_path + plots_name, extension=cfg.file_extension)
actual_trained_for = official_trained_for - node.hp.num_epochs + end_epoch
# TODO: do we care how many epochs it trained for?
# start_epoch += increment * node.train_size
# Save the learned parameters
node.save_params(params_path + params_name)
# Save the losses for this experiment
# print("saving train and val losses for experiment", experiment_number)
with open(losses_path + losses_name, 'w') as f:
write = csv.writer(f)
for i, t in enumerate(node.losses['ts']):
write.writerow([t, node.losses['train_loss'][i], node.losses['validation_loss'][i]])
if cfg.plot:
#################
# Planning plot #
#################
learned_solution = learned_opt.solve_with_params(node.params)
true_x, true_c = get_state_trajectory_and_cost(hp, node.system, node.system.x_0, true_solution['u'])
if node.system.x_T is not None:
true_defect = []
for i, s in enumerate(true_x[-1]):
if node.system.x_T[i] is not None:
true_defect.append(s - node.system.x_T[i])
true_defect = np.array(true_defect)
else:
true_defect = None
learned_x, learned_c = get_state_trajectory_and_cost(hp, node.system, node.system.x_0, learned_solution['u'])
if node.system.x_T is not None:
learned_defect = []
for i, s in enumerate(learned_x[-1]):
if node.system.x_T[i] is not None:
learned_defect.append(s - node.system.x_T[i])
learned_defect = np.array(learned_defect)
else:
learned_defect = None
planning_plot_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_' \
f'exp_{experiment_number}_planning.{cfg.file_extension}'
plot(hp, node.system,
data={'x': true_x,
'other_x': learned_x,
'u': true_solution['u'],
'other_u': learned_solution['u'],
'cost': true_c,
'other_cost': learned_c,
'defect': true_defect,
'other_defect': learned_defect},
labels={'x': ' (true state from controls planned with true model)',
'other_x': ' (true state from controls planned with learned model)',
'u': ' (planned with true model)',
'other_u': ' (planned with learned model)'},
styles={'x': '-',
'other_x': 'x-',
'u': '-',
'other_u': 'x-'},
widths={'x': 3,
'other_x': 1,
'u': 3,
'other_u': 1},
save_as=plots_path + planning_plot_name,
figsize=cfg.figsize)
###############
# Losses plot #
###############
print("plotting losses for experiment", experiment_number)
losses_plot_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_' \
f'exp_{experiment_number}_training.{cfg.file_extension}'
if cfg.plot:
plot_losses(node.hp, losses_path + losses_name, save_as=plots_path + losses_plot_name)
###################
# Prediction plot #
###################
x_0 = sample_x_init(hp, n_batch=1)[0] # remove the leading (batch) axis
print("x0", x_0.shape, x_0)
us = np.random.uniform(low=node.system.bounds[-1, 0],
high=node.system.bounds[-1, 1],
size=(hp.num_steps + 1, hp.control_size))
us = jnp.array(us)
_, predicted_states1 = integrate_time_independent(
node.system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
_, predicted_states2 = integrate_time_independent(
learned_system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
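# IntegrationMethod.HEUN refers to Heun's method (the explicit trapezoidal /
# improved-Euler rule). A minimal sketch of one step, assuming a zero-order
# hold on the control over the step (an assumption; the exact treatment of u
# lives inside integrate_time_independent):
#   k1 = f(x_n, u_n)
#   k2 = f(x_n + h * k1, u_n)
#   x_{n+1} = x_n + (h / 2) * (k1 + k2)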
Path(plots_path).mkdir(parents=True, exist_ok=True)
save_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_' \
f'{hp.train_size}_{hp.val_size}_{hp.test_size}_exp_{experiment_number}' \
f'_prediction.{cfg.file_extension}'
plot(hp, node.system,
data={'x': predicted_states1,
'other_x': predicted_states2,
'u': us},
labels={'x': ' (true state trajectory)',
'other_x': ' (state trajectory predicted by learned model)',
'u': ' (chosen uniformly at random)'},
styles={'x': '-',
'other_x': '-x',
'u': '-'},
widths={'x': 3,
'other_x': 1,
'u': 1},
save_as=plots_path + save_name,
figsize=cfg.figsize)
| 8,692
| 42.034653
| 115
|
py
|
myriad
|
myriad-main/myriad/experiments/__init__.py
| 0
| 0
| 0
|
py
|
|
myriad
|
myriad-main/myriad/experiments/mle_sysid.py
|
# (c) 2021 Nikolaus Howe
from __future__ import annotations
import csv
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import optax
import pickle as pkl
from pathlib import Path
from typing import Dict, Tuple, Union
from myriad.config import Config, HParams, IntegrationMethod, SystemType
from myriad.defaults import param_guesses
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot, plot_losses
from myriad.utils import integrate_time_independent, integrate_time_independent_in_parallel, \
get_state_trajectory_and_cost, get_defect, sample_x_init, generate_dataset
def run_mle_sysid(hp: HParams, cfg: Config) -> None:
if hp.system not in param_guesses:
print("We do not currently support that kind of system for sysid. Exiting...")
return
test_system = hp.system()
# Create, or load, a train and validation (and test, unused) set.
dataset_size = hp.train_size + hp.val_size + hp.test_size
file_path = f'datasets/{hp.system.name}/mle_sysid/'
Path(file_path).mkdir(parents=True, exist_ok=True)
file_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}.d'
try:
dataset = pkl.load(open(file_path + file_name, 'rb'))
dataset = jnp.array(dataset)
print("loaded the dataset from file")
except FileNotFoundError as e:
print("unable to find the file, so we're making our own")
dataset = generate_dataset(hp, cfg)
pkl.dump(dataset, open(file_path + file_name, 'wb'))
assert dataset.shape == (dataset_size, hp.num_steps + 1, hp.state_size + hp.control_size)
if cfg.verbose:
print("full dataset", dataset.shape)
assert np.isfinite(dataset).all()
# Perform the learning
train_set, val_set, test_set = dataset[:hp.train_size], dataset[hp.train_size:-hp.test_size], dataset[-hp.test_size:]
if cfg.verbose:
print("train set", train_set.shape)
print("val set", val_set.shape)
print("test set", test_set.shape)
losses_path = f'losses/{hp.system.name}/mle_sysid/'
Path(losses_path).mkdir(parents=True, exist_ok=True)
losses_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}.l'
params_path = f'params/{hp.system.name}/mle_sysid/'
Path(params_path).mkdir(parents=True, exist_ok=True)
params_name = f'noise_{hp.noise_level}_smoothed_{hp.to_smooth}_{hp.train_size}_{hp.val_size}_{hp.test_size}.p'
plots_path = f'plots/{hp.system.name}/mle_sysid/'
Path(plots_path).mkdir(parents=True, exist_ok=True)
plots_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}'
try:
if cfg.load_params_if_saved:
params = pkl.load(open(params_path + params_name, 'rb'))
print("loaded params from file")
else:
raise FileNotFoundError
except FileNotFoundError as e:
print("unable to find the params file, so we'll train "
"our model to learn some, and then save them")
# Make an initial guess for the system parameters
params = param_guesses[hp.system]
# Initialize optimizer
opt = optax.adam(1e-3)
opt_state = opt.init(params)
# Calculate the MSE between the simulated trajectory and the real one
@jax.jit
def loss(given_params, dataset, epoch):
# print("the given params are", given_params)
def dynamics(x, u):
return test_system.parametrized_dynamics(given_params, x, u)
train_xs = dataset[:, :, :hp.state_size]
train_us = dataset[:, :, hp.state_size:]
start_xs = train_xs[:, 0, :]
# if cfg.verbose:
# print("train xs", train_xs.shape)
# print("train us", train_us.shape)
# print("start train xs", start_xs.shape)
_, predicted_states = integrate_time_independent_in_parallel(
dynamics, start_xs, train_us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
# assert jnp.isfinite(predicted_states).all()
# if cfg.verbose:
# print("the predicted states are", predicted_states.shape)
# print(predicted_states)
# Calculate the loss, using a discount factor
# to incentivize learning the earlier part of the
# trajectory first. This seems to avoid local minima.
# print('predicted', predicted_states.shape)
# print('true', train_xs.shape)
diff = predicted_states - train_xs
sq_diff = diff * diff
long = jnp.mean(sq_diff, axis=(0, 2)) # average all axes except time
discount = (1 - 1 / (1 + jnp.exp(2 + 0.000001 * epoch))) ** jnp.arange(len(long))
if hp.system in [SystemType.BACTERIA, SystemType.MOUNTAINCAR, SystemType.CARTPOLE]:
    print("min discount", discount[-1])  # note: inside a jitted function this prints a tracer, once at trace time
else:
    discount = 1.
return jnp.mean(long * discount)
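# A worked example of the sigmoid-scheduled discount above (illustrative only):
# at epoch 0 the per-step base is 1 - 1/(1 + e^2) ~= 0.881, so the first
# timestep weights are roughly [1.0, 0.881, 0.776, 0.684, ...]; as epoch grows,
# the 0.000001 * epoch term pushes the base toward 1 and the later timesteps
# regain full weight, so the fit gradually extends along the trajectory.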
# Gradient descent on the loss function already in scope
@jax.jit
def update(params: Dict[str, Union[float, jnp.ndarray]],
opt_state: optax.OptState, minibatch: jnp.ndarray,
epoch: int) \
-> Tuple[Dict[str, Union[float, jnp.ndarray]], optax.OptState]:
grads = jax.grad(loss)(params, minibatch, epoch)
updates, opt_state = opt.update(grads, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
# MLE train
epochs = []
train_losses = []
val_losses = []
best_val_loss = None
best_params = None
check_frequency = 500
count = 0
for epoch in range(hp.num_epochs * 10):
if epoch % check_frequency == 0:
cur_loss = loss(params, train_set, epoch)
val_loss = loss(params, val_set, epoch)
epochs.append(epoch)
train_losses.append(cur_loss)
val_losses.append(val_loss)
if cfg.verbose:
print("loss", cur_loss)
print("val loss", val_loss)
if np.isnan(cur_loss):
print("current params", params)
print("train set", train_set)
with open('t_set', 'wb') as afile:
pkl.dump(train_set, afile)
raise SystemExit
# print("params", params)
# writer.add_scalar('loss/train', cur_loss, epoch)
# writer.add_scalar('loss/val', val_loss, epoch)
# Break if we have converged
if best_val_loss is None or val_loss < best_val_loss:
best_val_loss = val_loss
best_params = params
count = 0
else:
if count > hp.early_stop_threshold:
print("stopping early at epoch", epoch)
break
# If we're still going, increase the count
count += check_frequency
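# A worked example of the patience logic above (hypothetical numbers): with
# check_frequency = 500 and hp.early_stop_threshold = 2000, the validation
# loss can fail to improve on five consecutive checks (count reaching 2500)
# before the sixth check sees count > 2000 and breaks; any improvement resets
# count to 0 and restores full patience.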
if epoch % 2500 == 0:
# Plot the situation
first_xs = train_set[0, :, :hp.state_size]
first_us = train_set[0, :, hp.state_size:]
@jax.jit
def dynamics(x, u):
return test_system.parametrized_dynamics(params, x, u)
_, predicted_states = integrate_time_independent(
dynamics, first_xs[0], first_us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
# if cfg.verbose:
# print("plotting xs", first_xs.shape)
# print("plotting us", first_us.shape)
# Plot states
plt.subplot(2, 1, 1)
plt.plot(first_xs, label="true xs")
plt.plot(predicted_states, label="predicted xs")
plt.legend()
# Plot controls
plt.subplot(2, 1, 2)
plt.plot(first_us, label="true us")
plt.legend()
# Save the plot
plt.savefig(f"{plots_path + plots_name}_epoch_{epoch}.png")
plt.close()
# Update the params
params, opt_state = update(params, opt_state, train_set, epoch)
print("saving the final params", best_params)
pkl.dump(best_params, open(params_path + params_name, 'wb'))
print("saving the train and val losses")
with open(losses_path + losses_name, 'w') as f:
write = csv.writer(f)
for i, ep in enumerate(epochs):
write.writerow([ep, train_losses[i], val_losses[i]])
# Use the best params for the plotting, etc.
params = best_params
print("the final params are", params)
# Now we compare the performance when using the learned model for planning,
# compared with the performance of using the original model for planning.
true_system = hp.system()
learned_system = hp.system(**params)
# Uncomment the following 12 lines if you want to verify the performance on the
# dataset that was used for training
# (note: this should _not_ be used as a form of evaluation!)
# dataset = pkl.load(open(file_path, 'rb'))
# dataset = jnp.array(dataset)
# first_xs = dataset[0, :, :state_size]
# first_us = dataset[0, :, state_size:]
# _, predicted_states1 = integrate_time_independent(
# p1.dynamics, first_xs[0], first_us, stepsize, num_steps, IntegrationMethod.HEUN
# )
# _, predicted_states2 = integrate_time_independent(
# p2.dynamics, first_xs[0], first_us, stepsize, num_steps, IntegrationMethod.HEUN
# )
# print("first xs", first_xs.shape)
# print("first us", first_us.shape)
# Test imitation on random controls and a random start point
x_0 = sample_x_init(hp, n_batch=1)[0] # remove the leading (batch) axis
print("x0", x_0.shape, x_0)
us = np.random.uniform(low=true_system.bounds[-1, 0],
high=true_system.bounds[-1, 1],
size=(hp.num_steps + 1, hp.control_size))
us = jnp.array(us)
_, predicted_states1 = integrate_time_independent(
true_system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
_, predicted_states2 = integrate_time_independent(
learned_system.dynamics, x_0, us, hp.stepsize, hp.num_steps, IntegrationMethod.HEUN
)
save_path = f'plots/{hp.system.name}/mle_sysid/'
Path(save_path).mkdir(parents=True, exist_ok=True)
save_name = f'{hp.train_size}_{hp.val_size}_' \
f'noise_{hp.noise_level}_{hp.test_size}_prediction.{cfg.file_extension}'
plot(hp, true_system,
data={'x': predicted_states1,
'other_x': predicted_states2,
'u': us},
labels={'x': ' (true state trajectory)',
'other_x': ' (state trajectory predicted by learned model)',
'u': ' (chosen uniformly at random)'},
styles={'x': '-',
'other_x': 'x-',
'u': '-'},
widths={'x': 3,
'other_x': 1,
'u': 1},
save_as=save_path + save_name,
figsize=cfg.figsize)
# plt.figure(figsize=(9, 7))
# plt.plot(predicted_states1, '.', label="true")
# plt.plot(predicted_states2, label="predicted")
# plt.title("Imitation")
# plt.legend()
# plt.show()
#
# Perform optimal control using the learned dynamics and the real dynamics
true_opt = get_optimizer(hp, cfg, true_system)
true_solution = true_opt.solve()
learned_opt = get_optimizer(hp, cfg, learned_system)
learned_solution = learned_opt.solve()
true_x, true_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_solution['u'])
true_defect = get_defect(true_system, true_x)
learned_x, learned_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, learned_solution['u'])
learned_defect = get_defect(true_system, learned_x)
save_path = f'plots/{hp.system.name}/mle_sysid/'
Path(save_path).mkdir(parents=True, exist_ok=True)
save_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}_planning.{cfg.file_extension}'
plot(hp, true_system,
data={'x': true_x,
'other_x': learned_x,
'u': true_solution['u'],
'other_u': learned_solution['u'],
'cost': true_c,
'other_cost': learned_c,
'defect': true_defect,
'other_defect': learned_defect},
labels={'x': ' (true state from controls planned with true model)',
'other_x': ' (true state from controls planned with learned model)',
'u': ' (planned with true model)',
'other_u': ' (planned with learned model)'},
styles={'x': '-',
'other_x': 'x-',
'u': '-',
'other_u': 'x-'},
widths={'x': 3,
'other_x': 1,
'u': 3,
'other_u': 1},
save_as=save_path + save_name,
figsize=cfg.figsize)
losses_plot_name = f'noise_{hp.noise_level}_{hp.train_size}_{hp.val_size}_{hp.test_size}' \
f'_training.{cfg.file_extension}'
plot_losses(hp, losses_path + losses_name, save_as=save_path + losses_plot_name)
| 12,626
| 36.247788
| 119
|
py
|
myriad
|
myriad-main/myriad/experiments/node_e2e_sysid.py
|
# (c) 2021 Nikolaus Howe
import jax
import jax.numpy as jnp
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import optax
import pickle as pkl
from pathlib import Path
from typing import Tuple
from myriad.config import HParams, Config, NLPSolverType
from myriad.defaults import learning_rates, param_guesses
from myriad.neural_ode.create_node import NeuralODE
from myriad.custom_types import Params, DParams
from myriad.trajectory_optimizers import get_optimizer
from myriad.plotting import plot
from myriad.systems.neural_ode.node_system import NodeSystem
from myriad.systems import get_name
from myriad.utils import integrate_time_independent, get_state_trajectory_and_cost, get_defect
def run_node_endtoend(hp, cfg, num_epochs=10_000, load_specific_epoch_params=None):
if hp.system not in param_guesses:
print("We do not currently support that kind of system for sysid. Exiting...")
return
data_path = f'datasets/{hp.system.name}/node_e2e_sysid/'
Path(data_path).mkdir(parents=True, exist_ok=True)
true_us_name = 'true_opt_us'
true_xs_name = 'true_opt_xs'
params_path = f'params/{hp.system.name}/node_e2e_sysid/'
Path(params_path).mkdir(parents=True, exist_ok=True)
params_name = 'node_e2e.p'
plots_path = f'plots/{hp.system.name}/node_e2e_sysid/'
Path(plots_path).mkdir(parents=True, exist_ok=True)
guesses_path = f'intermediate_guesses/{hp.system.name}/node_e2e_sysid/'
Path(guesses_path).mkdir(parents=True, exist_ok=True)
losses_path = f'losses/{hp.system.name}/node_e2e_sysid/'
Path(losses_path).mkdir(parents=True, exist_ok=True)
node = NeuralODE(hp, cfg, mle=False)
true_system = hp.system() # use the default params here
true_optimizer = get_optimizer(hp, cfg, true_system)
node_system = NodeSystem(node=node, true_system=true_system)
node_optimizer = get_optimizer(hp, cfg, node_system)
# Get the true optimal controls (and state),
# which we will try to imitate
try:
with open(data_path + true_us_name, 'rb') as myfile:
true_opt_us = jnp.array(pkl.load(myfile))
with open(data_path + true_xs_name, 'rb') as myfile:
true_opt_xs = jnp.array(pkl.load(myfile))
print("successfully loaded the saved optimal trajectory")
except Exception as e:
print("We haven't saved the optimal trajectory for this system yet, so we'll do that now")
true_solution = true_optimizer.solve()
true_opt_us = true_solution['u']
print("true opt us", true_opt_us.shape)
_, true_opt_xs = integrate_time_independent(
true_system.dynamics, true_system.x_0, true_opt_us, hp.stepsize, hp.num_steps, hp.integration_method)
print("true opt xs", true_opt_xs.shape)
with open(data_path + true_us_name, 'wb') as myfile:
pkl.dump(true_opt_us, myfile)
with open(data_path + true_xs_name, 'wb') as myfile:
pkl.dump(true_opt_xs, myfile)
try:
node.load_params(params_path + params_name)
print("It seems we've already trained for this system, so we'll go straight to evaluation.")
except FileNotFoundError as e:
print("unable to find the params, so we'll guess "
"and then optimize and save")
xs_and_us = true_optimizer.guess
lmbdas = jnp.zeros_like(true_optimizer.constraints(true_optimizer.guess))
# As a sanity check, use the true optimal controls and see if we diverge from them
# opt_xs_and_us = pkl.load(open('bleble', 'rb'))
# print('xs_and_us', xs_and_us.shape)
# Save these so we can reset them later
original_xs_and_us = jnp.array(xs_and_us)
original_lmbdas = jnp.array(lmbdas)
# Parameter optimization
opt = optax.adam(1e-4)
opt_state = opt.init(node.params)
# Control/state/duals optimizer
eta_x = hp.eta_x
eta_v = hp.eta_lmbda
if hp.system in learning_rates:
eta_x = learning_rates[hp.system]['eta_x']
eta_v = learning_rates[hp.system]['eta_v']
bounds = true_optimizer.bounds
@jax.jit
def lagrangian(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> float:
return (node_optimizer.parametrized_objective(params, xs_and_us)
+ lmbdas @ node_optimizer.parametrized_constraints(params, xs_and_us))
@jax.jit
def step(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> Tuple[jnp.ndarray, jnp.ndarray]:
x_bar = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
x_new = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x_bar, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
lmbda_new = lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x_new, lmbda, params)
return x_new, lmbda_new
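# `step` is an extragradient-style primal-dual update on the Lagrangian above:
# a provisional (clipped) primal step to x_bar, a corrected primal step using
# the gradient evaluated at x_bar, then dual ascent at the new primal point.
# A minimal sketch on the toy bilinear saddle L(x, v) = x * v, where
# grad_x L = v and grad_v L = x (illustrative only, not the code path here):
#   x_bar = x - eta * v
#   x_new = x - eta * v          # for this toy L, grad_x at x_bar is still v
#   v_new = v + eta * x_new      # = v * (1 - eta**2) + eta * x
# Naive simultaneous descent-ascent on this L spirals away from the saddle at
# the origin; evaluating the dual gradient at the updated primal point keeps
# the iterates bounded.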
@jax.jit
def step_x(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
x_bar = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
x_new = jnp.clip(x - eta_x * jax.grad(lagrangian, argnums=0)(x_bar, lmbda, params),
a_min=bounds[:, 0], a_max=bounds[:, 1])
return x_new
@jax.jit
def step_lmbda(x: jnp.ndarray, lmbda: jnp.ndarray, params: Params) -> jnp.ndarray:
lmbda_new = lmbda + eta_v * jax.grad(lagrangian, argnums=1)(x, lmbda, params)
return lmbda_new
jac_x = jax.jit(jax.jacobian(step_x, argnums=(0, 1, 2)))
jac_lmbda = jax.jit(jax.jacobian(step_lmbda, argnums=(0, 1, 2)))
jac_x_p = jax.jit(jax.jacobian(step_x, argnums=2))
jac_lmbda_p = jax.jit(jax.jacobian(step_lmbda, argnums=2))
@jax.jit
def many_steps_grad(xs_and_us: jnp.ndarray, lmbdas: jnp.ndarray, params: Params) -> DParams:
zx = jac_x_p(xs_and_us, lmbdas, params)
zx = jax.tree_util.tree_map(lambda x: x * 0., zx)
zlmbda = jac_lmbda_p(xs_and_us, lmbdas, params)
zlmbda = jax.tree_util.tree_map(lambda x: x * 0., zlmbda)
@jax.jit
def body_fun(i, vars):
xs_and_us, lmbdas, zx, zlmbda = vars
dx, dlmbda, dp = jac_x(xs_and_us, lmbdas, params)
x_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dx, el, axes=(1, 0)), zx)
lmbda_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dlmbda, el, axes=(1, 0)), zlmbda)
zx = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part) # multimap
xs_and_us = step_x(xs_and_us, lmbdas, params)
dx, dlmbda, dp = jac_lmbda(xs_and_us, lmbdas, params)
x_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dx, el, axes=(1, 0)), zx)
lmbda_part = jax.tree_util.tree_map(lambda el: jnp.tensordot(dlmbda, el, axes=(1, 0)), zlmbda)
zlmbda = jax.tree_util.tree_map(lambda a, b, c: a + b + c, dp, x_part, lmbda_part) # multimap
lmbdas = step_lmbda(xs_and_us, lmbdas, params)
return xs_and_us, lmbdas, zx, zlmbda
xs_and_us, lmbdas, zx, zlmbda = jax.lax.fori_loop(0, hp.num_unrolled, body_fun, (xs_and_us, lmbdas, zx, zlmbda))
return zx
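# `many_steps_grad` forward-accumulates the Jacobian of the unrolled solver
# state with respect to the model parameters. Writing the two half-updates as
# x' = step_x(x, lmbda, p) and lmbda' = step_lmbda(x', lmbda, p), body_fun
# maintains the standard forward-mode recurrence (per parameter-pytree leaf,
# with jnp.tensordot playing the role of the matrix product):
#   zx'     = (d step_x/dx) @ zx + (d step_x/dlmbda) @ zlmbda + d step_x/dp
#   zlmbda' = (d step_lmbda/dx) @ zx' + (d step_lmbda/dlmbda) @ zlmbda + d step_lmbda/dp
# so after hp.num_unrolled iterations zx holds d(xs_and_us)/d(params).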
# @jax.jit
# def control_imitation_loss(params: Params, init_xs_and_us: jnp.ndarray, init_lmbdas: jnp.ndarray):
# xs_and_us_new, lmbda_new = step(init_xs_and_us, init_lmbdas, params)
# xs, us = true_optimizer.unravel(xs_and_us_new)
# return jnp.mean((us - true_opt_us) ** 2) # same loss as "Diff. MPC"
@jax.jit
def simple_imitation_loss(xs_and_us: jnp.ndarray, epoch: int):
xs, us = true_optimizer.unravel(xs_and_us)
diff = us - true_opt_us
sq_diff = diff * diff
long = jnp.mean(sq_diff, axis=1)
discount = (1 - 1 / (1 + jnp.exp(2 + 0.000001 * epoch))) ** jnp.arange(len(long))
if hp.system in []:  # empty list: this branch never runs, so the discount below is effectively disabled
    print("min discount", discount[-1])
else:
    discount = 1.
return jnp.mean(long * discount)
@jax.jit
def lookahead_update(params: Params, opt_state: optax.OptState, xs_and_us: jnp.ndarray,
lmbdas: jnp.ndarray, epoch: int) -> Tuple[Params, optax.OptState]:
dloop_dp = many_steps_grad(xs_and_us, lmbdas, params)
dx_dloop = jax.grad(simple_imitation_loss)(xs_and_us, epoch)
dJdp = jax.tree_util.tree_map(lambda x: jnp.tensordot(dx_dloop, x, axes=(0, 0)), dloop_dp)
updates, opt_state = opt.update(dJdp, opt_state)
new_params = optax.apply_updates(params, updates)
return new_params, opt_state
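# `lookahead_update` is a straight chain-rule composition: many_steps_grad
# supplies dz/dp (the sensitivity of the unrolled decision variables
# z = xs_and_us to the network parameters), jax.grad(simple_imitation_loss)
# supplies dL/dz, and the tensordot contracts them leaf-by-leaf into
#   dJ/dp = (dL/dz)^T @ (dz/dp)
# which optax then treats like any ordinary gradient.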
# Use to record the guesses
ts = []
primal_guesses = []
dual_guesses = []
# Use to record the losses
imitation_losses = []
# print("true params", true_params)
# print("starting guess of params", node.params)
# u_lower = true_system.bounds[hp.state_size:, 0]
# u_upper = true_system.bounds[hp.state_size:, 1]
record_things_time = 10
save_and_reset_time = 1000
for epoch in range(num_epochs):
if epoch % save_and_reset_time == 0:
# Record more around the very start
record_things_time = 1
print("saving current params")
pkl.dump(node.params, open(params_path + str(epoch) + params_name, 'wb'))
print("saving guesses so far")
pkl.dump(ts, open(guesses_path + str(epoch) + 'ts', 'wb'))
pkl.dump(primal_guesses, open(guesses_path + str(epoch) + 'primals', 'wb'))
pkl.dump(dual_guesses, open(guesses_path + str(epoch) + 'duals', 'wb'))
print("saving imitation losses")
pkl.dump(imitation_losses, open(losses_path + str(epoch) + '_losses', 'wb'))
print("resetting guess")
# Reset the guess to a different random small amount
hp.key, subkey = jax.random.split(hp.key)
optimizer = get_optimizer(hp, cfg, true_system)
xs_and_us = optimizer.guess
lmbdas = original_lmbdas
if epoch % record_things_time == 0:
# Only have high-density recording around the start of each guess
if epoch >= 10:
record_things_time = 10
xs, cur_us = true_optimizer.unravel(xs_and_us)
plt.ion()
fig = plt.figure()
# if a_plt is None:
ax1 = fig.add_subplot(211)
a_plt = ax1.plot(true_opt_xs, label="true opt xs")
b_plt = ax1.plot(xs, label="xs from given controls")
plt.legend()
ax2 = fig.add_subplot(212)
c_plt = ax2.plot(true_opt_us, label="true opt us")
d_plt = ax2.plot(cur_us, label="current us")
plt.legend()
# plt.show()
# else:
# b_plt[0].set_ydata(predicted_states[:, 0])
# b_plt[1].set_ydata(predicted_states[:, 1])
# b_plt = ax1.plot(np.sin(np.arange(epoch, epoch+10)), label="predicted xs")
plt.savefig(f"{plots_path}progress_epoch_{epoch}.png")
plt.close()
fig.canvas.draw()
fig.canvas.flush_events()
# Save the current params
ts.append(epoch)
primal_guesses.append(np.array(xs_and_us))
dual_guesses.append(np.array(lmbdas))
# Save the current imitation loss
cur_loss = simple_imitation_loss(xs_and_us, epoch)
imitation_losses.append(cur_loss)
print(epoch, "loss", cur_loss)
# Take step(s) with the model
for _ in range(hp.num_unrolled):
xs_and_us, lmbdas = step(xs_and_us, lmbdas, node.params)
# Use the new technique for updating
node.params, opt_state = lookahead_update(node.params, opt_state, xs_and_us, lmbdas, epoch)
print("Saving the final params", node.params)
pkl.dump(node.params, open(params_path + params_name, 'wb'))
print("Saving the final guesses")
pkl.dump(ts, open(guesses_path + str(num_epochs - 1) + 'ts', 'wb'))
pkl.dump(primal_guesses, open(guesses_path + str(num_epochs - 1) + 'primals', 'wb'))
pkl.dump(dual_guesses, open(guesses_path + str(num_epochs - 1) + 'duals', 'wb'))
print("Saving the final losses")
pkl.dump(imitation_losses, open(losses_path + str(num_epochs - 1) + 'losses', 'wb'))
if cfg.plot:
# Reload the params and the recorded losses/guesses for plotting (the params are already in memory, but reloading keeps this block self-contained)
if load_specific_epoch_params is not None:
node.load_params(params_path + str(load_specific_epoch_params) + params_name)
losses = pkl.load(open(losses_path + str(load_specific_epoch_params) + '_losses', 'rb'))
ts = pkl.load(open(guesses_path + str(load_specific_epoch_params) + 'ts', 'rb'))
primal_guesses = pkl.load(open(guesses_path + str(load_specific_epoch_params) + 'primals', 'rb'))
else:
node.load_params(params_path + params_name)
losses = pkl.load(open(losses_path + str(num_epochs - 1) + 'losses', 'rb'))
ts = pkl.load(open(guesses_path + str(num_epochs - 1) + 'ts', 'rb'))
primal_guesses = pkl.load(open(guesses_path + str(num_epochs - 1) + 'primals', 'rb'))
# Check the lengths
# Drop a trailing entry when a log's length lands on a multiple of 100
# (this check originally tested for a length of exactly 10000)
if len(losses) % 100 == 0:
    print("clipping losses")
    losses = losses[:-1]
if len(ts) % 100 == 0:
    print("clipping ts")
    ts = ts[:-1]
if len(primal_guesses) % 100 == 0:
    print("clipping primal guesses")
    primal_guesses = primal_guesses[:-1]
# assert len(losses) == 999
# assert len(ts) == 999
# assert len(primal_guesses) == 999
##################
# Imitation loss #
##################
b = matplotlib.get_backend()
matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})
plt.rcParams["figure.figsize"] = (4, 3.3)
# Plot the imitation loss over time
plt.plot(ts, losses)
plt.grid()
plt.xlabel('iteration')
plt.ylabel('imitation loss')
plt.title("Imitation Loss")
plt.tight_layout()
plt.savefig(plots_path + f'imitation_loss.{cfg.file_extension}', bbox_inches='tight')
plt.close()
#######################
# Control performance #
#######################
true_state_trajectory, optimal_cost = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
# Plot the control performance over time
print("Plotting control performance over time")
parallel_get_state_trajectory_and_cost = jax.vmap(get_state_trajectory_and_cost, in_axes=(None, None, None, 0))
parallel_unravel = jax.vmap(true_optimizer.unravel, in_axes=0)
ar_primal_guesses = np.array(primal_guesses)
_, uus = parallel_unravel(ar_primal_guesses)
xxs, cs = parallel_get_state_trajectory_and_cost(hp, true_system, true_system.x_0, uus)
plt.axhline(optimal_cost, color='grey', linestyle='dashed')
plt.plot(ts, cs)
plt.grid()
plt.xlabel('iteration')
plt.ylabel('cost')
plt.title("Trajectory Cost")
plt.tight_layout()
plt.savefig(plots_path + f'control_performance.{cfg.file_extension}', bbox_inches='tight')
plt.close()
# Save the plot
# plt.savefig(f"{plots_path + plots_name}_epoch_{epoch}.png")
# plt.close()
# Plot the performance of planning with the final model
print("Plotting final planning performance")
hp = HParams(nlpsolver=NLPSolverType.EXTRAGRADIENT)  # re-plan with the extragradient NLP solver (note: this replaces the caller's hp)
cfg = Config()
node_optimizer = get_optimizer(hp, cfg, node_system)
learned_solution = node_optimizer.solve_with_params(node.params)
learned_x, learned_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, learned_solution['u'])
learned_defect = get_defect(true_system, learned_x)
true_x, true_c = get_state_trajectory_and_cost(hp, true_system, true_system.x_0, true_opt_us)
true_defect = get_defect(true_system, true_x)
plot(hp, true_system,
data={'x': true_opt_xs,
'other_x': learned_x,
'u': true_opt_us,
'other_u': learned_solution['u'],
'cost': true_c,
'other_cost': learned_c,
'defect': true_defect,
'other_defect': learned_defect},
labels={'x': ' (true state from controls planned with true model)',
'other_x': ' (true state from controls planned with learned model)',
'u': ' (planned with true model)',
'other_u': ' (planned with learned model)'},
styles={'x': '-',
'other_x': 'x-',
'u': '-',
'other_u': 'x-'},
widths={'x': 3,
'other_x': 1,
'u': 3,
'other_u': 1},
save_as=plots_path + f'planning_with_model.{cfg.file_extension}',
figsize=cfg.figsize)
#####################
# Decision var plot #
#####################
# Plot showing how the guess converges to the optimal trajectory
matplotlib.use(b)
plt.rcParams["figure.figsize"] = (7, 5.6)
print("Plotting convergence")
# plt.suptitle("Intermediate Trajectories")
title = get_name(hp)
if title is not None:
plt.suptitle(title)
plt.suptitle(r"Intermediate Trajectories" + r" $-$ " + title)
plt.subplot(2, 1, 1)
plt.grid()
# Plot intermediate states with transparency
for xs in xxs:
plt.plot(xs, color='orange', alpha=0.01)
plt.ylabel('state (x)')
plt.plot(true_opt_xs, label="true state from controls planned with true model", lw=3)
# Plot the final state curve
plt.plot(xxs[-1], 'x-', label="true state from final controls")
plt.legend(loc='upper right')
# Plot controls
plt.subplot(2, 1, 2)
plt.plot(true_opt_us, label="planned with true model", lw=3)
# Plot intermediate controls with transparency
for us in uus:
plt.plot(us, color='orange', alpha=0.01)
# Plot the final control curve
plt.ylabel('control (u)')
plt.xlabel('time (s)')
plt.plot(uus[-1], 'x-', label="controls at the end of training")
plt.legend(loc='upper right')
plt.grid()
plt.tight_layout()
plt.savefig(plots_path + 'node_e2e_cool_plot.png', dpi=300, bbox_inches='tight')
if __name__ == "__main__":
hp = HParams()
cfg = Config()
run_node_endtoend(hp, cfg)
| 18,018
| 38.342795
| 118
|
py
|
plotspec
|
plotspec-master/h2.py
|
from pdb import set_trace as stop #Use stop() for debugging
#from scipy import *
from pylab import *
from matplotlib.backends.backend_pdf import PdfPages #For outputting a pdf with multiple pages (or one page)
from mpl_toolkits.mplot3d import Axes3D #For making 3D plots
from astropy.modeling import models, fitting #Import astropy models and fitting for fitting linear functions for temperatures (e.g. rotation temp)
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
from scipy.stats import linregress
import copy
from scipy.linalg import lstsq
from bottleneck import *
from astropy.io import ascii
from numpy import random
from numpy.random import randn
#from numba import jit #Import numba
#Global variables, modify
default_single_temp = 1500.0 #K
default_single_temp_y_intercept = 22.0
alpha = arange(0.0, 10.0, 0.01) #Save range of power laws to fit extinction curve [A_lambda = A_lambda0 * (lambda/lambda0)^alpha]
lambda0 = 2.12 #Wavelength in microns for normalizing the power law extinction curve, here set to the K-band at 2.12 um
wave_thresh = 0.2 #Set wavelength threshold (here 0.2 um) for trying to measure extinction; the line pairs need to be far enough apart to get a handle on the extinction
#Global variables, do not modify
#cloudy_dir = '/Volumes/home/CLOUDY/'
#cloudy_dir = '/Volumes/IGRINS_Data/CLOUDY/'
#cloudy_dir = '/Users/kfkaplan/Desktop/CLOUDY/'
cloudy_dir = '/Volumes/IGRINS_Data_Backup/CLOUDY/'
# cloudy_dir = '/Users/kkaplan1/Desktop/workathome_igrins_data/CLOUDY/'
data_dir = 'data/' #Directory where H2 data is stored for cloudy
# energy_table = data_dir + 'energy_X.dat' #Name of table where Cloudy stores data on H2 electronic ground state rovibrational energies
# transition_table = data_dir + 'transprob_X.dat' #Name of table where Cloudy stores data on H2 transition probabilities (Einstein A coeffs.)
energy_table = data_dir + 'roueff_2019_energies.dat' #Path to table that stores data on H2 electronic ground state rovibrational energies from Table 2 in Roueff et al. (2019)
roueff_2019_table = data_dir + 'roueff_2019_table2.tsv' #Path to table storing theoretical molecular data from Table 2 in Roueff et al. (2019)
k = 0.69503476 #Boltzmann constant k in units of cm^-1 K^-1 (from http://physics.nist.gov/cuu/Constants/index.html)
h = 6.6260755e-27 #Planck constant in erg s, used for converting energy in wave numbers to cgs
c = 2.99792458e10 #Speed of light in cm s^-1, used for converting energy in wave numbers to cgs
#Make array of color names
max_color = 15.0 #Set maximum color index
color_gradient = cm.jet(arange(max_color)/max_color) #Set up color list from a canned color bar found in python
color_list = ['black','gray','darkorange','blue','red','green','orange','magenta','darkgoldenrod','purple','deeppink','darkolivegreen', 'cyan','yellow','beige']
symbol_list = ['o','v','8','x','s','*','h','D','^','8','1','o','o','o','o','o','o','o'] #Symbol list for rotation ladders on black and white Boltzmann plot
#for c in matplotlib.colors.cnames:
#color_list.append(c)
# def plot_with_subtracted_temperature(transitions):
# # row and column sharing
# f, axs = subplots(8, 2, sharex='col', sharey='row')
# for x in range(8):
# axs[i]
#This function is similar to bootstrapping but the values of y are varied in each iteration by a random amount based on their 1-sigma errors
#I have no idea if this is statistically valid but we'll find out if it works
# def wiggle_bootstrap_temp_fit(x, y, guess, sigma, bounds, n=10000):
# b = zeros(n) #Define arrays that will hold the results
# T = zeros(n)
# b_error = zeros(n)
# T_error = zeros(n)
# #wiggled_y = zeros(len(y))
# for i in range(n): #Loop through each iteration
# #for j in range(len(y)):
# # wiggled_y[j] = gauss(y[j], sigma[j])
# wiggled_y = y + randn(len(y))*sigma
# fit, cov = curve_fit(single_temp_func, x, wiggled_y, guess, bounds=bounds) #Run curve_fit with y varied
# b[i], T[i] = fit #Grab the results
# #b_err, T_err = sqrt(diag(cov))
# return mean(b), mean(T), std(b), std(T) #Return the results
def fit_exponential_for_temp(x, y, sigma, n=10000, guess=array([500.0, 2000.0, 100.0])):
# guess = array([500.0, 2000.0, 1.0]) #Setup initial guesses and bounds for curve_fit
upper_bound = array([1e9, 2e7, 1e5])
lower_bound = array([0., 0., 0.])
goodpix = isfinite(y) & (y != 0.) #Mask unused or bad pixels
x_filtered = x[goodpix]
y_filtered = y[goodpix]
sample_size = len(y_filtered)
sigma_filtered = sigma[goodpix]
fit, cov = curve_fit(exponential_form_temp_func, x_filtered, y_filtered, p0=guess, bounds=[lower_bound, upper_bound])
A, T, b = fit
A_err, T_err, b_err = sqrt(diag(cov))
# fit_init = models.Exponential1D(amplitude=A, tau=-T)#+ models.Const1D(amplitude=b)
# fitter = fitting.LevMarLSQFitter(calc_uncertainties=True)
# for i in range(10): #Iterate a few times to ensure a good fit
# fit = fitter(fit_init, x_filtered, y_filtered, weights=1/sigma_filtered)
# fit_init = models.Exponential1D(amplitude=fit.amplitude, tau=fit.tau)#+ models.Const1D(amplitude=fit.amplitude_1)
# T = -fit.tau.value
# try:
# T_err = fit.stds['tau']
# except:
# T_err = nan
# b = log(fit.amplitude)
# try:
# b_err = log(fit.stds['amplitude'])
# except:
# b_err = nan
# breakpoint()
# best_fit = copy.deepcopy(fit)
# if sample_size > 2: #Jackknife sanity check
# a = arange(sample_size) #Jackknife test
# T_array = zeros(sample_size-1)
# A_array = zeros(sample_size-1)
# for i in range(sample_size-1):
# resample = concatenate([a[:i], a[(i+1):]]) #random_integers(0, sample_size-1, sample_size)
# fit = fitter(fit_init, x_filtered[resample], y_filtered[resample], weights=1/sigma_filtered[resample])
# T_array[i] = -fit.tau.value
# A_array[i] = fit.amplitude.value
# print('Jackknife technique sanity check')
# print('b = ', mean(log(A_array)), '+/-', std(log(A_array)))
# print('T = ', mean(T_array), '+/-', std(T_array))
# n = 10000 #Wobble bootstrap or flux resampling test
# T_array = zeros(n)
# A_array = zeros(n)
# best_fit_y = best_fit(x_filtered)
# weights = 1/sigma_filtered
# for i in range(n):
# #vary_by = randn(sample_size)*sigma_filtered
# #sigma_varied_by = sqrt(sigma_filtered**2 + vary_by**2)
# try:
# fit = fitter(fit_init, x_filtered, best_fit_y+randn(sample_size)*sigma_filtered, weights=weights)
# T_array[i] = -fit.tau.value
# A_array[i] = fit.amplitude.value
# except:
# print('Ooops a bad fit was found. Moving on.')
# print('Wobble bootstrap sanity check')
# goodfits = T_array > 0.
# print('b = ', mean(log(A_array[goodfits])), '+/-', std(log(A_array[goodfits])))
# print('T = ', mean(T_array[goodfits]), '+/-', std(T_array[goodfits]))
#breakpoint()
return b, T, b_err, T_err
def bootstrap_temp_fit(x, y, guess, n=10000):
b = zeros(n) #Define arrays that will hold the results
T = zeros(n)
x_new = (x[:,newaxis] * ones(shape(y))).flatten()
y_new = y.flatten()
goodpix = isfinite(y_new)
x_new = x_new[goodpix]
y_new = y_new[goodpix]
sample_size = len(y_new)
line_init = models.Linear1D(intercept=guess[0], slope=guess[1])
fitter = fitting.LinearLSQFitter(calc_uncertainties=False)
for i in range(n): #Loop through each iteration
print(i,'/',n)
sample = random.randint(0, sample_size, sample_size) #Resample indices with replacement (numpy's random_integers is deprecated and was not imported)
fit = fitter(line_init, x_new[sample], y_new[sample])
b[i] = fit.intercept.value
T[i] = -1.0/fit.slope.value
# breakpoint() #Debugging leftover; commented out so the bootstrap can run non-interactively
return mean(b), mean(T), std(b), std(T) #Return the results
def get_surface(h2obj, v_range=[2,13], s2n_cut=-1.0): #Find and plot the "fundamental plane"
x = h2obj.J.u #Set up x,y,z for all data points
y = h2obj.V.u
z = log(h2obj.N)
#surf = find_surface(x,y,z) #Fit surface
# Fit the data using astropy.modeling
i = (h2obj.s2n > s2n_cut) & (h2obj.N > 0.) & (h2obj.V.u >= v_range[0]) & (h2obj.V.u <= v_range[1]) #Find datapoints in high enough vibration states
j = (h2obj.s2n > s2n_cut) & (h2obj.N > 0.) #Find all useful datapoints
p_init = models.Polynomial2D(degree=1)
fit_p = fitting.LevMarLSQFitter()
p = fit_p(p_init, x[i], y[i], z[i])
print(p)
stop()
surf_obj = make_line_list() #Set up H2 line object to store results from fit
surf_obj.N = e**p(surf_obj.J.u, surf_obj.V.u) #Store results from fit
return surf_obj #Return H2 object storing surface fit
# def find_surface(x, y, z, iterations=10): #Iteratively fit surface
# tot_delta_z = zeros(len(z)) #Store all changes in delta_z, and x and y slopes
# z = copy.deepcopy(z) #Make sure we don't modify the original
# for i in range(iterations): #Loop through number of iterations
# #delta_z = median(z) #Find delta z
# #z = z - delta_z #Subtract delta z
# #stop()
# #tot_delta_z = tot_delta_z + delta_z #Store total change in z direction
# fit_x = linregress(x, z) #Do a linear fit to x and get the difference
# delta_z = x*fit_x.slope + fit_x.intercept
# #stop()
# z = z - delta_z #Subtract delta z
# tot_delta_z = tot_delta_z + delta_z #Store total change in z direction
# fit_y = linregress(y, z)
# delta_z = y*fit_y.slope + fit_y.intercept
# #stop()
# z = z - delta_z #Subtract delta z
# tot_delta_z = tot_delta_z + delta_z #Store total change in z direction
# stop()
# return(tot_delta_z) #Return the z value of the surface
def import_black_and_van_dishoeck(): #Read in line intensities for model 14 from Black & van Dishoeck (1987) table 3, then set column densities to that model
file_name = 'data/black_and_van_dishoeck_1987_table3.dat' #Name of electronic table
labels = loadtxt(file_name, usecols=(0,), dtype='str', unpack=True, delimiter='\t') #Read in H2 line labels
intensities = loadtxt(file_name, usecols=(1,), dtype='float', unpack=True, delimiter='\t') #Read in intensities of each line for model 14
model = make_line_list() #Create object
model.read_model(labels, intensities) #Stick intensities into this line list object
model.calculate_column_density() #Calculate column densities from model 14
model.normalize()
return model #Return object
#Read in an ascii file in the format line \t flux \t sigma, normalize, and calculate column densities from the fluxes given
def import_data(file_name, normalize_to='5-3 O(3)'):
labels = loadtxt(file_name, usecols=(0,), dtype='str', unpack=True, delimiter='\t') #Read in H2 line labels
flux, sigma = loadtxt(file_name, usecols=(1,2,), dtype='float', unpack=True, delimiter='\t') #Read in line fluxes and uncertainties
h = make_line_list() #Create object
h.read_data(labels, flux, sigma) #Stick fluxes and uncertainties into this line list object
h.calculate_column_density(normalize=False) #Calculate column densities from data
h.normalize(label=normalize_to) #Normalize to the line defined above
return h #Return object
#This definition reads in the data from Takahashi & Uehara 2001 and creates an H2 transitions object storing the data; this is for comparing
#the IGRINS data to formation pumping models
def read_takahashi_uehara_2001_model():
labels = loadtxt('h2_models/takahashi_uehara_2001.dat', unpack=True, dtype='str', delimiter='\t', usecols=(0,)) #Read in line list wavelengths
data_ice_A, data_ice_B, data_Si_A, data_Si_B, data_C_A, data_C_B = loadtxt('h2_models/takahashi_uehara_2001.dat', unpack=True, dtype='f', delimiter='\t', usecols=(1,2,3,4,5,6,))
ice_A = make_line_list() #Make line lists for icy mantle, Si, and carbonaceous dust types from the models
ice_B = make_line_list()
Si_A = make_line_list()
Si_B = make_line_list()
C_A = make_line_list()
C_B = make_line_list()
for i in range(len(labels)): #Loop through each line in the table
match = ice_A.label == labels[i] #Match H2 transition objects to the line and set the flux to the intensity in the table
ice_A.F[match] = data_ice_A[i] #Take intensity from table and paint it onto the flux for the respective lines for the respective models
ice_B.F[match] = data_ice_B[i]
Si_A.F[match] = data_Si_A[i]
Si_B.F[match] = data_Si_B[i]
C_A.F[match] = data_C_A[i]
C_B.F[match] = data_C_B[i]
ice_A.calculate_column_density() #Given the intensities, now convert to column densities
ice_B.calculate_column_density()
Si_A.calculate_column_density()
Si_B.calculate_column_density()
C_A.calculate_column_density()
C_B.calculate_column_density()
for i in range(len(labels)): #Loop through each line in the table
match = ice_A.label == labels[i] #Match H2 transition objects to the line and set the column density N to what is in the table
same_upper_level = (ice_A.V.u == ice_A.V.u[match]) & (ice_A.J.u == ice_A.J.u[match]) #Find all lines from the same upper state
ice_A.N[same_upper_level] = ice_A.N[match]
ice_B.N[same_upper_level] = ice_B.N[match]
Si_A.N[same_upper_level] = Si_A.N[match]
Si_B.N[same_upper_level] = Si_B.N[match]
C_A.N[same_upper_level] = C_A.N[match]
C_B.N[same_upper_level] = C_B.N[match]
return(ice_A, ice_B, Si_A, Si_B, C_A, C_B) #Return objects
#def multi_temp_function(x, c1, c2, c3, c4, c5, T1, T2, T3, T4, T5): #Function of 5 temperatures and coefficients for fitting boltzmann diagrams of gas with multiple thermal components
# return c1*exp(-x/T1) + c2*exp(-x/T2) + c3*exp(-x/T3) + c4*exp(-x/T4) + c5*exp(-x/T5)
def multi_temp_func(x, b, c1, c2, c3, T1, T2, T3): #Function of 3 temperatures and coefficients for fitting Boltzmann diagrams of gas with multiple thermal components
    return b + log(c1*e**(-x/T1) + c2*e**(-x/T2) + c3*e**(-x/T3))
def single_temp_func(x, b, T): #Function of a single temperature for fitting Boltzmann diagrams for gas with a single thermal component
    return b - (x/T)
def exponential_form_temp_func(x, A, T, b): #Function for a single temperature for fitting a Boltzmann diagram for gas with a single thermal component, but in exponential form
return b + A*exp(-x/T)
def linear_function(x, m, b): #Define a linear function for use with scipy.optimize curve_fit, for fitting rotation temperatures
return m*x + b
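#How these fit functions relate (a worked example, not new functionality): a
#single-temperature Boltzmann population obeys ln(N/g) = b - E_u/(k*T), so if
#the x axis is taken as E_u/k in Kelvin, single_temp_func fits a line of slope
#-1/T, and exponential_form_temp_func is the same exponential written without
#the log plus a constant floor b. For instance, at T = 1500 K (the
#default_single_temp above), two levels separated by 5000 K in E_u/k differ
#in population by a factor of exp(-5000/1500) ~= 0.036.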
#Since scipy sucks, find uncertainty in slope for just two points
def two_point_slope_uncertainity(x,y,sig_y):
#slope = (y[1]-y[0])/(x[1]-x[0]) #get slope through two points
extreme_1 = (y[1]+sig_y[1]-y[0]+sig_y[0])/(x[1]-x[0]) #Get one extreme slope (endpoints pushed apart by their errors)
extreme_2 = (y[1]-sig_y[1]-y[0]-sig_y[0])/(x[1]-x[0]) #Get other extreme slope (endpoints pushed together by their errors)
sig_slope = abs(extreme_1 - extreme_2) / 2.0 #Half the spread between the two extremes (the original signs made the extremes cancel for equal errors)
return sig_slope
def make_line_list():
#Read in molecular data
# level_V, level_J = loadtxt(energy_table, usecols=(0,1), unpack=True, dtype='int', skiprows=1) #Read in data for H2 ground state rovibrational energy levels
# level_E = loadtxt(energy_table, usecols=(2,), unpack=True, dtype='float', skiprows=1)
# trans_Vu, trans_Ju, trans_Vl, trans_Jl = loadtxt(transition_table, usecols=(1,2,4,5), unpack=True, dtype='int', skiprows=1) #Read in data for the transitions (ie. spectral lines which get created by the emission of a photon)
# trans_A = loadtxt(transition_table, usecols=(6,), unpack=True, dtype='float', skiprows=1) #Read in data for the transitions (ie. spectral lines which get created by the emission of a photon)
# n_transitions = len(trans_Vu) #Number of transitions
#Organize molecular data into objects storing J, V, Energy, and A values
# J_obj = J(trans_Ju, trans_Jl) #Create object storing upper and lower J levels for each transition
# V_obj = V(trans_Vu, trans_Vl) #Create object storing upper and lower V levels for each transition
# A = trans_A
# E_u = zeros(n_transitions)
# E_l = zeros(n_transitions)
# for i in range(n_transitions):
# E_u[i] = level_E[ (level_V == trans_Vu[i]) & (level_J == trans_Ju[i]) ]
# E_l[i] = level_E[ (level_V == trans_Vl[i]) & (level_J == trans_Jl[i]) ]
# E_obj = E(E_u, E_l) #Create object for storing energies of upper and lower rovibrational levels for each transition
#Create and return the transitions object which stores all the information for each transition
t = ascii.read(roueff_2019_table, data_start=3)
J_obj = J(t['Ju'].data, t['Jl'].data) #Create object storing upper and lower J levels for each transition
V_obj = V(t['vu'].data, t['vl'].data) #Create object storing upper and lower V levels for each transition
E_obj = E(36118.0695+t['Eu'], 36118.0695+t['Eu']-t['sigma']) #Create object for storing energies of upper and lower rovibrational levels for each transition
A = t['A'] #Grab transition probabilities
transitions = h2_transitions(J_obj, V_obj, E_obj, A) #Create main transitions object
return transitions #Return transitions object
#Calculate a weighted mean for extinction (A_V)
def calculate_exctinction(transitions, use_Av = [0.0,50.0]):
A_lambda = array([ 0.482, 0.282, 0.175, 0.112, 0.058]) #(A_lambda / A_V) extinction curve from Rieke & Lebofsky (1985) Table 3
l = array([ 0.806, 1.22 , 1.63 , 2.19 , 3.45 ]) #Wavelengths for extinction curve from Rieke & Lebofsky (1985)
extinction_curve = interp1d(l, A_lambda, kind='quadratic') #Create interpolation object for extinction curve from Rieke & Lebofsky (1985)
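#Usage note: extinction_curve maps wavelength in microns to A_lambda/A_V by
#quadratic interpolation of the five Rieke & Lebofsky points above, e.g.
#extinction_curve(2.12) returns the K-band value, slightly above the
#tabulated 0.112 at 2.19 um.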
n_doubles_found = 0 #Count doubles (pair from same upper state)
n_trips_found = 0 #Count trips
i = (transitions.F != 0.0) & (transitions.s2n > 3.0) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
J_upper_found = unique(transitions.J.u[i]) #Find J for all (detected) transition upper states
V_upper_found = unique(transitions.V.u[i]) #Find V for all (detected) transition upper states
lines_found = []
Avs = [] #Store Avs found
sigma_Avs = [] #store uncertainty in Avs
for V in V_upper_found: #Check each upper V for pairs
for J in J_upper_found: #Check each upper J for pairs
match_upper_states = (transitions.J.u[i] == J) & (transitions.V.u[i] == V) #Find all transitions from the same upper J and V state
waves = transitions.wave[i][match_upper_states] #Store wavelengths of all found transitions
s = argsort(waves) #sort by wavelength
waves = waves[s]
labels = transitions.label[i][match_upper_states][s]
if len(waves) == 2 and abs(waves[0]-waves[1]) > wave_thresh: #If a single pair of lines from the same upper state is found, calculate differential extinction for this single pair
print('For '+labels[0]+'/'+labels[1]+' '+str(waves[0])+'/'+str(waves[1])+':')
ratio_of_ratios = transitions.flux_ratio(labels[0], labels[1], sigma=True) / transitions.intrinsic_ratio(labels[0], labels[1])
Av = -2.5*log10(ratio_of_ratios[0][0])/(extinction_curve(waves[0])-extinction_curve(waves[1])) #Calculate extinction in Av
sigma_Av = abs(-2.5 * (ratio_of_ratios[1][0])/(Av*log(10.0))) #Calculate uncertainty in extinction in Av
print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
print('Calculated A_V = ', Av)
lines_found.append(labels[0]) #Store line labels of lines found
lines_found.append(labels[1])
n_doubles_found += 1 #Count this pair (the summary print below otherwise always reports zero doubles)
if Av > use_Av[0] and Av < use_Av[1]: #If Av is within a reasonable range
Avs.append(Av) #Store Avs
sigma_Avs.append(sigma_Av) #Store sigma Avs
elif len(waves) == 3: #If three lines are found from the same upper state, calculate differential extinction from differences between all three lines
lines_found.append(labels[0]) #Store line labels of lines found
lines_found.append(labels[1])
lines_found.append(labels[2])
#Pair 1
if abs(waves[0] - waves[1]) > wave_thresh: #check if pair of lines are far enough apart
print('For '+labels[0]+'/'+labels[1]+' '+str(waves[0])+'/'+str(waves[1])+':')
ratio_of_ratios = transitions.flux_ratio(labels[0], labels[1], sigma=True) / transitions.intrinsic_ratio(labels[0], labels[1])
Av = -2.5*log10(ratio_of_ratios[0][0])/(extinction_curve(waves[0])-extinction_curve(waves[1])) #Calculate extinction in Av
sigma_Av = abs(-2.5 * (ratio_of_ratios[1][0])/(Av*log(10.0))) #Calculate uncertainty in extinction in Av
print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
print('Calculated A_V = ', Av)
if Av > use_Av[0] and Av < use_Av[1]: #If Av is within a reasonable range
Avs.append(Av) #Store Avs
sigma_Avs.append(sigma_Av) #Store sigma Avs
#Pair 2
if abs(waves[0] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
print('For '+labels[0]+'/'+labels[2]+' '+str(waves[0])+'/'+str(waves[2])+':')
ratio_of_ratios = transitions.flux_ratio(labels[0], labels[2], sigma=True) / transitions.intrinsic_ratio(labels[0], labels[2])
Av = -2.5*log10(ratio_of_ratios[0][0])/(extinction_curve(waves[0])-extinction_curve(waves[2])) #Calculate extinction in Av
sigma_Av = abs(-2.5 * (ratio_of_ratios[1][0])/(Av*log(10.0))) #Calculate uncertainty in extinction in Av
print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
print('Calculated A_V = ', Av)
if Av > use_Av[0] and Av < use_Av[1]: #If Av is within a reasonable range
Avs.append(Av) #Store Avs
sigma_Avs.append(sigma_Av) #Store sigma Avs
#Pair 3
if abs(waves[1] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart (this guard previously only wrapped the triple counter below)
    print('For '+labels[1]+'/'+labels[2]+' '+str(waves[1])+'/'+str(waves[2])+':')
    ratio_of_ratios = transitions.flux_ratio(labels[1], labels[2], sigma=True) / transitions.intrinsic_ratio(labels[1], labels[2])
    Av = -2.5*log10(ratio_of_ratios[0][0])/(extinction_curve(waves[1])-extinction_curve(waves[2])) #Calculate extinction in Av
    sigma_Av = abs(-2.5 * (ratio_of_ratios[1][0])/(Av*log(10.0))) #Calculate uncertainty in extinction in Av
    print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
    print('Calculated A_V = ', Av)
    if Av > use_Av[0] and Av < use_Av[1]: #If Av is within a reasonable range
        Avs.append(Av) #Store Avs
        sigma_Avs.append(sigma_Av) #Store sigma Avs
n_trips_found += 1 #Count this triple
print('Number of pairs from same upper state = ', n_doubles_found)
print('Number of triples from same upper state = ', n_trips_found)
Avs = array(Avs) #Convert to numpy arrays to do vector math to figure out weighted mean
sigma_Avs = array(sigma_Avs)
weights = sigma_Avs**-2
summed_weights = nansum(weights)
weighted_mean_Av = nansum(Avs * weights) / summed_weights
weighted_sigma_Av = sqrt(1.0 / summed_weights)
print('Weighted mean Av = %4.2f' % weighted_mean_Av + ' +/- %4.2f' % weighted_sigma_Av)
return weighted_mean_Av, weighted_sigma_Av
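#A worked example of the inverse-variance weighting above (hypothetical
#numbers): A_V measurements of 10 +/- 1 and 14 +/- 2 get weights 1 and 0.25,
#so the weighted mean is (10*1 + 14*0.25)/1.25 = 10.8 with uncertainty
#sqrt(1/1.25) ~= 0.89; the noisier measurement is pulled in but barely moves
#the answer.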
#Simple algorithm to vary alpha (extinction curve power law) and A_K and do a chi-sq
#minimization to find the best fit for the observed - intrinsic line ratios
def find_best_extinction_correction(h_in, s2n_cut=1.0):
#First find all the line pairs and store their indicies
pair_a = [] #Store index numbers of one of a set of line pairs from the same upper state
pair_b = [] #Store index numbers of the other of a set of line pairs from the same upper state
i = (h_in.F != 0.0) & (h_in.s2n > 0.5) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
J_upper_found = unique(h_in.J.u[i]) #Find J for all (detected) transition upper states
V_upper_found = unique(h_in.V.u[i]) #Find V for all (detected) transition upper states
for V in V_upper_found: #Check each upper V for pairs
for J in J_upper_found: #Check each upper J for pairs
i = (h_in.F != 0.0) & (h_in.s2n > s2n_cut) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
match_upper_states = (h_in.J.u[i] == J) & (h_in.V.u[i] == V) #Find all transitions from the same upper J and V state
waves = h_in.wave[i][match_upper_states] #Store wavelengths of all found transitions
s = argsort(waves) #sort by wavelength
waves = waves[s]
labels = h_in.label[i][match_upper_states][s]
if len(waves) == 2 and abs(waves[0]-waves[1]) > wave_thresh: #If a single pair of lines from the same upper state is found, calculate observed vs. intrinsic ratio
pair_a.append(where(h_in.wave == waves[0])[0][0])
pair_b.append(where(h_in.wave == waves[1])[0][0])
elif len(waves) == 3: #If three lines are found from the same upper state, calculate differential extinction from differences between all three lines
#Pair 1
if abs(waves[0] - waves[1]) > wave_thresh:
pair_a.append(where(h_in.wave == waves[0])[0][0])
pair_b.append(where(h_in.wave == waves[1])[0][0])
#Pair 2
if abs(waves[0] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
pair_a.append(where(h_in.wave == waves[0])[0][0])
pair_b.append(where(h_in.wave == waves[2])[0][0])
if abs(waves[1] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
pair_a.append(where(h_in.wave == waves[1])[0][0])
pair_b.append(where(h_in.wave == waves[2])[0][0])
pair_a = array(pair_a) #Turn lists of indices into arrays of indices
pair_b = array(pair_b)
chisqs = [] #Store chisq for each possible extinction and extinction law
alphas = [] #Store alphas for each possible extinction and extinction law
A_Ks = [] #Store extinctions for each possible extinction and extinction law
for a in arange(0.5,3.0,0.1): #Loop through different extinction law powers
    for A_K in arange(0.0,5.0,0.01): #Loop through different possible K band extinctions
h = copy.deepcopy(h_in) #Make a copy of the input h2 line object
A_lambda = A_K * h.wave**(-a) / lambda0**(-a) #Calculate an extinction correction
h.F *= 10**(0.4*A_lambda) #Apply extinction correction
h.calculate_column_density() #Calculate column densities from each transition, given the guess at extinction correction
chisq = nansum((h.N[pair_a] - h.N[pair_b])**2 / h.N[pair_b]) #Calculate chisq from all line pairs that arise from same upper states
chisqs.append(chisq) #Store chisq and corresponding variables for extinction correction
alphas.append(a)
A_Ks.append(A_K)
chisqs = array(chisqs) #Convert lists to arrays
alphas = array(alphas)
A_Ks = array(A_Ks)
best_fit = chisqs == nanmin(chisqs) #Find the minimum chisq and best fit alpha and A_K
best_fit_A_K = A_Ks[best_fit]
best_fit_alpha = alphas[best_fit]
print('Best fit alpha =', best_fit_alpha) #Print results so user can see
print('Best fit A_K = ', best_fit_A_K)
A_lambda = best_fit_A_K * h_in.wave**(-best_fit_alpha) / lambda0**(-best_fit_alpha) #Calculate an extinction correction
h_in.F *= 10**(0.4*A_lambda) #Apply extinction correction
h_in.calculate_column_density() #Calculate column densities from each transition, given the new extinction correction
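#The grid search above assumes a power-law extinction curve normalized at
#lambda0 (the K band): A_lambda = A_K * (lambda/lambda0)**(-alpha), and each
#flux is dereddened by the factor 10**(0.4*A_lambda). Illustrative numbers:
#with alpha = 2 and A_K = 1 mag, a line at 1.5 um gets
#A_lambda = (1.5/2.12)**-2 ~= 2.0 mag, i.e. its flux is multiplied by
#10**0.8 ~= 6.3.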
#Test extinction correction by animating stepping through alpha and A_K space and making v_plots of the results
def animate_extinction_correction(h_in):
with PdfPages('animate_extinction_correction.pdf') as pdf:
for a in [0.5,1.0,1.5,2.0,2.5,3.0]:
for A_K in arange(0.0,3.0,0.1):
h = copy.deepcopy(h_in)
A_lambda = A_K * h.wave**(-a) / lambda0**(-a)
h.F *= 10**(0.4*A_lambda)
h.calculate_column_density()
h.v_plot()
suptitle('alpha = '+str(a)+' A_K = '+str(A_K))
pdf.savefig()
#Iterate adding extinction curves until you are satisfied
def iterate_extinction_curve(transitions):
a = float(input('What value of alpha do you want to use? ')) #Prompt the user for alpha (input() returns a string in Python 3, so cast to float)
A_K = float(input('What value of A_K do you want to use? ')) #Prompt the user for A_K
while a != 0.0: #Loop until user is satisfied with extinction correction
    fit_extinction_curve(transitions, a=a, A_K=A_K) #Try fitting previously inputted extinction curve, plot results
    a = float(input('What value of alpha do you want to use? (0. to stop iteration) ')) #Prompt the user for alpha
    A_K = float(input('What value of A_K do you want to use? ')) #Prompt the user for A_K
#Definition that takes all the H2 lines with determined column densities and calculates as many differential extinctions as it can between
#pairs of lines that come from the same upper state, and fits an extinction curve (power law here) to them
def fit_extinction_curve(transitions, a=0.0, A_K=0.0):
figure(1)
clf() #Clear plot field
figure(2)
clf() #Clear plot field
figure(3)
clf() #Clear plot field
n_doubles_found = 0 #Count doubles (pair from same upper state)
n_trips_found = 0 #Count trips
#i = (transitions.N != 0.0) & (transitions.s2n > 10.0) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
i = (transitions.F != 0.0) & (transitions.s2n > 0.5) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
J_upper_found = unique(transitions.J.u[i]) #Find J for all (detected) transition upper states
V_upper_found = unique(transitions.V.u[i]) #Find V for all (detected) transition upper states
pairs = [] #Set up array of line pairs for measuring the differential extinction A_lambda1-lambda2
observed_to_intrinsic = []
wave_sets = []
for V in V_upper_found: #Check each upper V for pairs
for J in J_upper_found: #Check each upper J for pairs
match_upper_states = (transitions.J.u[i] == J) & (transitions.V.u[i] == V) #Find all transitions from the same upper J and V state
waves = transitions.wave[i][match_upper_states] #Store wavelengths of all found transitions
#N = transitions.N[i][match_upper_states] #Store all column densities for found transitions
F = transitions.F[i][match_upper_states]
Fsigma = transitions.sigma[i][match_upper_states]
intrinsic_constants = (transitions.g[i][match_upper_states] * transitions.E.diff()[i][match_upper_states] * transitions.A[i][match_upper_states]) #Get constants for calculating the intrinsic ratios
#Nsigma = transitions.Nsigma[i][match_upper_states] #Grab uncertainty in column densities
if len(waves) == 2 and abs(waves[0]-waves[1]) > wave_thresh: #If a single pair of lines from the same upper state is found, calculate differential extinction for this single pair
    A_delta_lambda = -2.5*log10((F[0]/F[1]) / (intrinsic_constants[0]/intrinsic_constants[1])) #Calculate differential extinction between two H2 lines
    sigma_A_delta_lambda = (2.5 / log(10.0)) * sqrt( (Fsigma[0]/F[0])**2 + (Fsigma[1]/F[1])**2 ) #Calculate uncertainty in the differential extinction between two H2 lines
    pair = differential_extinction([waves[0], waves[1]], A_delta_lambda, sigma_A_delta_lambda) #Store wavelengths, differential extinction, and uncertainty in a differential_extinction object
pairs.append(pair) #Save a single pair
wave_sets.append(waves)
n_doubles_found = n_doubles_found + 1
observed_to_intrinsic.append((F[0]/F[1]) / (intrinsic_constants[0]/intrinsic_constants[1]))
elif len(waves) == 3: #If three lines are found from the same upper state, calculate differential extinction from differences between all three lines
#Pair 1
if abs(waves[0] - waves[1]) > wave_thresh: #check if pair of lines are far enough apart
A_delta_lambda = -2.5*log10((F[0]/F[1]) / (intrinsic_constants[0]/intrinsic_constants[1])) #Calculate differential extinction between two H2 lines
sigma_A_delta_lambda = (2.5 / log(10.0)) * sqrt( (Fsigma[0]/F[0])**2 + (Fsigma[1]/F[1])**2 ) #Calculate uncertainty in the differential extinction between two H2 lines
pair = differential_extinction([waves[0], waves[1]], A_delta_lambda, sigma_A_delta_lambda) #Store wavelengths, differential extinction, and uncertainty in a differential_extinction object
pairs.append(pair) #Save a single pair
observed_to_intrinsic.append((F[0]/F[1]) / (intrinsic_constants[0]/intrinsic_constants[1]))
#Pair 2
if abs(waves[0] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
A_delta_lambda = -2.5*log10((F[0]/F[2]) / (intrinsic_constants[0]/intrinsic_constants[2])) #Calculate differential extinction between two H2 lines
sigma_A_delta_lambda = (2.5 / log(10.0)) * sqrt( (Fsigma[0]/F[0])**2 + (Fsigma[2]/F[2])**2 ) #Calculate uncertainty in the differential extinction between two H2 lines
pair = differential_extinction([waves[0], waves[2]], A_delta_lambda, sigma_A_delta_lambda) #Store wavelengths, differential extinction, and uncertainty in a differential_extinction object
pairs.append(pair) #Save a single pair
observed_to_intrinsic.append((F[0]/F[2]) / (intrinsic_constants[0]/intrinsic_constants[2]))
#Pair 3
if abs(waves[1] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
A_delta_lambda = -2.5*log10((F[1]/F[2]) / (intrinsic_constants[1]/intrinsic_constants[2])) #Calculate differential extinction between two H2 lines
sigma_A_delta_lambda = (2.5 / log(10.0)) * sqrt( (Fsigma[1]/F[1])**2 + (Fsigma[2]/F[2])**2 ) #Calculate uncertainty in the differential extinction between two H2 lines
pair = differential_extinction([waves[1], waves[2]], A_delta_lambda, sigma_A_delta_lambda) #Store wavelengths, differential extinction, and uncertainty in a differential_extinction object
pairs.append(pair) #Save a single pair
observed_to_intrinsic.append((F[1]/F[2]) / (intrinsic_constants[1]/intrinsic_constants[2]))
wave_sets.append(waves)
n_trips_found += 1
for pair in pairs: #Loop through each pair
if pair.s2n > 3.0:
pair.fit_curve()
figure(1)
plot(alpha, pair.A_K, color=color_list[V], label = 'V = '+str(V) + ' J = ' + str(J))
plot(alpha, pair.A_K + pair.sigma_A_K, '--', color=color_list[V])
plot(alpha, pair.A_K - pair.sigma_A_K, '--', color=color_list[V])
f = interp1d(alpha, pair.A_K)
g = interp1d(alpha, pair.sigma_A_K)
print('V = ', str(V), 'J = ', str(J),' at alpha=2, A_K = ', f(2.0), '+/-', g(2.0))
print('for pair at waves', str(pair.waves[0]), ' & ', str(pair.waves[1]), ' A_delta_lambda=', str(pair.A))
pairs = []
xlabel('Alpha')
ylabel('$A_K$')
legend()
ylim([0,20])
#show()
#figure(2)
#clf()
#for pair in pairs: #Loop through each pair
# plot(pair.waves, [0,10**(0.4*pair.A)])
##show()
#print('V=', V)
#pairs = []
stop()
if a == 0.0: #If user does not specify alpha to use
a = float(input('What value of alpha do you want to use? ')) #Prompt user for alpha
if A_K == 0.0: #If user does not specify what the K-band extinction A_K should be
A_K = float(input('What value of A_K do you want to use? ')) #Prompt user for A_K
A_lambda = A_K * transitions.wave**(-a) / lambda0**(-a)
transitions.F *= 10**(0.4*A_lambda)
transitions.sigma *= 10**(0.4*A_lambda)
#suptitle('V = ' + str(V))
#stop()
print('Number of pairs from same upper state = ', n_doubles_found)
print('Number of triples from same upper state = ', n_trips_found)
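#Note on the differential extinction used above: two lines sharing an upper state have an
#intrinsic flux ratio fixed entirely by atomic constants, so any observed deviation from that
#ratio measures extinction:
#  A_lambda1 - A_lambda2 = -2.5*log10[ (F1/F2)_observed / (F1/F2)_intrinsic ]
#This is the quantity computed for each pair above before fitting A_K and alpha.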
#Test printing intrinsic ratios, for debugging/diagnosing extinction
def test_intrinsic_ratios(transitions):
A_lambda = array([ 0.482, 0.282, 0.175, 0.112, 0.058]) #(A_lambda / A_V) extinction curve from Rieke & Lebofsky (1985) Table 3
l = array([ 0.806, 1.22 , 1.63 , 2.19 , 3.45 ]) #Wavelengths for extinction curve from Rieke & Lebofsky (1985)
extinction_curve = interp1d(l, A_lambda, kind='quadratic') #Create interpolation object for extinction curve from Rieke & Lebofsky (1985)
clf()
n_doubles_found = 0 #Count doubles (pair from same upper state)
n_trips_found = 0 #Count trips
#i = (transitions.N != 0.0) & (transitions.s2n > 10.0) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
i = (transitions.F != 0.0) & (transitions.s2n > 0.5) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
J_upper_found = unique(transitions.J.u[i]) #Find J for all (detected) transition upper states
V_upper_found = unique(transitions.V.u[i]) #Find V for all (detected) transition upper states
lines_found = []
for V in V_upper_found: #Check each upper V for pairs
for J in J_upper_found: #Check each upper J for pairs
match_upper_states = (transitions.J.u[i] == J) & (transitions.V.u[i] == V) #Find all transitions from the same upper J and V state
waves = transitions.wave[i][match_upper_states] #Store wavelengths of all found transitions
s = argsort(waves) #sort by wavelength
#N = transitions.N[i][match_upper_states] #Store all column densities for found transitions
#F = transitions.F[i][match_upper_states]
#Fsigma = transitions.sigma[i][match_upper_states]
#intrinsic_constants = (transitions.g[i][match_upper_states] * transitions.E.diff()[i][match_upper_states] * transitions.A[i][match_upper_states]) #Get constants for calculating the intrinsic ratios
waves = waves[s]
labels = transitions.label[i][match_upper_states][s]
#Nsigma = transitions.Nsigma[i][match_upper_states] #Grab uncertainity in column densities
if len(waves) == 2 and abs(waves[0]-waves[1]) > wave_thresh: #If a single pair of lines from the same upper state is found, calculate differential extinction for this single pair
print('For '+labels[0]+'/'+labels[1]+' '+str(waves[0])+'/'+str(waves[1])+':')
#print(' Observed ratio: ', transitions.flux_ratio(labels[0], labels[1], sigma=True))
#print(' Intrinsic ratio:', transitions.intrinsic_ratio(labels[0], labels[1]))
ratio_of_ratios = transitions.flux_ratio(labels[0], labels[1], sigma=True) / transitions.intrinsic_ratio(labels[0], labels[1])
print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
print('Calculated A_V = ', -2.5*log10(ratio_of_ratios)/(extinction_curve(waves[0])-extinction_curve(waves[1])))
plot([waves[0],waves[1]], [ratio_of_ratios[0], 1.])
lines_found.append(labels[0]) #Store line labels of lines found
lines_found.append(labels[1])
n_doubles_found += 1 #Count this pair
elif len(waves) == 3: #If three lines are found from the same upper state, calculate differential extinction from differences between all three lines
lines_found.append(labels[0]) #Store line labels of lines found
lines_found.append(labels[1])
lines_found.append(labels[2])
#Pair 1
if abs(waves[0] - waves[1]) > wave_thresh: #check if pair of lines are far enough apart
print('For '+labels[0]+'/'+labels[1]+' '+str(waves[0])+'/'+str(waves[1])+':')
#print(' Observed ratio: ', transitions.flux_ratio(labels[0], labels[1], sigma=True))
#print(' Intrinsic ratio:', transitions.intrinsic_ratio(labels[0], labels[1]))
ratio_of_ratios = transitions.flux_ratio(labels[0], labels[1], sigma=True) / transitions.intrinsic_ratio(labels[0], labels[1])
print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
print('Calculated A_V = ', -2.5*log10(ratio_of_ratios)/(extinction_curve(waves[0])-extinction_curve(waves[1])))
plot([waves[0],waves[1]], [ratio_of_ratios[0], 1.])
#Pair 2
if abs(waves[0] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
print('For '+labels[0]+'/'+labels[2]+' '+str(waves[0])+'/'+str(waves[2])+':')
#print(' Observed ratio: ', transitions.flux_ratio(labels[0], labels[2], sigma=True))
#print(' Intrinsic ratio:', transitions.intrinsic_ratio(labels[0], labels[2]))
ratio_of_ratios = transitions.flux_ratio(labels[0], labels[2], sigma=True) / transitions.intrinsic_ratio(labels[0], labels[2])
print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
print('Calculated A_V = ', -2.5*log10(ratio_of_ratios)/(extinction_curve(waves[0])-extinction_curve(waves[2])))
plot([waves[0],waves[2]], [ratio_of_ratios[0], 1.])
#Pair 3
if abs(waves[1] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart (guard the whole pair, as for Pairs 1 and 2)
print('For '+labels[1]+'/'+labels[2]+' '+str(waves[1])+'/'+str(waves[2])+':')
#print('  Observed ratio: ', transitions.flux_ratio(labels[1], labels[2], sigma=True))
#print('  Intrinsic ratio:', transitions.intrinsic_ratio(labels[1], labels[2]))
ratio_of_ratios =  transitions.flux_ratio(labels[1], labels[2], sigma=True) / transitions.intrinsic_ratio(labels[1], labels[2])
print('Observed/intrinsic = %4.2f' % ratio_of_ratios[0][0] + ' +/- %4.2f' % (ratio_of_ratios[1][0]))
print('Calculated A_V = ', -2.5*log10(ratio_of_ratios)/(extinction_curve(waves[1])-extinction_curve(waves[2])))
plot([waves[1],waves[2]], [ratio_of_ratios[0], 1.])
n_trips_found += 1 #Count this triple
#stop()
print('Number of pairs from same upper state = ', n_doubles_found)
print('Number of triples from same upper state = ', n_trips_found)
#return lines_found
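#Note on the A_V estimate printed above: writing R = (observed ratio)/(intrinsic ratio) and
#using a tabulated extinction curve A_lambda/A_V, the relation A_lambda1 - A_lambda2 = -2.5*log10(R)
#inverts to
#  A_V = -2.5*log10(R) / [ (A_lambda1/A_V) - (A_lambda2/A_V) ]
#which is what is evaluated with the interpolated Rieke & Lebofsky (1985) curve.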
##Store differential extinction between two transitions from the same upper state
class differential_extinction:
def __init__(self, waves, A, sigma): #Input the wavelengths of a pair of lines [XX,XX], their differential extinction, and its uncertainty
self.waves = waves #Store the wavelengths as waves[0] and waves[1]
self.A = A #Store differential extinction
self.sigma = sigma #Store uncertainty in differential extinction A
self.s2n = A / sigma
def fit_curve(self):
constants = lambda0**alpha / ( self.waves[0]**(-alpha) - self.waves[1]**(-alpha) ) #Calculate constants to multiply A_delta_lambda by to get A_K
self.A_K = self.A * constants #calculate extinction for a given power law alpha
self.sigma_A_K = self.sigma * constants #calculate extinction for a given power law alpha
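#Sketch of the algebra behind fit_curve(): assuming the power law extinction curve
#A_lambda = A_K * (lambda/lambda0)**(-alpha), the differential extinction between two lines is
#  A_delta_lambda = A_K * lambda0**alpha * (lambda1**(-alpha) - lambda2**(-alpha))
#so A_K = A_delta_lambda * lambda0**alpha / (lambda1**(-alpha) - lambda2**(-alpha)).
#fit_curve() evaluates this on the global alpha grid, and the fractional uncertainty in
#A_delta_lambda carries through to A_K unchanged.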
def import_cloudy(model=''): #Import cloudy model from cloudy directory
h = make_line_list() #Make H2 transitions object
# paths = open(cloudy_dir + 'process_model/input.dat') #Read in current model from process_model/input.dat
# input_model = paths.readline().split(' ')[0]
# distance = float(paths.readline().split(' ')[0])
# inner_radius = float(paths.readline().split(' ')[0])
# slit_area = float(paths.readline().split(' ')[0])
# data_dir = paths.readline().split(' ')[0]
# plot_dir = paths.readline().split(' ')[0]
# table_dir = paths.readline().split(' ')[0]
# paths.close()
# if model == '': #If no model is specified by the user, read in model set in process_model/input.dat
# model = input_model
#READ IN LEVEL COLUMN DENSITY FILE
# filename = data_dir+model+".h2col" #Name of file to open
# v, J, E, N, N_over_g, LTE_N, LTE_N_over_g = loadtxt(filename, skiprows=4, unpack=True) #Read in H2 column density file
# for i in range(len(v)): #Loop through each rovibrational energy level
# found_transitions = (h.V.u == v[i]) & (h.J.u == J[i]) #Find all rovibrational transitions that match the upper v and J
# h.N[found_transitions] = N[i] #Set column density of transitions
#READ IN LINE EMISSION FILE AND CONVERT LINE EMISSION TO COLUMN DENSITIES
#filename = data_dir+model+'.h2.lines'
filename = cloudy_dir + '/run/' +model+'.h2.lines'
line, wl_lab = loadtxt(filename, unpack=True, dtype='S', delimiter='\t', usecols=(0,8))
Ehi, Vhi, Jhi, Elo, Vlo, Jlo = loadtxt(filename, unpack=True, dtype='int', delimiter='\t', usecols=(1,2,3,4,5,6))
wl_mic, log_L, I_ratio, Excit, gu_h_nu_aul = loadtxt(filename, unpack=True, dtype='float', delimiter='\t', usecols=(7,9,10,11,12))
L=10**log_L #Convert log luminosity to linear units
for i in range(len(L)): #Loop through each transition
h.F[(h.V.u == Vhi[i]) & (h.V.l == Vlo[i]) & (h.J.u == Jhi[i]) & (h.J.l == Jlo[i])] = L[i] #Find current transition in h2 transitions object for list of H2 lines cloudy outputs and set flux to be equal to the luminosity of the line outputted by cloudy
h.calculate_column_density()
#h.normalize() #Normalize to the 5-3 O(3) line
return(h)
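#Example usage (assuming a finished Cloudy run named 'mymodel' sitting in cloudy_dir + '/run/';
#the model name here is purely illustrative):
#  h = import_cloudy(model='mymodel')
#  h.v_plot() #Excitation diagram of the model line luminosities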
def combine_models(model1, model2, weight1, weight2): #Combine two models scaling each by their given weights
combined_model = make_line_list() #Make new object for the combination of both models
i = (model1.N > 0.) & (model2.N > 0.) #Use only models that include the same lines in each
combined_model.N[i] = model1.N[i] * weight1 + model2.N[i] * weight2 #Combine the level column densities weighted by the given weights
return combined_model #Return the single combined model
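#Example (hypothetical weights): a 70/30 mix of two models would be
#  combined = combine_models(model_a, model_b, 0.7, 0.3)
#Note that only levels with N > 0 in *both* inputs are combined; all other levels stay zero.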
def import_emissivity(x_range=[4.25e17, 4.5e17], dr=5e15): #Import Cloudy model emissivity and integrate over the given radius range
paths = open(cloudy_dir + 'process_model/input.dat') #Read in current model
model = paths.readline().split(' ')[0]
distance = float(paths.readline().split(' ')[0])
inner_radius = float(paths.readline().split(' ')[0])
slit_area = float(paths.readline().split(' ')[0])
data_dir = paths.readline().split(' ')[0]
plot_dir = paths.readline().split(' ')[0]
table_dir = paths.readline().split(' ')[0]
paths.close()
filename = data_dir+model+".line_emiss"
#read_labels = loadtxt(filename, dtype=str, comments='$', delimiter='\t', unpack=True) #Read labels
read_data = loadtxt(filename, dtype=float, comments='#', delimiter='\t', unpack=True, skiprows=1) #Read data
read_H_band_wavelengths = loadtxt(data_dir+model+'.100lines_hband.waves', dtype=float) #Read H and K band wavelengths
read_K_band_wavelengths = loadtxt(data_dir+model+'.100lines_kband.waves', dtype=float)
read_H_band_labels = loadtxt(data_dir+model+'.100lines_hband.lines', dtype=str, delimiter='~') #Read H and K band labels
read_K_band_labels = loadtxt(data_dir+model+'.100lines_kband.lines', dtype=str, delimiter='~')
read_wavelengths = air_to_vac( concatenate([read_H_band_wavelengths, read_K_band_wavelengths]) ) #Combine H & K band wavelengths into one array and convert from air to vacuum wavelengths
#read_wavelengths = concatenate([read_H_band_wavelengths, read_K_band_wavelengths])
emiss = emissivity(concatenate([read_H_band_labels, read_K_band_labels]), read_data[0,:], read_wavelengths, 10**read_data[1:,:]) #Put everything in an emissivity object
emiss.set_H2_labels() #Set H2 labels to proper spectroscopic notation
f = emiss.integrate_slab(x_range[0], x_range[1], dr=dr) #Integrate slab
h = make_line_list()
h.read_model(emiss.labels, f) #Convert model into H2 object
#return emiss #Return the emissivity object to the user
h.calculate_column_density()
return h #Return H2 object storing the integrated model line emissivity
#Convert wavelengths in air (outputted by Cloudy) into vacuum (what IGRINS sees)
#Based on APOGEE Technical Note "Conversion from vacuum to standard air wavelengths" by Prieto (2011)
def air_to_vac(l):
a, b1, b2, c1, c2 = [0.0, 5.792105e-2, 1.67917e-3, 238.0185, 57.362] #Coefficients from Ciddor (1996)
n = 1 + a + (b1 / (c1-l**-2)) + (b2 / (c2-l**-2))
l_vac = l * n
return(l_vac)
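#Rough worked example: for l = 2.0 um, l**-2 = 0.25, so
#  n = 1 + 0.05792105/(238.0185 - 0.25) + 0.00167917/(57.362 - 0.25) ~ 1.000273
#giving l_vac ~ 2.000546 um, i.e. an air-to-vacuum shift of roughly 0.55 nm at 2 um.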
#Class for storing all the data from a cloudy emissivity file
class emissivity:
def __init__(self, labels, depth, waves, flux): #When first running this class...
self.labels = labels #Store line labels under "labels"
self.depth = depth #Store depth (or radius) into cloud under "depth"
self.waves = waves #Store wavelengths of lines under "waves"
self.flux = flux #Store fluxes as a function of depth into cloud
def get(self, label): #return depth and flux (unpacked) for a chosen line
index = self.labels == label #Find line
return array([self.depth, self.flux[index,:][0]]) #Return depth and flux of line
def plot(self, label): #Simple plot of a given line emissivity vs. depth
line = self.get(label) #Grab depth, emissivity of specified line
plot(line[0], line[1]) #Plot depth vs. emissivity
show() #Show plot
def set_H2_labels(self): #Set H2 labels to standard spectroscopic notation
h2_line_labels = loadtxt(cloudy_dir+'process_model/IGRINS_H2_line_list.dat', usecols=(1,), delimiter="\t", dtype='string') #Load spectroscopic notation for H2 lines
h2_line_wave = loadtxt(cloudy_dir+'process_model/IGRINS_H2_line_list.dat', usecols=(0,), delimiter="\t") #Load wavelengths for the H2 lines
for i in range(len(self.labels)):
if self.labels[i].astype('|S4') == 'H2 ':
wave_diff = abs(h2_line_wave - self.waves[i])
j = wave_diff == min(wave_diff) #Find index of nearest H2 line
self.labels[i] = h2_line_labels[j][0] #Replace Cloudy H2 label with proper spectroscopic notation label
def integrate_slab(self, inner_radius, outer_radius, dr=1e12): #Integrate up emission from a slab between inner and outer radii
#goodpix = (self.depth >= inner_radius) & (self.depth <= outer_radius) #Find pixels in radii range
#interp_emissivity = interp1d(self.depth[goodpix], self.flux[:,goodpix], axis=1, bounds_error=False, kind='linear') #Interpolate over all lines
interp_emissivity = interp1d(self.depth, self.flux, axis=1, bounds_error=False, kind='linear')
r = arange(inner_radius, outer_radius, dr) #Set up radius grid to interpolate over
interp_flux = nansum(interp_emissivity(r)*dr, axis=1) #Sum up all line fluxes
return interp_flux
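#Sketch of the integration above: the slab flux is a Riemann sum,
#  F_line ~ sum_i emissivity(r_i) * dr  for r_i in [inner_radius, outer_radius),
#with the emissivity linearly interpolated between Cloudy's tabulated depth points.
#Shrinking dr trades speed for accuracy; radii outside the tabulated range interpolate
#to NaN (bounds_error=False) and are dropped by nansum().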
def slice(self, radius): #grab line emissivities at slice through cloud
interp_emissivity = interp1d(self.depth, self.flux, axis=1, bounds_error=False, kind='linear') #Interpolate over all lines
interp_flux = interp_emissivity(radius) #Grab fluxes and specific radius
return interp_flux
# def excitation_diagrams(self, xstep=1e16, y_range=[-6,2], fname=plot_dir+model+'slices_excitation_diagrams.pdf'): #Make a PDF where each page is an excitation diagram is an integrated slice of a Cloudy model
# h = h2.make_line_list() #Set up object for storing H2 transitions
# #interp_emissivity = interp1d(self.depth, self.flux, axis=1, bounds_error=False, kind='linear')
# #r = arange(inner_radius, outer_radius, dr) #Set up radius grid to interpolate over
# with PdfPages(fname) as pdf: #Set up saving a PDF
# for i, x in enumerate(self.depth):
# #for x in arange(0., max(self.depth)-xstep, xstep): #Loop through each slice of the model up to the maximum depth
# # stop()
# #f = self.integrate_slab(x, x+xstep, dr=xstep/1e3) #Integrate up flux in each slice from line emissivities
# #dr = xstep / 1e3
# #r = arange(x, x+xstep, dr) #Set up radius grid to interpolate over
# #f = nansum(interp_emissivity(r)*dr, axis=1) #Sum up all line fluxes
# h.read_model(self.labels, self.flux[:,i]) #Read fluxes from slices into H2 object
# h.calculate_column_density() #Calcualte column density for H2 rovibrational lines
# h.v_plot(s2n_cut=-1.0, show_labels=True, savepdf=False, show_legend=False, y_range=y_range)
# title('Slice = ' + str(x) + ' cm') #Set title show show what depth we are at in the model
# pdf.savefig() #Output page of pdf
#Class to store information on H2 transitions; given line fluxes, it can calculate column densities
class h2_transitions:
def __init__(self, J, V, E, A):
s = argsort(E.u) #Sort all arrays by energy for easy plotting later
J.sort(s)
V.sort(s)
E.sort(s)
A = A[s]
n_lines = len(A) #Number of lines
self.n_lines = n_lines
self.J = J #Read in J object to store upper and lower J states
self.V = V #Read in V object to store upper and lower V states
self.E = E #Read in E object to store energy of upper state
self.A = A #Read in Einstein A coeff. (transition probability) of line
self.F = zeros(n_lines) #Set initial flux of line
self.N = zeros(n_lines) #Set initial column density for each line
self.Nsigma = zeros(n_lines) #Set uncertainty in the column density for each line
self.sigma = zeros(n_lines) #Set initial sigma (uncertainty) for each line
self.s2n = zeros(n_lines) #Set initial signal-to-noise ratio for each line
self.label = self.makelabel() #Make label of spectroscopic notation
g_ortho_para = 1.0 + 2.0 * (J.u % 2.0 == 1.0) #Calculate the degeneracy for ortho or para hydrogen
self.g = g_ortho_para * (2.0*J.u+1.0) #Store degeneracy
self.T = E.u / k #Store "temperature" of the energy of the upper state
self.wave = E.getwave() #Store wavelength of transitions
self.path = '' #Store path for saving excitation diagram and other files, read in when reading in a region with set_flux
self.rot_T = zeros(n_lines) #Store rotation temperature from fit
self.model_ratio = zeros(n_lines) #store ratio to model, if a model fit is performed
self.sig_rot_T = zeros(n_lines) #Store uncertainty in rotation temperature fit
self.res_rot_T = zeros(n_lines) #Store residuals of each line from the rotation temperature fit
self.sig_res_rot_T = zeros(n_lines) #Store uncertainty in residuals from fitting rotation temp (e.g. using covariance matrix)
def generate_states(self): #Returns a states object based on the average column densities of the levels in this transitions object
s = states()
for J in range(1,25):
for V in range(1,15): #Loop through each V ladder
use_these = (self.J.u == J) & (self.V.u==V) & (self.N > 0.)
if any(use_these):
s.N[(s.J==J) & (s.V==V)] = exp(nanmean(log(self.N[use_these])))
return s
def tin(self, v, J): #Find and return indices of transitions into a given level defined by v and J
return where((self.V.l == v) & (self.J.l == J))
def tout(self, v, J): #Find and return indices of transitions out of a given level defined by v and J
return where((self.V.u == v) & (self.J.u == J))
def calculate_column_density(self, normalize=False): #Calculate the column density and uncertainty for a line's given upper state from the flux and appropriate constants
##self.N = self.F / (self.g * self.E.u * h * c * self.A)
##self.Nsigma = self.sigma / (self.g * self.E.u * h * c * self.A)
#self.N = self.F / (self.g * self.E.diff() * h * c * self.A)
#self.Nsigma = self.sigma / (self.g * self.E.diff() * h * c * self.A)
self.N = 4 * pi * self.F / (self.E.diff() * h * c * self.A)
self.Nsigma = 4 * pi * self.sigma / (self.E.diff() * h * c * self.A)
#self.T_monte_carlo = self.T[:,newaxis] * ones([self.n_lines, 10000])
#self.N_monte_carlo = self.N[:,newaxis] + self.Nsigma[:,newaxis] * randn(self.n_lines, 10000)#zero([self.n_lines, 10000])
if normalize: #Optionally normalize (to the 5-3 O(3) line by default); leave normalize=False when using absolute flux calibrated data
self.normalize()
#N_10_S1 = self.N[self.label == '1-0 S(1)'] #Grab column density derived from 1-0 S(1) line
#self.N = self.N / N_10_S1 #Normalize column densities
#self.Nsigma = self.Nsigma / N_10_S1 #Normalize uncertainity
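#Sketch of the relation used above: for optically thin emission the observed flux is
#  F = N_u * A_ul * h*c*dE / (4*pi),
#with dE the transition energy difference in wavenumbers, so inverting gives
#  N_u = 4*pi*F / (dE * h * c * A_ul),
#and the same scaling maps sigma onto Nsigma.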
def calculate_flux(self): #Calculate flux for a given calculated column density (ie. if you set it to thermalize)
#self.F = self.N * self.g * self.E.diff() * h * c * self.A
self.F = self.N * self.E.diff() * h * c * self.A
def generate_synthetic_spectrum(self, wave_range=[1.45,2.45], pixel_size=1e-5, line_fwhm=7.5, centroid=0.): #Generate a synthetic 1D spectrum based on stored flux values in this object, can be used to synthesize spectra from Cloudy models, or thermal gas generated by the "thermalize" command
#n_pixels = (wave_range[1] - wave_range[0])/pixel_size #Calcualte number of pixels in 1D sythetic spectrum
#velocity_grid = arange(-500,500,0.01) #Create velocity grid
c_km_s = c / 1e5 #Get speed of light in km/s
sigma = line_fwhm / (2.0*sqrt(2.0*log(2.0))) #Convert FWHM into sigma for a gaussian (FWHM = 2*sqrt(2*ln2)*sigma)
alpha = 2.0*sigma**2 #Calculate alpha for gaussian
beta = (1.0/sqrt(pi*alpha)) #Calculate the normalization (here called beta) for the gaussian profile
#line_profile = beta * exp(-((velocity_grid-centroid)**2/(alpha))) #Calculate normalizeable line profile in velocity space
wave = arange(wave_range[0], wave_range[1], pixel_size) #Create wavelength array for 1D synthetic spectrum
flux = zeros(len(wave)) #Create flux array for 1D synthetic spectrum
for i in range(len(self.wave)):
current_wavelength = self.wave[i]
if (current_wavelength > wave_range[0]) and (current_wavelength < wave_range[1]):
#Interpolate line profile into wavelength space
#velocity_grid = c_km_s * ((wave/current_wavelength) - 1.0) #Create velocity grid from wavelength grid
line_profile = beta * exp(-((c_km_s * ((wave/current_wavelength) - 1.0)-centroid)**2/(alpha))) #Calculate gaussian line profile in wavelength space
flux = flux + self.F[i]*line_profile #Build up line on flux grid
return wave, flux #Return wavelength and flux grids
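#Example usage (hypothetical parameters): a K-band synthetic spectrum with 10 km/s FWHM lines
#  wave, flux = h.generate_synthetic_spectrum(wave_range=[1.9,2.5], line_fwhm=10.0)
#  plot(wave, flux)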
def normalize(self, label='5-3 O(3)', value=1., by_g=False):
i = self.label == label
if self.N[i] > 0.: #Check if line even exists
if by_g: #Also include the quantum degeneracy in the normalization if the user desires
normalize_by_this = self.N[i] / self.g[i]
else: #Default
normalize_by_this = self.N[i] / value #Grab column density of line to normalize by
#self.N_monte_carlo /= normalize_by_this #Do the normalization
self.N /= normalize_by_this #Do the normalization
self.Nsigma /= normalize_by_this #Ditto for the uncertainty
else:
print("ERROR: Attempted to normalize by the " + label + " line, but it appears to not exist. No normalization done. Try a different line?")
def thermalize(self, temperature): #Set all column densities to a thermal (Boltzmann) distribution at the specified temperature
exponential = self.g * exp(-self.T/temperature) #Calculate boltzmann distribution for user given temperature, used to populate energy levels
boltzmann_distribution = exponential / nansum(exponential) #Create a normalized boltzmann distribution
self.N = boltzmann_distribution #Set column densities to the boltzmann distribution
#self.normalize() #Normalize to the 1-0 S(1) line
self.calculate_flux() #Calculate flux of new lines after thermalization
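#Sketch of the LTE populations set above: each transition's upper level is weighted by its
#Boltzmann factor, N_u proportional to g_u * exp(-E_u/(k*T)), normalized by the sum of that
#factor over every transition in the list (so a level feeding several lines is counted once
#per line). The stored N values are therefore fractional populations, not absolute columns.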
def makelabel(self): #Make labels for each transition in spectroscopic notation.
labels = []
for i in range(self.n_lines):
labels.append(self.V.label[i] + ' ' + self.J.label[i])
return array(labels)
def intrinsic_ratio(self, line_label_1, line_label_2): #Return the intrinsic flux ratio of two transitions that arise from the same upper state
line_1 = self.label == line_label_1 #Find index to transition 1
line_2 = self.label == line_label_2 #Find index to transition 2
if (self.V.u[line_1] != self.V.u[line_2]) or (self.J.u[line_1] != self.J.u[line_2]): #Test if both transitions came from the same upper state and catch error if not
print("ERROR: Both of these transitions do not arise from the same upper state.")
return(0.0) #Return 0 if the transitions do not arise from the same upper state
ratio = (self.E.diff()[line_1] * self.A[line_1]) / (self.E.diff()[line_2] * self.A[line_2]) #Calculate intrinsic ratio of the two transitions
return(ratio) #return the intrinsic ratio
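#Why this works: both lines share the same upper level, so N_u and g_u cancel in the flux
#ratio and only atomic constants remain: F1/F2 = (A_1*dE_1)/(A_2*dE_2). Any observed departure
#from this ratio can then be attributed to differential extinction between the two wavelengths.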
def flux_ratio(self, line_label_1, line_label_2, sigma=False): #Return flux ratio of any two lines
line_1 = self.label == line_label_1 #Find index to transition 1
line_2 = self.label == line_label_2 #Find index to transition 2
ratio = self.F[line_1] / self.F[line_2]
if sigma: #If user specifies they want the uncertainty returned, return the ratio and the uncertainty
uncertainty = sqrt(ratio**2 * ((self.sigma[line_1]/self.F[line_1])**2 + (self.sigma[line_2]/self.F[line_2])**2) )
return ratio, uncertainty
else:
return ratio #Return observed line flux ratio only (no uncertainty)
def upper_state(self, label, wave_range = [0,999999999.0]): #Given a label in spectroscopic notation, list transitions with same upper state (and a wavelength range if specified)
i = self.label == label
Ju = self.J.u[i]
Vu = self.V.u[i]
found_transitions = (self.wave > wave_range[0]) & (self.wave < wave_range[1]) & (self.J.u == Ju) & (self.V.u == Vu)
label_subset = self.label[found_transitions]
wave_subset = self.wave[found_transitions]
for i in range(len(label_subset)):
print(label_subset[i] + '\t' + str(wave_subset[i]))
#print(self.label[found_transitions])#Find all matching transitions in the specified wavelength range with a matching upper J and V state
#print(self.wave[found_transitions])
def set_flux(self, region): #Set the flux of a single line or multiple lines given the label for it, e.g. h2.set_flux('1-0 S(1)', 456.156)
if hasattr(region, 'path'): #Error catch
self.path = region.path #Set path for saving output files later
n = len(region.label)
if n == 1: #If only a single line
matched_line = (self.label == region.label)
if any(matched_line): #If any matches are found...
self.F[matched_line] = region.flux #Set flux for a single line
self.s2n[matched_line] = region.s2n #Set S/N for a single line
self.sigma[matched_line] = region.sigma #Set sigma (uncertainty) for a single line
else: #if multiple lines
for i in range(n): #Loop through each line
matched_line = (self.label == region.label[i])
if any(matched_line): #If any matches are found...
self.F[matched_line] = region.flux[i] #And set flux
self.sigma[matched_line] = region.sigma[i] #Set sigma (uncertainty) for a single line
if hasattr(region, 's2n'): #Error catch
self.s2n[matched_line] = region.s2n[i] #Set S/N for a single line
else:
self.s2n[matched_line] = region.flux[i] / region.sigma[i] #If .s2n does not exist, just calculate S/N from flux/sigma
def read_model(self, labels, flux): #Read in fluxes from model
for i in range(len(labels)): #Loop through each line
matched_line = (self.label == labels[i]) #Match to H2 line
self.F[matched_line] = flux[i] #Set flux to flux from model
def read_data(self, labels, flux, sigma):
for i in range(len(labels)): #Loop through each line
matched_line = (self.label == labels[i]) #Match to H2 line
self.F[matched_line] = flux[i] #Set flux to flux from data
self.sigma[matched_line] = sigma[i] #Set uncertainty to uncertainty from data
self.s2n[matched_line] = flux[i] / sigma[i] #Calculate S/N
def quick_plot(self): #Create quick boltzmann diagram for previewing and testing purposes
nonzero = self.N != 0.0
clf()
plot(self.T[nonzero], log(self.N[nonzero]), 'o')
ylabel("Column Density log$_e$(N/g) [cm$^{-2}$]", fontsize=18)
xlabel("Excitation Energy (E/k) [K]", fontsize=18)
show()
def make_latex_table(self, output_filename, s2n_cut = 3.0, normalize_to='5-3 O(3)'): #Output a latex table of column densities for each H2 line
lines = []
#lines.append(r"\begin{table}") #Set up table header
lines.append(r"\begin{longtable}{llrrrrr}")
lines.append(r"\caption{\htwo{} rovibrational state column densities}{} \label{tab:coldens} \\")
#lines.append("\begin{scriptsize}")
#lines.append(r"\begin{tabular}{cccc}")
lines.append(r"\hline")
lines.append(r"$\lambda_{\mbox{\tiny vacuum}}$ & \htwo{} line ID & $v_u$ & $J_u$ & $E_u/k$ & $\log_{10}\left(A_{ul}\right)$ & $\ln \left(N_u/g_u\right) - \ln\left(N_{\mbox{\tiny "+normalize_to+r"}}/g_{\mbox{\tiny "+normalize_to+r"}}\right)$ \\")
lines.append(r"\hline\hline")
lines.append(r"\endfirsthead")
lines.append(r"\hline")
lines.append(r"$\lambda_{\mbox{\tiny vacuum}}$ & \htwo{} line ID & $v_u$ & $J_u$ & $E_u/k$ & $\log_{10}\left(A_{ul}\right)$ & $\ln \left(N_u/g_u\right) - \ln\left(N_{\mbox{\tiny "+normalize_to+r"}}/g_{\mbox{\tiny "+normalize_to+r"}}\right)$ \\")
lines.append(r"\hline\hline")
lines.append(r"\endhead")
lines.append(r"\hline")
lines.append(r"\endfoot")
lines.append(r"\hline")
lines.append(r"\endlastfoot")
if any(self.V.u[self.s2n > s2n_cut]): #Error catching
highest_v = max(self.V.u[self.s2n > s2n_cut]) #Find highest V level
for v in range(1,highest_v+1): #Loop through each rotation ladder
i = (self.V.u == v) & (self.s2n > s2n_cut) #Find all lines in the current ladder
s = argsort(self.J.u[i]) #Sort by upper J level
labels = self.label[i][s] #Grab line labels
J = self.J.u[i][s] #Grab upper J
N = self.N[i][s] / self.g[i][s] #Grab column density N/g
E = self.T[i][s]
A = self.A[i][s]
wave = self.wave[i][s]
sig_N = self.Nsigma[i][s] / self.g[i][s] #Grab uncertainty in N
for j in range(len(labels)):
#lines.append(labels[j] + " & " + str(v) + " & " + str(J[j]) + " & " + "%1.2e" % N[j] + " $\pm$ " + "%1.2e" % sig_N[j] + r" \\")
lines.append(r"%1.6f" % wave[j] + " & " + labels[j] + " & " + str(v) + " & " + str(J[j]) + " & %5.0f" % E[j] + " & %1.2f" % log10(A[j]) +
" & $" + "%1.2f" % log(N[j]) + r"^{+%1.2f" % (-log(N[j]) + log(N[j]+sig_N[j])) +r"}_{%1.2f" % (-log(N[j]) + log(N[j]-sig_N[j])) +r"} $ \\")
#lines.append(r"\hline\hline")
#lines.append(r"\end{tabular}")
lines.append(r"\end{longtable}")
#lines.append(r"\end{table}")
savetxt(output_filename, lines, fmt="%s") #Output table
def save_table(self): #Output ascii table with data for making an excitation diagram
lines = [] #Set up array for saving lines for text file
lines.append('#H2 Line\twavelength [um]\tortho/para\tv_u\tJ_u\tE_u\tlog(N/g)-log(N/g)_1-0S(1)\t+sigma\t-sigma') #Header of text file listing all the columns
if any(self.V.u[self.s2n > 0.0]): #Error catching
highest_v = max(self.V.u[self.N > 0.0]) #Find highest V level
ortho_para = ['para' ,'ortho']
for v in range(1,highest_v+1): #Loop through each rotation ladder
i = (self.V.u == v) & (self.N > 0.0) #Find all lines in the current ladder
s = argsort(self.J.u[i]) #Sort by upper J level
labels = self.label[i][s] #Grab line labels
J = self.J.u[i][s] #Grab upper J
N = self.N[i][s] / self.g[i][s] #Grab column density N/g
E = self.T[i][s]
sig_N = self.Nsigma[i][s] / self.g[i][s] #Grab uncertainty in N
wave = self.wave[i][s] #Grab wavelength of line
for j in range(len(labels)): #Loop through each line
lines.append(labels[j] + '\t%1.5f' % wave[j] + '\t' + ortho_para[J[j]%2] + '\t' + str(v) + '\t' + str(J[j])+ '\t%1.1f' % E[j] +
'\t%1.3f' % log(N[j]) + '\t%1.3f' % (-log(N[j]) + log(N[j]+sig_N[j])) + '\t%1.3f' % (-log(N[j]) + log(N[j]-sig_N[j])) )
savetxt(self.path + '_H2_column_densities.dat', lines, fmt="%s") #Output table
def fit_rot_temp(self, T, log_N, y_error_bars, s2n_cut = 1., color='black', dotted_line=False, rot_temp_energy_limit=0., show=True): #Fit rotation temperature to a given ladder in vibration
log_N_sigma = nanmax(y_error_bars, 0) #Get largest error in log space
if rot_temp_energy_limit > 0.: #If user specifies to cut rotation temp fit, use that....
usepts = (T < rot_temp_energy_limit) & isfinite(log_N)
print('debug time! Log_N[usepts]=', log_N[usepts])
fit, cov = curve_fit(linear_function, T[usepts], log_N[usepts], sigma=log_N_sigma[usepts], absolute_sigma=False) #Do weighted linear regression fit
else: #Else fit all points
fit, cov = curve_fit(linear_function, T, log_N, sigma=log_N_sigma, absolute_sigma=False) #Do weighted linear regression fit
slope = fit[0]#[0]
sigma_slope = sqrt(abs(cov[0,0]))
if dotted_line:
linestyle=':'
else:
linestyle='-'
#y = polyval(fit, T) #Get y positions of rotation temperature fit
y = linear_function(T, fit[0], fit[1]) #Get y positions of rotation temperature fit
y_sigma = sqrt(cov[0,0]*T**2 + 2.0*cov[0,1]*T + cov[1,1]) #Grab uncertainty in fit for a given y value from the covariance matrix, see Pg 125 of the Math Methods notes
if show: #If user wants to plot lines
plot(T, y, color=color, linestyle=linestyle) #Plot T rot fit
#plot(T, y+y_sigma, color=color, linestyle='--') #Plot uncertainity in T rot fit
#plot(T, y-y_sigma, color=color, linestyle='--')
rot_temp = -1.0/slope #Calculate the rotation temperature
sigma_rot_temp = rot_temp * (sigma_slope/abs(slope)) #Calculate uncertainty in rotation temp. by scaling the fractional error on the slope
print('rot_temp = ', rot_temp,'+/-',sigma_rot_temp)
#residuals = e**log_N - e**y #Calculate residuals in fit, but put back in linear space
#sigma_residuals = sqrt( (e**(y + y_sigma) - e**y)**2 + (e**(log_N + log_N_sigma)-e**log_N)**2 ) #Calculate uncertainity in residuals from adding uncertainity in fit and data points together in quadarature
residuals = e**(log_N-y)
sigma_residuals = sqrt(log_N_sigma**2 + y_sigma**2)
return rot_temp, sigma_rot_temp, residuals, sigma_residuals
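#Sketch of the rotation temperature fit above: on a Boltzmann diagram, gas in LTE follows
#  ln(N_u/g_u) = -(E_u/k)/T_rot + const,
#so the weighted linear fit of ln(N/g) vs. E_u/k has slope = -1/T_rot, and the fractional
#uncertainty on the slope propagates directly to sigma(T_rot).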
def compare_model(self, h2_model_input, name='compare_model_excitation_diagrams', figsize=[17.0,13], x_range=[0.0,55000.0], y_range=array([-6.25,5.5]), ratio_y_range=[1e-1,1e1],
plot_residual_temp=False, residual_temp=default_single_temp, residual_temp_y_intercept=default_single_temp_y_intercept, multi_temp_fit=False,
take_ratio=False, s2n_cut=3.0, makeplot=True): #Make a Boltzmann diagram comparing a model (ie. Cloudy) to data, and show residuals, show even and odd vibration states for clarity
fname = self.path + '_'+name+'.pdf'
h2_model = copy.deepcopy(h2_model_input) #Copy h2 model obj so not to modify the original
show_these_v = [] #Set up a blank vibration array to automatically fill
for v in range(14): #Loop through and check each set of states of constant v
in_this_v = self.V.u == v
if any(self.s2n[self.V.u == v] >= s2n_cut): #If anything is found to be plotted in the data
show_these_v.append(v) #store this vibration state for later plotting
max_J = max(self.J.u[in_this_v & (self.s2n >= s2n_cut)])
if max_J > 6: #If data probes in this rotation ladder beyond J of six
h2_model.N[in_this_v & (self.J.u > max_J+1)] = 0. #Blank out model where > J + 1 max
else:
h2_model.N[in_this_v & (self.J.u > 7)] = 0.
else:
h2_model.N[in_this_v] = 0. #Blank out model if no datapoints are in this rotation ladder
self.model_ratio = self.N / h2_model.N #Calculate and store ratio of data/model for later use to make tables or whatever the user wants to script up
ratio = copy.deepcopy(self)
if take_ratio: #If user actually wants to take a ratio
ratio.N = (self.N / h2_model.N) #Take the ratio of data to model column densities
ratio.Nsigma = self.Nsigma / h2_model.N
chi_sq = nansum(log10(ratio.N[ratio.s2n > s2n_cut])**2) #Calculate chisq from ratios
print('Compare model for ' + name + ' sum(log10(ratios)**2) = ', chi_sq)
else: #If user does not specify actually taking a ratio
ratio.N = self.N - h2_model.N
ratio.Nsigma = self.Nsigma
chi_sq = nan #No chi-squared is defined for the difference case, so return nan
#ratio.Nsigma = (self.Nsigma /h2_model.N)
if makeplot:
with PdfPages(fname) as pdf: #Make a pdf
### Set up subplotting
subplots(2, sharex="col") #Set all plots to share the same x axis
tight_layout(rect=[0.03, 0.00, 1.0, 1.0]) #Try filling in white space
fig = gcf()#Adjust aspect ratio
fig.set_size_inches(figsize) #Adjust aspect ratio
subplots_adjust(hspace=0.037, wspace=0) #Set all plots to have no space between them vertically
gs = GridSpec(2, 1, height_ratios=[1, 1]) #Set up grid for unequal sized subplots
### Left side
subplot(gs[0])
h2_model.v_plot(V=show_these_v, orthopara_fill=False, empty_fill=True, show_legend=False, savepdf=False, show_labels=False, line=True,y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=True) #Plot model points as empty symbols
self.v_plot(V=show_these_v, orthopara_fill=False, full_fill=True, show_legend=True, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False, s2n_cut=s2n_cut)
ylabel("Column Density ln(N$_u$/g$_u$)-ln(N$_{r}$/g$_{r}$)", fontsize=18)
V = range(1,14)
frame = gca() #Turn off axis number labels
setp(frame.get_xticklabels(), visible=False)
#subplot(gs[3])
subplot(gs[1])
plot([0,100000],[1,1], linestyle='--', color='gray')
ratio.v_plot(V=show_these_v, orthopara_fill=False, full_fill=True, show_legend=False, savepdf=False, no_zero_x=True, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=True,
plot_single_temp=plot_residual_temp, single_temp=residual_temp, single_temp_y_intercept=residual_temp_y_intercept, multi_temp_fit=multi_temp_fit, show_ratio=True, s2n_cut=s2n_cut, y_range=ratio_y_range)
if take_ratio:
#ylabel("Data/Model ratio ln(N$_u$/g$_u$)-ln(N$_{m}$/g$_{m}$)", fontsize=18)
ylabel("Data/Model Ratio", fontsize=18)
else:
ylabel("Data-Model ln((N$_u$-$N_m$)/g$_u$)-ln(N$_{r}$/g$_{r}$)", fontsize=18)
xlabel("Excitation Energy (E$_u$/k) [K]", fontsize=18)
pdf.savefig()
return(chi_sq) #Return chisq value to quantify the goodness of fit
def v_plot_with_model(self, h2_model_input, x_range=[0.0,55000.0], y_range=array([-6.25,5.5]), s2n_cut=3.0, **args): #Do a vplot with a model overlayed, a simple form of def compare_model for making multipaneled plots and things with your own scripts
h2_model = copy.deepcopy(h2_model_input) #Copy h2 model obj so not to modify the original
show_these_v = [] #Set up a blank vibration array to automatically fill
for v in range(14): #Loop through and check each set of states of constant v
in_this_v = self.V.u == v
if any(self.s2n[self.V.u == v] >= s2n_cut): #If anything is found to be plotted in the data
show_these_v.append(v) #store this vibration state for later plotting
max_J = max(self.J.u[in_this_v & (self.s2n >= s2n_cut)])
if max_J > 6: #If data probes in this rotation ladder beyond J of six
h2_model.N[in_this_v & (self.J.u > max_J+1)] = 0. #Blank out model where > J + 1 max
else:
h2_model.N[in_this_v & (self.J.u > 7)] = 0.
else:
h2_model.N[in_this_v] = 0. #Blank out model if no datapoints are in this rotation ladder
#tight_layout(rect=[0.03, 0.00, 1.0, 1.0]) #Try filling in white space
h2_model.v_plot(V=show_these_v, orthopara_fill=False, empty_fill=True, show_legend=False, savepdf=False, show_labels=False, line=True,y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=True) #Plot model points as lines
self.v_plot(V=show_these_v, orthopara_fill=False, full_fill=True, show_legend=False, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False, s2n_cut=s2n_cut, **args)
#ylabel("Column Density ln(N$_u$/g$_u$)-ln(N$_{r}$/g$_{r}$)", fontsize=18)
def v_plot_ratio_with_model(self, h2_model, x_range=[0.0,55000.0], y_range=array([1e-1,1e1]), s2n_cut=3.0, y_label=r'N$_{obs}$/N$_{model}$', **args):
show_these_v = [] #Set up a blank vibration array to automatically fill
for v in range(14): #Loop through and check each set of states of constant v
if any(self.s2n[self.V.u == v] >= s2n_cut): #If anything is found to be plotted in the data
show_these_v.append(v) #store this vibration state for later plotting
self.model_ratio = self.N / h2_model.N #Calculate and store ratio of data/model for later use to make tables or whatever the user wants to script up
ratio = copy.deepcopy(self)
ratio.N = (self.N / h2_model.N) #Take the ratio of data to model column densities
ratio.Nsigma = self.Nsigma / h2_model.N
#chi_sq = nansum(log10(ratio.N[ratio.s2n > s2n_cut])**2) #Calculate chisq from ratios
plot([0,100000],[1,1], linestyle='--', color='gray')
ratio.v_plot(V=show_these_v, orthopara_fill=False, full_fill=True, show_legend=False, savepdf=False, no_zero_x=True, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False,
show_ratio=True, s2n_cut=s2n_cut, y_range=y_range, **args)
#ylabel(y_label, fontsize=18)
#xlabel("Excitation Energy (E$_u$/k) [K]", fontsize=18)
def plot_individual_ladders(self, x_range=[0.,0.0], s2n_cut = 0.0): #Plot set of individual ladders in the excitation diagram
fname = self.path + '_individual_ladders_excitation_diagrams.pdf'
with PdfPages(fname) as pdf: #Make a pdf
V = range(0,14)
for i in V:
if any((self.V.u == i) & isfinite(self.N) & (self.N > 0.0)):
self.v_plot(V=[i], show_upper_limits=False, show_labels=True, rot_temp=False, show_legend=True, savepdf=False, s2n_cut=s2n_cut, no_zero_x=True)
pdf.savefig()
def plot_rot_temp_fit(self, s2n_cut = 3.0, figsize=[21.0,15], x_range=[0.0,55000.0], y_range=array([-5.25,15.25])): #Fit and plot rotation temperatures then show their residuals
fname = self.path + '_rotation_temperature_fits_and_residuals_all.pdf' #Set filename
with PdfPages(fname) as pdf: #Make a pdf
### Set up subplotting
subplots(2, sharex="col") #Set all plots to share the same x axis
tight_layout(rect=[0.03, 0.00, 1.0, 1.0]) #Try filling in white space
fig = gcf()#Adjust aspect ratio
fig.set_size_inches(figsize) #Adjust aspect ratio
subplots_adjust(hspace=0, wspace=0) #Set all plots to have no space between them vertically
gs = GridSpec(2, 1, height_ratios=[1, 1]) #Set up grid for unequal sized subplots
### Left side
subplot(gs[0])
V = range(0,15)
self.v_plot(V=V, orthopara_fill=False, full_fill=True, show_legend=True, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False,
rot_temp=True, rot_temp_residuals=False, s2n_cut=s2n_cut)
ylabel("Column Density ln(N$_u$/g$_u$)-ln(N$_{r}$/g$_{r}$)", fontsize=18)
frame = gca() #Turn off axis number labels
setp(frame.get_xticklabels(), visible=False)
#subplot(gs[3])
subplot(gs[1])
self.v_plot(V=V, orthopara_fill=False, full_fill=True, show_legend=False, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False,
rot_temp=False, rot_temp_residuals=True, s2n_cut=s2n_cut)
ylabel("Column Density Ratio of Data to Model ln(N$_u$/g$_u$)-ln(N$_{m}$/g$_{m}$)]", fontsize=18)
xlabel("Excitation Energy (E$_u$/k) [K]", fontsize=18)
pdf.savefig()
### Middle
# V=[1,3,5,7,9,11,13]
# subplot(gs[1])
# self.v_plot(V=V, orthopara_fill=False, full_fill=True, show_legend=True, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False,
# rot_temp=True, rot_temp_residuals=False, s2n_cut=s2n_cut)
# frame = gca() #Turn off axis number labels
# setp(frame.get_xticklabels(), visible=False)
# setp(frame.get_yticklabels(), visible=False)
# subplot(gs[4])
# frame = gca() #Turn off axis number labels
# setp(frame.get_yticklabels(), visible=False)
# self.v_plot(V=V, orthopara_fill=False, full_fill=True, show_legend=False, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False,
# rot_temp=False, rot_temp_residuals=True, s2n_cut=s2n_cut)
# xlabel("Excitation Energy (E$_u$/k) [K]", fontsize=18)
# ### Right side
# V=[0,2,4,6,8,10,12,14]
# subplot(gs[2])
# self.v_plot(V=V, orthopara_fill=False, full_fill=True, show_legend=True, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False,
# rot_temp=True, rot_temp_residuals=False, s2n_cut=s2n_cut)
# frame = gca() #Turn off axis number labels
# setp(frame.get_xticklabels(), visible=False)
# setp(frame.get_yticklabels(), visible=False)
# subplot(gs[5])
# frame = gca() #Turn off axis number labels
# setp(frame.get_yticklabels(), visible=False)
# self.v_plot(V=V, orthopara_fill=False, full_fill=True, show_legend=False, savepdf=False, y_range=y_range, x_range=x_range, clear=False, show_axis_labels=False, no_legend_label=False,
# rot_temp=False, rot_temp_residuals=True, s2n_cut=s2n_cut)
# xlabel("Excitation Energy (E$_u$/k) [K]", fontsize=18)
#pdf.savefig()
#OLD VERSION
# with PdfPages(fname) as pdf: #Make a pdf
# self.v_plot(V=V, show_labels=False, rot_temp=True, rot_temp_residuals=False, savepdf=False, s2n_cut=s2n_cut)
# pdf.savefig()
# for i in V:
# fname = self.path + '_rotation_temperature_fits_V'+str(i)+'.pdf'
# with PdfPages(fname) as pdf: #Make a pdf
# title('V = '+str(i))
# self.v_plot(V=[i], show_labels=True, rot_temp=True, rot_temp_residuals=False, savepdf=False, s2n_cut=s2n_cut, no_zero_x=True) #Plot single rotation ladder + rot temp fit
# pdf.savefig()
# fname = self.path + '_rotation_temperature_residuals_V'+str(i)+'.pdf'
# with PdfPages(fname) as pdf: #Make a pdf
# title('V = '+str(i)+' residuals')
# self.v_plot(V=[i], show_labels=True, rot_temp=False, rot_temp_residuals=True, savepdf=False, s2n_cut=s2n_cut, ignore_x_range=True, show_legend=False) #Plot residuals
# pdf.savefig()
# WORK IN PROGRESS, NEED TO ALLOW FITTING OF VIBRATIONAL TEMPERATURES
# def plot_vib_temp_fit(self, s2n_cut = 3.0, V = range(0,14)): #Fit and plot rotation temperatures then show their residuals
# fname = self.path + '_rotation_temperature_fits_and_residuals_all.pdf' #Set filename
# with PdfPages(fname) as pdf: #Make a pdf
# self.v_plot(V=V, show_labels=False, rot_temp=True, rot_temp_residuals=False, savepdf=False, s2n_cut=s2n_cut)
# pdf.savefig()
# for i in V:
# fname = self.path + '_rotation_temperature_fits_V'+str(i)+'.pdf'
# with PdfPages(fname) as pdf: #Make a pdf
# title('V = '+str(i))
# self.v_plot(V=[i], show_labels=True, rot_temp=True, rot_temp_residuals=False, savepdf=False, s2n_cut=s2n_cut, no_zero_x=True) #Plot single rotation ladder + rot temp fit
# pdf.savefig()
# fname = self.path + '_rotation_temperature_residuals_V'+str(i)+'.pdf'
# with PdfPages(fname) as pdf: #Make a pdf
# title('V = '+str(i)+' residuals')
# self.v_plot(V=[i], show_labels=True, rot_temp=False, rot_temp_residuals=True, savepdf=False, s2n_cut=s2n_cut, no_zero_x=True, show_legend=False) #Plot residuals
# pdf.savefig()
#Make simple plot first showing all the different rotational ladders for a constant V
def v_plot(self, plot_single_temp = False, show_upper_limits = False, nocolor = False, V=[-1], s2n_cut=-1.0, normalize=True, savepdf=False, orthopara_fill=True,
empty_fill =False, full_fill=False, show_labels=False, x_range=[0.,0.], y_range=[0.,0.], rot_temp=False, show_legend=True, rot_temp_energy_limit=100000.,
rot_temp_residuals=False, fname='', clear=True, legend_fontsize=14, line=False, subtract_single_temp = False, single_temp=default_single_temp, no_legend_label=False,
single_temp_y_intercept=default_single_temp_y_intercept, no_zero_x = False, show_axis_labels=True, ignore_x_range=False, label_J=False, label_V=False, multi_temp_fit=False, single_temp_fit=False,
single_color='none', show_ratio=False, symbsize = 9, single_temp_use_sigma=False, semilog=False):
if fname == '':
fname=self.path + '_excitation_diagram.pdf'
with PdfPages(fname) as pdf: #Make a pdf
nonzero = self.N != 0.0
if clear: #User can specify if they want to clear the plot
clf()
labelsize = 18 #Size of text for labels
if orthopara_fill: #User can specify how they want symbols to be filled
orthofill = 'full' #How symbols on excitation diagram are filled, 'full' vs 'none'
parafill = 'none'
elif empty_fill:
orthofill = 'none' #How symbols on excitation diagram are filled, 'full' vs 'none'
parafill = 'none'
else:
orthofill = 'full' #How symbols on excitation diagram are filled, 'full' vs 'none'
parafill = 'full'
if V == [-1]: #If user does not specify a specific set of V states to plot...
use_upper_v_states = unique(self.V.u) #plot every one found
else: #or else...
use_upper_v_states = V #Plot upper V states specified by the user
if subtract_single_temp: #If user wants to subtract the single temperature
x = arange(0,200000, 10) #Set up an x axis
interp_single_temp = interp1d(x, single_temp_y_intercept - (x / single_temp), kind='linear') #create interpolation object for the single temperature
data_single_temp = interp_single_temp(self.T) #Create array of the single temperature for subtraction from the column density later on
#log_N = log(self.N/self.g) - data_single_temp #Log of the column density
log_N = log((self.N/self.g) - exp(data_single_temp))
#plus_one_sigma = abs(log_N + data_single_temp - log((self.N + self.Nsigma)/self.g) )
#minus_one_sigma = abs(log_N + data_single_temp - log((self.N + self.Nsigma)/self.g) )
#upper_limits = log(self.Nsigma*3.0/self.g) - data_single_temp
plus_one_sigma = abs(log_N - log((self.N + self.Nsigma)/self.g) )
minus_one_sigma = abs(log_N - log((self.N - self.Nsigma)/self.g) )
upper_limits = log((self.Nsigma*3.0/self.g) - exp(data_single_temp))
elif show_ratio: #If user is plotting column density ratios, keep things in linear form and use linear axes on a log scale
log_N = self.N #Not really log N but you get the idea
log_N[log_N<=0.] = nan #Nan out zero and negative values, since they are essentialy meaningless anyway and won't plot on a log plot
plus_one_sigma = self.Nsigma #Set error bars in linear space
minus_one_sigma = self.Nsigma
find_negative_sigma = log_N < minus_one_sigma #Find negative sigma
minus_one_sigma[find_negative_sigma] = 1e-1 * log_N[find_negative_sigma] #Just make negative error bars 1/10 of data so it can still be plotted on log plot
semilogy() #Set y axis to be semi log
elif rot_temp_residuals: #If user has previously calculated rotation temperatures for each ladder, here they can show the residuals after subtracting the linear fits
log_N = log(self.res_rot_T)
plus_one_sigma = abs(log_N - log(self.res_rot_T + (self.Nsigma/self.g)))
minus_one_sigma = abs(log_N - log(self.res_rot_T - (self.Nsigma/self.g)))
upper_limits = log(self.Nsigma*3.0/self.g)
else: #Default to simply plotting the column densities and their error bars
log_N = log(self.N/self.g) #Log of the column density
plus_one_sigma = abs(log_N - log((self.N + self.Nsigma)/self.g) )
minus_one_sigma = abs(log_N - log((self.N - self.Nsigma)/self.g) )
upper_limits = log(self.Nsigma*3.0/self.g)
#plus_one_sigma = abs(log_N - data_single_temp - log(self.N - exp(data_single_temp) + self.Nsigma)) #Upper 1 sigma errors in log space
#minus_one_sigma = abs(log_N - data_single_temp - log(self.N - exp(data_single_temp) - self.Nsigma)) #Lower 1 sigma errors in log space
if semilog: #If user wants to make a semi-log plot, convert log(N/g) to N/g
log_N = e**log_N
for i in use_upper_v_states:
if single_color != 'none': #If user specifies a specific color, use that single color
current_color = single_color
current_symbol = 'o'
elif nocolor: #If user specifies no color,
current_color = 'gray'
current_symbol = symbol_list[i]
else: #Or else by default use colors from the color list defined at the top of the code
current_color = color_list[i]
current_symbol = 'o'
if line: #if user specifies using lines
#current_symbol = current_symbol + '-' #Draw a line between each symbol
current_symbol = '-'
data_found = (self.V.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Search for data in this vibrational state
#if any(data_found) and not show_ratio: #If any data is found in this vibrational state, add a line on the legend for this state
if any(data_found): #If any data is found in this vibrational state, add a line on the legend for this state
if no_legend_label:
use_label = '_nolegend_'
else:
use_label = ' '
errorbar([nan], [nan], yerr=1.0, fmt=current_symbol, color=current_color, label=use_label, capthick=3, elinewidth=2, markersize=symbsize, fillstyle=orthofill) #Do empty plot to fill legend
ortho = (self.J.u % 2 == 1) & (self.V.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Select only states for ortho-H2, which has the proton spins aligned so J can only be odd (1,3,5...)
ortho_upperlimit = (self.J.u % 2 == 1) & (self.V.u == i) & (self.s2n <= s2n_cut) & (self.N > 0.) #Select ortho-H2 lines where there is no detection (e.g. S/N <= 1)
if any(ortho): #If datapoints are found...
if nansum(self.s2n[ortho]) == 0.:
plot(self.T[ortho], log_N[ortho], current_symbol, color=current_color, markersize=symbsize, fillstyle=orthofill) #Plot data + error bars
else:
y_error_bars = [minus_one_sigma[ortho], plus_one_sigma[ortho]] #Calculate lower and upper ends on error bars
errorbar(self.T[ortho], log_N[ortho], yerr=y_error_bars, fmt=current_symbol, color=current_color, capthick=3, elinewidth=2, markersize=symbsize, fillstyle=orthofill) #Plot data + error bars
if show_upper_limits:
test = errorbar(self.T[ortho_upperlimit], upper_limits[ortho_upperlimit], yerr=1.0, fmt=current_symbol, color=current_color, capthick=3, elinewidth=2,uplims=True, markersize=symbsize, fillstyle=orthofill) #Plot 1-sigma upper limits on lines with no good detection (ie. S/N < 1.0)
if show_labels: #If user wants to show labels for each of the lines
for j in range(len(log_N[ortho])): #Loop through each point to label
if (y_range[1] == 0 or (log_N[ortho][j] > y_range[0] and log_N[ortho][j] < y_range[1])) and (x_range[1] == 0 or (self.T[ortho][j] > x_range[0] and self.T[ortho][j] < x_range[1])): #check to make sure label is in plot y range
text(self.T[ortho][j], log_N[ortho][j], ' '+self.label[ortho][j], fontsize=8, verticalalignment='bottom', horizontalalignment='left', color='black') #Label line with text
if label_J: #If user specifies labels for J
for j in range(len(log_N[ortho])): #Loop through each point to label
if y_range[1] == 0 or (log_N[ortho][j] > y_range[0] and log_N[ortho][j] < y_range[1]): #check to make sure label is in plot y range
text(self.T[ortho][j], log_N[ortho][j], ' '+str(self.J.u[ortho][j]), fontsize=8, verticalalignment='bottom', horizontalalignment='left', color='black') #Label line with J upper level
#print('For ortho v=', i)
if rot_temp and len(log_N[ortho][isfinite(log_N[ortho])]) > 1: #If user specifies fit rotation temperature
#stop()
rt, srt, residuals, sigma_residuals = self.fit_rot_temp(self.T[ortho], log_N[ortho], y_error_bars, s2n_cut=s2n_cut, color=current_color, dotted_line=False, rot_temp_energy_limit=rot_temp_energy_limit) #Fit rotation temperature
self.rot_T[ortho] = rt #Save rotation temperature for individual lines
self.sig_rot_T[ortho] = srt #Save rotation temperature uncertainty for individual lines
self.res_rot_T[ortho] = residuals #Save residuals for individual data points from the rotation temperature fit
self.sig_res_rot_T[ortho] = sigma_residuals #Save the uncertainty in the residuals from the rotation temperature fit (point uncertainty and fit uncertainty added in quadrature)
for i in use_upper_v_states:
if single_color != 'none': #If user specifies a specific color, use that single color
current_color = single_color
current_symbol = '^'
elif nocolor:
current_color = 'Black'
current_symbol = symbol_list[i]
else: #Or else by default use colors from the color list defined at the top of the code
current_color = color_list[i]
current_symbol = '^'
if line: #if user specifies using lines
#current_symbol = current_symbol + ':' #Draw a line between each symbol
current_symbol = ':'
data_found = (self.V.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Search for data in this vibrational state
#if any(data_found) and not show_ratio: #If any data is found in this vibrational state, add a line on the legend for this state
if any(data_found): #If any data is found in this vibrational state, add a line on the legend for this state
if no_legend_label: #Check if user wants legend labels; if not, suppress the label
use_label = '_nolegend_'
else:
use_label = 'v='+str(i)
errorbar([nan], [nan], yerr=1.0, fmt=current_symbol, color=current_color, label=use_label, capthick=3, elinewidth=2, markersize=symbsize, fillstyle=parafill) #Do empty plot to fill legend
para = (self.J.u % 2 == 0) & (self.V.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Select only states for para-H2, which has the proton spins anti-aligned so J can only be even (0,2,4,...)
para_upperlimit = (self.J.u % 2 == 0) & (self.V.u == i) & (self.s2n <= s2n_cut) & (self.N > 0.) #Select para-H2 lines where there is no detection (e.g. S/N <= 1)
if any(para): #If datapoints are found...
    y_error_bars = [minus_one_sigma[para], minus_one_sigma[para]] #Calculate upper and lower ends on error bars (defined before branching so the rotation temperature fit below can always use them)
    if nansum(self.s2n[para]) == 0.:
        plot(self.T[para], log_N[para], current_symbol, color=current_color, markersize=symbsize, fillstyle=parafill) #Plot data without error bars
    else:
        #errorbar(self.T[para], log_N, yerr=y_error_bars, fmt=current_symbol, color=current_color, label='v='+str(i), capthick=3, markersize=symbsize, fillstyle=parafill) #Plot data + error bars
        errorbar(self.T[para], log_N[para], yerr=y_error_bars, fmt=current_symbol, color=current_color, capthick=3, elinewidth=2, markersize=symbsize, fillstyle=parafill) #Plot data + error bars
if show_upper_limits:
test = errorbar(self.T[para_upperlimit], upper_limits[para_upperlimit], yerr=1.0, fmt=current_symbol, color=current_color, capthick=3, elinewidth=2, uplims=True, markersize=symbsize, fillstyle=parafill) #Plot 1-sigma upper limits on lines with no good detection (i.e. S/N below s2n_cut)
if show_labels: #If user wants to show labels for each of the lines
for j in range(len(log_N[para])): #Loop through each point to label
if (y_range[1] == 0 or (log_N[para][j] > y_range[0] and log_N[para][j] < y_range[1])) and (x_range[1] == 0 or (self.T[para][j] > x_range[0] and self.T[para][j] < x_range[1])): #check to make sure label is in plot y range
text(self.T[para][j], log_N[para][j], ' '+self.label[para][j], fontsize=8, verticalalignment='bottom', horizontalalignment='left', color='black') #Label line with text
if label_J: #If user specifies labels for J
for j in range(len(log_N[para])): #Loop through each point to label
if y_range[1] == 0 or (log_N[para][j] > y_range[0] and log_N[para][j] < y_range[1]): #check to make sure label is in plot y range
text(self.T[para][j], log_N[para][j], ' '+str(self.J.u[para][j]), fontsize=8, verticalalignment='bottom', horizontalalignment='left', color='black') #Label line with J upper level
#print('For para v=', i)
if rot_temp and len(log_N[para][isfinite(log_N[para])]) > 1: #If user specifies fit rotation temperature
rt, srt, residuals, sigma_residuals = self.fit_rot_temp(self.T[para], log_N[para], y_error_bars, s2n_cut=s2n_cut, color=current_color, dotted_line=True, rot_temp_energy_limit=rot_temp_energy_limit) #Fit rotation temperature
self.rot_T[para] = rt #Save rotation temperature for individual lines
self.sig_rot_T[para] = srt #Save rotation temperature uncertainty for individual lines
self.res_rot_T[para] = residuals #Save residuals for individual data points from the rotation temperature fit
self.sig_res_rot_T[para] = sigma_residuals #Save the uncertainty in the residuals from the rotation temperature fit (point uncertainty and fit uncertainty added in quadrature)
elif rot_temp and len(log_N[para][isfinite(log_N[para])]) <= 1:
self.rot_T[para] = 0 #Save rotation temperature for individual lines
self.sig_rot_T[para] = 0 #Save rotation temperature uncertainty for individual lines
self.res_rot_T[para] = ones(len(log_N[para])) #Save residuals for individual data points from the rotation temperature fit
self.sig_res_rot_T[para] = self.Nsigma[para]/self.g[para] #Save the uncertainty in the residuals from the rotation temperature fit (point uncertainty and fit uncertainty added in quadrature)
tick_params(labelsize=14) #Set tick mark label size
if show_axis_labels: #By default print the axis labels, but the user can turn these off if so desired (replacing them with custom labels if needed)
if normalize: #If normalizing to the 1-0 S(1) line
if not semilog:
ylabel("Column Density ln(N$_u$/g$_u$)-ln(N$_{r}$/g$_{r}$)", fontsize=labelsize)
else:
ylabel("Column Density (N$_u$/g$_u$)-(N$_{r}$/g$_{r}$)", fontsize=labelsize)
else: #If using absolute flux calibrated data
if not semilog:
ylabel("Column Density ln(N$_u$/g$_u$) [cm$^{-2}$]", fontsize=labelsize)
else:
ylabel("Column Density (N$_u$/g$_u$) [cm$^{-2}$]", fontsize=labelsize)
xlabel("Excitation Energy (E$_u$/k) [K]", fontsize=labelsize, labelpad=4)
if x_range[1] == 0.0: #If user does not specify a range for the x-axis
if any(self.T[self.s2n >= s2n_cut]) and not no_zero_x: #Catch error
goodpix = (self.s2n >= s2n_cut)
xlim([0,1.4*max(self.T[goodpix])]) #Autoscale with left side of x set to zero
elif any(self.T[self.s2n >= s2n_cut]) and no_zero_x: #If user does not want left side of x set to zero
goodpix = (self.s2n >= s2n_cut) & (self.V.u == V[0])
xlim([0.9*min(self.T[goodpix]), 1.1*max(self.T[goodpix])]) #Autoscale with left side of x not fixed at zero
elif ignore_x_range:
pass #Do nothing; we are ignoring the x range here
else: #If no points are actually found, just set the limit here.
xlim([0,70000.0])
else: #Else if user specifies range
xlim(x_range) #Use user specified range for x-axis
if y_range[1] != 0.0: #If user specifies a y axis limit, use it
ylim(y_range) #Use user specified y axis range
if label_V: #Loop through and label every vibration level (rotation ladder), if the user sets label_V = True
for i in use_upper_v_states:
if single_color != 'none': #If user specifies a specific color, use that single color
current_color = single_color
elif nocolor: #If user specifies no color,
current_color = 'gray'
else: #Or else by default use colors from the color list defined at the top of the code
current_color = color_list[i]
data_found = (self.V.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Search for data in this vibrational state
if sum(data_found) > 1: #Only add label if any data is found
xposition = self.T[data_found][0]
#yposition = log_N[data_found][0]
text(xposition, y_range[1]*0.9, 'v='+str(i), fontsize=12, verticalalignment='top', horizontalalignment='left', color=current_color, rotation=90) #Label line with J upper level
if show_legend: #If user does not turn off showing the legend
legend(ncol=2, fontsize=legend_fontsize, numpoints=1, columnspacing=-0.5, title='ortho para ladder', loc="upper right", bbox_to_anchor=(1,1))
if plot_single_temp: #Plot a single temperature line for comparison, if specified
x = arange(0,20000, 10)
plot(x, single_temp_y_intercept - (x / single_temp), linewidth=2, color='orange')
midpoint = size(x)/2
text(0.7*x[int(midpoint)], 0.7*(single_temp_y_intercept - (x[int(midpoint)] / single_temp)), "T = "+str(single_temp)+" K", color='orange')
if multi_temp_fit: #If user specifies they want to fit a multi temperature gas
goodpix = (self.s2n > 5.0) & (self.N > 0.)
if -1 not in V:
for i in range(15): #Use only levels in the listed vibration levels
if i not in V:
goodpix[self.V.u == i] = False
x = self.T[goodpix]
y = log(self.N[goodpix]/self.g[goodpix])
vary_y_intercept = 7.0
vary_temp = 3000.0
vary_coeff = 0.6
guess = array([15.0, 0.85, 0.15, 1e-8, 350.0, 650.0, 5500.0])
upper_bound = guess + array([vary_y_intercept, vary_coeff, vary_coeff, vary_coeff, vary_temp, vary_temp, vary_temp])
lower_bound = guess - array([vary_y_intercept, vary_coeff, vary_coeff, vary_coeff, vary_temp, vary_temp, vary_temp])
fit, cov = curve_fit(multi_temp_func, x, y, guess, bounds=[lower_bound, upper_bound])
b = fit[0]
c = [fit[1], fit[2], fit[3]]
T = [fit[4], fit[5], fit[6]]
x = arange(0.0,70000.0,0.1)
plot(x, b+log(c[0]*e**(-x/T[0]) + c[1]*e**(-x/T[1])+ c[2]*e**(-x/T[2])),'--', color='Black', linewidth=2)# + c[3]*e**(-x/T[3]) + c[4]*e**(-x/T[4]) + + c[5]*e**(-x/T[5])))
print('Results from temperature fit to Boltzmann diagram data:')
print('b = ', b)
print('c = ', c)
print('T = ', T)
if single_temp_fit: #If user specifies they want to do a single temperature fit (ie. for shocks)
goodpix = (self.s2n > s2n_cut) & (self.N > 0.) & (self.T >= x_range[0]) & (self.T <= x_range[1])
if -1 not in V:
for i in range(15): #Use only levels in the listed vibration levels
if i not in V:
goodpix[self.V.u == i] = False
#x = self.T[goodpix]
#y = log(self.N[goodpix]/self.g[goodpix])
#sigma = minus_one_sigma[goodpix]
# vary_y_intercept = 40.0
# vary_temp = 3000.0
# guess = array([10.0, 1000.0])
# upper_bound = guess + array([vary_y_intercept, vary_temp])
# lower_bound = guess - array([vary_y_intercept, vary_temp])
# # if single_temp_use_sigma: #If user specifies using the statistical 1 sigma uncertainity in the fit
# fit, cov = curve_fit(single_temp_func, x, y, guess, sigma, bounds=[lower_bound, upper_bound])
# else: #Don't use statistical sigma in fit
# fit, cov = curve_fit(single_temp_func, x, y, guess, bounds=[lower_bound, upper_bound])
# b, T = fit
# b_err, T_err = sqrt(diag(cov))
# line_init = models.Linear1D(intercept=guess[0], slope=guess[1])
# fitter = fitting.LinearLSQFitter(calc_uncertainties=True)
# fit = fitter(line_init, x, y, weights=1.0/sigma)
# b = fit.intercept.value
# T = -1.0/fit.slope.value
# if fit.slope.std != None:
# slope_std = fit.slope.std
# b_err = fit.intercept.std
# else:
# slope_std = two_point_slope_uncertainity(x,y,sigma)
# b_err = -999.0
# T_err_plus = abs(T - (-1.0/(fit.slope.value-slope_std))) #Calcualte both possible T_errs
# T_err_minus = abs(T - (-1.0/(fit.slope.value+slope_std)))
b, T, b_err, T_err = fit_exponential_for_temp(self.T[goodpix], self.N[goodpix]/self.g[goodpix], self.Nsigma[goodpix]/self.g[goodpix])
#b, T, b_err, T_err = wiggle_bootstrap_temp_fit(x, y, guess, sigma, [lower_bound, upper_bound], n=1000)
x = arange(min(self.T[goodpix])-1000.0,max(self.T[goodpix])+1000.0,0.1)
plot(x, b-x/T,'--', color='Black', linewidth=2)
print('Results from temperature fit ')
print('b = ', b, ' +/- ', b_err)
print('T = ', T, ' +/- ', T_err)
#stop()
self.model_ratio = self.N / (self.g*exp(b-self.T/T)) #Calculate and store the ratio of each measured column density to the single-temperature model prediction
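#The fitted line is ln(N_u/g_u) = b - (E_u/k)/T, a single excitation temperature
#Boltzmann population; model_ratio near 1 flags levels consistent with one
#temperature, while large deviations point to additional excitation mechanisms.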
#show()
if semilog: #Make a semilog plot instead of using log(N/g) if that is what the user wants
semilogy()
draw()
if savepdf:
pdf.savefig() #Add in the pdf
#stop()
if single_temp_fit:
#return T, T_err_plus, T_err_minus
return T, T_err
#Plot a rotation diagram: column density vs. upper rotation state J_u
def rotation_plot(self, show_upper_limits = True, nocolor = False, V=[-1], s2n_cut=-1.0, normalize=True, savepdf=True, orthopara_fill=True, empty_fill =False, full_fill=False,
show_labels=False, x_range=[0.,0.], y_range=[0.,0.], show_legend=True, fname='', clear=True, legend_fontsize=14):
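#Sketch of what this method does: for each upper vibrational state, plot ln(N_u/g_u)
#against the upper rotation state J_u, with ortho-H2 (odd J_u) and para-H2 (even J_u)
#drawn as separate symbol sets, optional 3-sigma upper limits for undetected lines,
#and the figure saved to <path>_rotation_diagram.pdf unless fname is supplied.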
if fname == '':
fname = self.path + '_rotation_diagram.pdf'
with PdfPages(fname) as pdf: #Make a pdf
nonzero = self.N != 0.0
if clear: #User can specify if they want to clear the plot
clf()
symbsize = 7 #Size of symbols on excitation diagram
labelsize = 18 #Size of text for labels
if orthopara_fill: #User can specify how they want symbols to be filled
orthofill = 'full' #How symbols on excitation diagram are filled, 'full' vs 'none'
parafill = 'none'
elif empty_fill:
orthofill = 'none' #How symbols on excitation diagram are filled, 'full' vs 'none'
parafill = 'none'
else:
orthofill = 'full' #How symbols on excitation diagram are filled, 'full' vs 'none'
parafill = 'full'
if V == [-1]: #If user does not specify a specific set of V states to plot...
use_upper_v_states = unique(self.V.u) #plot every one found
else: #or else...
use_upper_v_states = V #Plot upper V states specified by the user
for i in use_upper_v_states:
if nocolor: #If user specifies no color,
current_color = 'gray'
current_symbol = symbol_list[i]
else: #Or else by default use colors from the color list defined at the top of the code
current_color = color_gradient[i]
current_symbol = 'o'
ortho = (self.J.u % 2 == 1) & (self.V.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Select only states for ortho-H2, which has the proton spins aligned so J can only be odd (1,3,5...)
ortho_upperlimit = (self.J.u % 2 == 1) & (self.V.u == i) & (self.s2n <= s2n_cut) & (self.N > 0.) #Select ortho-H2 lines where there is no detection (e.g. S/N <= 1)
if any(ortho): #If datapoints are found...
log_N = log(self.N[ortho]/self.g[ortho]) #Log of the column density
if nansum(self.s2n[ortho]) == 0.:
plot(self.J.u[ortho], log_N, current_symbol, color=current_color, label=' ', markersize=symbsize, fillstyle=orthofill) #Plot data + error bars
else:
y_error_bars = [abs(log_N - log((self.N[ortho]-self.Nsigma[ortho])/self.g[ortho])), abs(log_N - log((self.N[ortho]-self.Nsigma[ortho])/self.g[ortho]))] #Calculate error bars (symmetric in log space, both sides taken from the lower 1-sigma bound)
errorbar(self.J.u[ortho], log_N, yerr=y_error_bars, fmt=current_symbol, color=current_color, label=' ', capthick=3, elinewidth=2, markersize=symbsize, fillstyle=orthofill) #Plot data + error bars
if show_upper_limits:
test = errorbar(self.J.u[ortho_upperlimit], log(self.Nsigma[ortho_upperlimit]*3.0/self.g[ortho_upperlimit]), yerr=1.0, fmt=current_symbol, color=current_color, capthick=3, elinewidth=2, uplims=True, markersize=symbsize, fillstyle=orthofill) #Plot 3-sigma upper limits on lines with no good detection (i.e. S/N below s2n_cut)
if show_labels: #If user wants to show labels for each of the lines
for j in range(len(log_N)): #Loop through each point to label
text(self.J.u[ortho][j], log_N[j], ' '+self.label[ortho][j], fontsize=8, verticalalignment='bottom', horizontalalignment='left', color='black') #Label line with text
#print('For ortho v=', i)
else: #Else if no datapoints are found...
errorbar([nan], [nan], yerr=1.0, fmt=current_symbol, color=current_color, label=' ', capthick=3, elinewidth=2, markersize=symbsize, fillstyle=orthofill) #Do empty plot to fill legend
for i in use_upper_v_states:
if nocolor: #If user specifies no color,
current_color = 'Black'
current_symbol = symbol_list[i]
else: #Or else by default use colors from the color list defined at the top of the code
current_color = color_gradient[i]
current_symbol = '^'
para = (self.J.u % 2 == 0) & (self.V.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Select only states for para-H2, which has the proton spins anti-aligned so J can only be even (0,2,4,...)
para_upperlimit = (self.J.u % 2 == 0) & (self.V.u == i) & (self.s2n <= s2n_cut) & (self.N > 0.) #Select para-H2 lines where there is no detection (e.g. S/N <= 1)
if any(para): #If datapoints are found...
log_N = log(self.N[para]/self.g[para]) #Log of the column density
if nansum(self.s2n[para]) == 0.:
plot(self.J.u[para], log_N, current_symbol, color=current_color, label='v='+str(i), markersize=symbsize, fillstyle=parafill) #Plot data + error bars
else:
y_error_bars = [abs(log_N - log((self.N[para]-self.Nsigma[para])/self.g[para])), abs(log_N - log((self.N[para]-self.Nsigma[para])/self.g[para]))] #Calculate error bars (symmetric in log space, both sides taken from the lower 1-sigma bound)
errorbar(self.J.u[para], log_N, yerr=y_error_bars, fmt=current_symbol, color=current_color, label='v='+str(i), capthick=3, elinewidth=2, markersize=symbsize, fillstyle=parafill) #Plot data + error bars
if show_upper_limits:
test = errorbar(self.J.u[para_upperlimit], log(self.Nsigma[para_upperlimit]*3.0/self.g[para_upperlimit]), yerr=1.0, fmt=current_symbol, color=current_color, capthick=3, elinewidth=2, uplims=True, markersize=symbsize, fillstyle=parafill) #Plot 3-sigma upper limits on lines with no good detection (i.e. S/N below s2n_cut)
if show_labels: #If user wants to show labels for each of the lines
for j in range(len(log_N)): #Loop through each point to label
text(self.J.u[para][j], log_N[j], ' '+self.label[para][j], fontsize=8, verticalalignment='bottom', horizontalalignment='left', color='black') #Label line with text
#print('For para v=', i)
else: #Else if no datapoints are found...
errorbar([nan], [nan], yerr=1.0, fmt=current_symbol, color=current_color, label='v='+str(i), capthick=3, elinewidth=2, markersize=symbsize, fillstyle=parafill) #Do empty plot to fill legend
tick_params(labelsize=14) #Set tick mark label size
if normalize: #If normalizing to the 1-0 S(1) line
ylabel("Column Density ln(N$_u$/g$_u$)-ln(N$_{r}$/g$_{r}$)", fontsize=labelsize)
else: #If using absolute flux calibrated data
ylabel("Column Density ln(N$_u$/g$_u$) [cm$^{-2}$]", fontsize=labelsize)
xlabel("Upper Rotation State J$_u$", fontsize=labelsize, labelpad=4)
if x_range[1] == 0.0: #If user does not specify a range for the x-axis
xlim([0,1.4*max(self.J.u[self.s2n >= s2n_cut])]) #Autoscale
else: #Else if user specifies range
xlim(x_range) #Use user specified range for x-axis
if y_range[1] != 0.0: #If user specifies a y axis limit, use it
ylim(y_range) #Use user specified y axis range
if show_legend: #If user does not turn off showing the legend
legend(loc=1, ncol=2, fontsize=legend_fontsize, numpoints=1, columnspacing=-0.5, title = 'ortho para ladder')
#show()
draw()
if savepdf:
pdf.savefig() #Add in the pdf
#stop()
def vibration_plot(self, show_upper_limits = True, nocolor = False, J=[-1], s2n_cut=-1.0, normalize=True, savepdf=True, empty_fill =False, full_fill=False,
show_labels=False, x_range=[0.,0.], y_range=[0.,0.], show_legend=True, fname='', clear=True, legend_fontsize=14):
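#Sketch of what this method does: for each upper rotation state J_u, plot ln(N_u/g_u)
#against the upper vibration state v_u, with optional 3-sigma upper limits for
#undetected lines, saved to <path>_vibration_diagram.pdf unless fname is supplied.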
if fname == '':
fname = self.path + '_vibration_diagram.pdf'
with PdfPages(fname) as pdf: #Make a pdf
nonzero = self.N != 0.0
if clear: #User can specify if they want to clear the plot
clf()
symbsize = 7 #Size of symbols on excitation diagram
labelsize = 18 #Size of text for labels
if empty_fill:
fill = 'none'
else:
fill = 'full'
if J == [-1]: #If user does not specify a specific set of J states to plot...
    use_upper_j_states = unique(self.J.u) #plot every one found
else: #or else...
    use_upper_j_states = J #Plot upper J states specified by the user
for i in use_upper_j_states:
if nocolor: #If user specifies no color,
current_color = 'Black'
current_symbol = symbol_list[i]
else: #Or else by default use colors from the color list defined at the top of the code
current_color = color_gradient[i]
current_symbol = 'o'
found = (self.J.u == i) & (self.s2n > s2n_cut) & (self.N > 0.) #Select detected transitions from the upper rotation state J_u = i
upperlimit = (self.J.u == i) & (self.s2n <= s2n_cut) & (self.N > 0.) #Select J_u = i lines where there is no detection (e.g. S/N <= s2n_cut)
if any(found): #If datapoints are found...
log_N = log(self.N[found]/self.g[found]) #Log of the column density
if nansum(self.s2n[found]) == 0.:
plot(self.V.u[found], log_N, current_symbol, color=current_color, label='J='+str(i), markersize=symbsize, fillstyle=fill) #Plot data without error bars
else:
y_error_bars = [abs(log_N - log((self.N[found]-self.Nsigma[found])/self.g[found])), abs(log_N - log((self.N[found]-self.Nsigma[found])/self.g[found]))] #Calculate error bars (symmetric in log space, both sides taken from the lower 1-sigma bound)
errorbar(self.V.u[found], log_N, yerr=y_error_bars, fmt=current_symbol, color=current_color, label='J='+str(i), capthick=3, elinewidth=2, markersize=symbsize, fillstyle=fill) #Plot data + error bars
if show_upper_limits:
test = errorbar(self.V.u[upperlimit], log(self.Nsigma[upperlimit]*3.0/self.g[upperlimit]), yerr=1.0, fmt=current_symbol, color=current_color, capthick=3, elinewidth=2, uplims=True, markersize=symbsize, fillstyle=fill) #Plot 3-sigma upper limits on lines with no good detection (i.e. S/N below s2n_cut)
if show_labels: #If user wants to show labels for each of the lines
for j in range(len(log_N)): #Loop through each point to label
text(self.V.u[found][j], log_N[j], ' '+self.label[found][j], fontsize=8, verticalalignment='bottom', horizontalalignment='left', color='black') #Label line with text
else: #Else if no datapoints are found...
errorbar([nan], [nan], yerr=1.0, fmt=current_symbol, color=current_color, label='J='+str(i), capthick=3, markersize=symbsize, fillstyle=fill) #Do empty plot to fill legend
tick_params(labelsize=14) #Set tick mark label size
if normalize: #If normalizing to the 1-0 S(1) line
ylabel("Column Density ln(N$_u$/g$_u$)-ln(N$_{r}$/g$_{r}$)", fontsize=labelsize)
else: #If using absolute flux calibrated data
ylabel("Column Density ln(N$_u$/g$_u$) [cm$^{-2}$]", fontsize=labelsize)
xlabel("Upper Vibration State v$_u$", fontsize=labelsize, labelpad=4)
if x_range[1] == 0.0: #If user does not specify a range for the x-axis
    xlim([0,1.4*max(self.V.u[self.s2n >= s2n_cut])]) #Autoscale (the x axis here is the upper vibration state v_u)
else: #Else if user specifies range
xlim(x_range) #Use user specified range for x-axis
if y_range[1] != 0.0: #If user specifies a y axis limit, use it
ylim(y_range) #Use user specified y axis range
if show_legend: #If user does not turn off showing the legend
legend(loc=1, ncol=1, fontsize=legend_fontsize, numpoints=1, columnspacing=-0.5)
#show()
draw()
if savepdf:
pdf.savefig() #Add in the pdf
def test_3D_plot(self, s2n_cut=-1.0, wireframe=False, surface=False, extra=[], x_range=[-1.0,15.0], y_range=[-1.0,15.0]):
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
i = (self.s2n > s2n_cut) & (self.N > 0.)
#ax.scatter(self.V.u[i], self.J.u[i], log(self.N[i]))
if surface: #By default plot as a surface plot
ax.plot_trisurf(self.V.u[i], self.J.u[i], log(self.N[i]), cmap=cm.jet, alpha=0.3)
#elif wireframe:
# ax.plot_wireframe(self.V.u[i], self.J.u[i], log(self.N[i]), rstride=2, cstride=2, cmap=cm.jet)
else: #Else plot as a scatter plot
ax.scatter(self.V.u[i], self.J.u[i], log(self.N[i]/self.g[i]))
for more_surfaces in extra: #Plot more H2 surfaces (ie. models or thermalized populations)
i = (more_surfaces.N > 0.)
ax.plot_trisurf(more_surfaces.V.u[i], more_surfaces.J.u[i], log(more_surfaces.N[i]), cmap=cm.jet, alpha=0.3)
xlim(x_range)
ylim(y_range)
ax.set_xlabel('v$_u$')
ax.set_ylabel('J$_u$')
ax.set_zlabel('Column Density ln(N$_i$/g$_i$)-ln(N$_{r}$/g$_{r}$)')
draw()
#stop()
def correct_extinction(self, s2n_cut=3.0, alpha_range=arange(0.5,3.0,0.1), A_K_range=arange(0.0,5.0,0.01)):
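#Method sketch: transitions sharing the same upper (v,J) level have intrinsic flux ratios
#fixed by their A-values, so differences in the column densities they imply measure
#differential extinction. The loop below grid-searches the power-law slope alpha and the
#K-band extinction A_K, applies A_lambda = A_K * (wave/lambda0)^-alpha, and minimizes the
#chi-squared difference in ln(N) between members of each line pair before correcting F
#and sigma with the best fit. Hedged usage sketch (the object name `h` is hypothetical):
#  h.correct_extinction(s2n_cut=3.0) #afterwards h.A_K and h.alpha hold the best fit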
#First find all the line pairs and store their indices
pair_a = [] #Store index numbers of one of a set of line pairs from the same upper state
pair_b = [] #Store index numbers of the other of a set of line pairs from the same upper state
i = (self.F != 0.0) & (self.s2n > s2n_cut) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
J_upper_found = unique(self.J.u[i]) #Find J for all (detected) transition upper states
V_upper_found = unique(self.V.u[i]) #Find V for all (detected) transition upper states
for V in V_upper_found: #Check each upper V for pairs
for J in J_upper_found: #Check each upper J for pairs
#i = (self.F != 0.0) & (self.s2n > s2n_cut) #Find only transitions where a significant measurement of the column density was made (e.g. lines where flux was measured)
match_upper_states = (self.J.u[i] == J) & (self.V.u[i] == V) #Find all transitions from the same upper J and V state
waves = self.wave[i][match_upper_states] #Store wavelengths of all found transitions
s = argsort(waves) #sort by wavelength
waves = waves[s]
labels = self.label[i][match_upper_states][s]
if len(waves) == 2 and abs(waves[0]-waves[1]) > wave_thresh: #If a single pair of lines from the same upper state is found (and the lines are far enough apart in wavelength), calculate observed vs. intrinsic ratio
pair_a.append(where(self.wave == waves[0])[0][0])
pair_b.append(where(self.wave == waves[1])[0][0])
print('Found two transitions from the same upper state to calculate extinction.')
print('They are ', labels)
print('at wavelengths ', waves)
elif len(waves) == 3: #If three lines are found from the same upper state, calculate differential extinction from differences between all three lines
#Pair 1
if abs(waves[0] - waves[1]) > wave_thresh:
pair_a.append(where(self.wave == waves[0])[0][0])
pair_b.append(where(self.wave == waves[1])[0][0])
#Pair 2
if abs(waves[0] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
pair_a.append(where(self.wave == waves[0])[0][0])
pair_b.append(where(self.wave == waves[2])[0][0])
if abs(waves[1] - waves[2]) > wave_thresh: #check if pair of lines are far enough apart
pair_a.append(where(self.wave == waves[1])[0][0])
pair_b.append(where(self.wave == waves[2])[0][0])
print('Found three transitions from the same upper state to calculate extinction.')
print('They are ', labels)
print('at wavelengths ', waves)
pair_a = array(pair_a) #Turn lists of indices into arrays of indices
pair_b = array(pair_b)
chisqs = [] #Store chisq for each possible extinction and extinction law
alphas = [] #Store alphas for each possible extinction and extinction law
A_Ks = [] #Store extinctions for each possible extinction and extinction law
for a in alpha_range: #Loop through different extinction law powers
    for A_K in A_K_range: #Loop through different possible K band extinctions
h = copy.deepcopy(self) #Make a copy of the input h2 line object
A_lambda = A_K * h.wave**(-a) / lambda0**(-a) #Calculate an extinction correction
h.F *= 10**(0.4*A_lambda) #Apply extinction correction
h.calculate_column_density() #Calculate column densities from each transition, given the guess at extinction correction
# chisq = nansum((h.N[pair_a] - h.N[pair_b])**2 / h.N[pair_b]) #Calculate chisq from all line pairs that arise from same upper states
# chisq = nansum((h.N[pair_a] - h.N[pair_b])**2 / (h.N[pair_b]**2)) #Calculate chisq from all line pairs that arise from same upper states
ln_N_pair_a = log(h.N[pair_a])
ln_N_pair_b = log(h.N[pair_b])
chisq = nansum((ln_N_pair_a - ln_N_pair_b)**2 / ln_N_pair_b)
chisqs.append(chisq) #Store chisq and corresponding variables for extinction correction
alphas.append(a)
A_Ks.append(A_K)
chisqs = array(chisqs) #Convert lists to arrays
alphas = array(alphas)
A_Ks = array(A_Ks)
best_fit = chisqs == nanmin(chisqs) #Find the minimum chisq and best fit alpha and A_K
best_fit_A_K = A_Ks[best_fit]
best_fit_alpha = alphas[best_fit]
print('Found ', len(pair_a), ' line pairs for calculating extinction.')
print('Best fit alpha =', best_fit_alpha) #Print results so user can see
print('Best fit A_K = ', best_fit_A_K)
A_lambda = best_fit_A_K * self.wave**(-best_fit_alpha) / lambda0**(-best_fit_alpha) #Calculate an extinction correction
self.F *= 10**(0.4*A_lambda) #Apply extinction correction
self.sigma *= 10**(0.4*A_lambda)
self.calculate_column_density(normalize=False) #Calculate column densities from each transition, given the new extinction correction
self.A_K = best_fit_A_K #Store extinction parameters in case user wants to inspect or tabulate them later
self.alpha = best_fit_alpha
# def find_cascade(v_u, j_u, v_l, j_l): #Find all possible paths between two levels
# found_transitions = self.tout(v_up, j_up) #Find all transitions out of the upper level
# for i in range(len(v_l_trans)):
# if v_l_trans == v_l and j_l_trans == j_l
# for found_transition in found_transitions: #Loop through each transition found
# if self.v.l[found_transition] == v_l and
# XXXXX.append(find_cascade
def gbar_approx(self): #Estimate collision rate coeffs based on the g-bar approximation in Shaw et al. (2005) Section 2.2.1 and Table 2
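#Each collider's rate coefficient follows Eq. 1 of Shaw et al. (2005),
#k = exp(y0 + a*sigma^b) [cm^3 s^-1], where sigma is the transition energy in
#wavenumbers (floored at 100 cm^-1); the contributions of the five colliders
#are summed into k_total below.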
y0 = array([-9.9265, -8.281, -10.0357, -8.6213, -9.2719]) #Coeffs from Table 2 of Shaw et al. (2005) for H0, He, H2(ortho), H2(para), & H+
a = array([-0.1048, -0.1303, -0.0243, -0.1004, -0.0001])
b = array([0.456, 0.4931, 0.67, 0.5291, 1.0391])
E_trans = self.E.diff() #Grab energy of transitions (in wavenumber cm^-1)
E_trans[E_trans < 100.0] = 100.0 #Apply the max(sigma, 100) floor used in Eq. 1 of Shaw et al. (2005)
k_total = zeros(len(E_trans)) #Store total collisional coeffs (cm^3 s^-1)
for i in range(5): #Loop through g-bar approx for H0, He, H2(ortho), H2(para), & H+ and total up the collisional coeffs k for each transition
k_total += exp( y0[i] + a[i]*(E_trans**b[i]) ) #Eq 1 in Shaw et al. (2005)
#k_total[k_total < 0.] = 0. #Ignore negative coefficients
self.k = k_total #Store the estimated collisional rate coeffs for each transition
# def find_ortho_to_para_ratio(self, s2n_cut = 5.0): #Function to find the best fit ortho-to-para ratio
# op_ratio = arange(3.5, 1.2, -0.1) #Range of OP ratios to check
# n_op_ratio = len(op_ratio) #Count number of OP ratios to check
# chisq = zeros(n_op_ratio) #Set up array to store chisq results for a given OP ratio tested
# ortho_levels = (self.J.u % 2 == 1) #Find all ortho levels
# para_levels = ~ortho_levels #Find all para levels
# #p_init = models.Polynomial1D(degree=2) #Initialize a 3rd degree polynomial for fitting rotation ladders
# #fit_p = fitting.SimplexLSQFitter()
# base_op_ratio = 3.0 #Base OP ratio
# self.g[ortho_levels] /= base_op_ratio #Remove base OP ratio before doing anything
# for i in range(n_op_ratio): #Loop through each OP ratio to test
# self.g[ortho_levels] *= op_ratio[i] #Apply this OP ratio to test
# for v in range(14): #Loop through all possible v levels
# good_ortho_levels = (self.V.u == v) & (self.s2n >= s2n_cut) & ortho_levels #Find all good data points in this
# good_para_levels = (self.V.u == v) & (self.s2n >= s2n_cut) & para_levels #Find all good data points in this
# if (sum(good_ortho_levels) > 2) and (sum(good_para_levels) > 2):
# ortho_x = self.T[good_ortho_levels]
# ortho_y = log(self.N[good_ortho_levels] / self.g[good_ortho_levels])
# para_x = self.T[good_para_levels]
# para_y = log(self.N[good_para_levels] / self.g[good_para_levels])
# interp_obj = interp1d(ortho_x, ortho_y, bounds_error=False, fill_value='extrapolate')
# interp_y = interp_obj(para_x)
# #print('interp_y', interp_y)
# #print('para_x', para_x)
# use_finite = isfinite(interp_y) & isfinite(para_y)
# chisq[i] += nansum((interp_y[use_finite]-para_y[use_finite])**2)
# #y = log(self.N[good_levels] / self.g[good_levels])
# #p = fit_p(p_init, x, y)
# #fit_y = p(x)
# #chisq[i] += nansum((y - fit_y)**2)
# print(op_ratio[i], chisq[i])
# self.g[ortho_levels] /= op_ratio[i] #Remove this OP ratio now that has been tested so we can move onto the next one to test
# self.g[ortho_levels] *= base_op_ratio #Restore base OP ratio now that we are done
# def find_ortho_to_para_ratio(self, s2n_cut = 3.0): #Function to find the best fit ortho-to-para ratio
# h_compare = copy.deepcopy(self) #Find weighted mean of all column densities for a given level
# for J in range(0,50):
# for V in range(1,15): #Loop through each V ladder
# use_these = (h_compare.J.u == J) & (h_compare.V.u==V) & (h_compare.N > 0.)
# if any(use_these):
# # variance = 1.0 / nansum(h_compare.Nsigma[use_these]**-2)
# # weighted_mean = variance * nansum(h_compare.N[use_these] / h_compare.Nsigma[use_these]**2)
# # h_compare.N[use_these] = weighted_mean
# # h_compare.Nsigma[use_these] = sqrt(variance)
# h_compare.N[use_these] = exp(nanmean(log(h_compare.N[use_these])))
# ortho_levels = (self.J.u % 2 == 1) #Find all ortho levels
# para_levels = ~ortho_levels #Find all para levels
# base_op_ratio = 3.0 #Base OP ratio
# h_compare.g[ortho_levels] /= base_op_ratio #Remove base OP ratio before doing anything
# self.g[ortho_levels] /= base_op_ratio
# ratios = [] #List to hold the ratios
# ratio_variances = []
# for v in range(14): #Loop through all possible v levels
# good_ortho_levels = (self.V.u == v) & (self.s2n >= s2n_cut) & ortho_levels #Find all good data points in this
# good_para_levels = (self.V.u == v) & (self.s2n >= s2n_cut) & para_levels #Find all good data points in this
# if (sum(good_ortho_levels) > 2) and (sum(good_para_levels) > 2):
# ortho_x = self.T[good_ortho_levels]
# ortho_y = (self.N[good_ortho_levels] / self.g[good_ortho_levels])
# ortho_y_to_interp = (h_compare.N[good_ortho_levels] / h_compare.g[good_ortho_levels])
# ortho_y_sigma = (self.Nsigma[good_ortho_levels] / self.g[good_ortho_levels])
# para_x = self.T[good_para_levels]
# para_y = (self.N[good_para_levels] / self.g[good_para_levels])
# para_y_to_interp = (h_compare.N[good_para_levels] / h_compare.g[good_para_levels])
# para_y_sigma = (self.Nsigma[good_para_levels] / self.g[good_para_levels])
# order_para = para_x.argsort()
# order_ortho = ortho_x.argsort()
# para_x = para_x[order_para].data
# para_y = para_y[order_para].data
# para_y_to_interp = para_y_to_interp[order_para].data
# para_y_sigma = para_y_sigma[order_para].data
# ortho_x = ortho_x[order_ortho].data
# ortho_y = ortho_y[order_ortho].data
# ortho_y_to_interp = ortho_y_to_interp[order_ortho].data
# ortho_y_sigma = ortho_y_sigma[order_ortho].data
# ortho_interp_obj = interp1d(ortho_x, log(ortho_y_to_interp), fill_value='extrapolate')
# ortho_y_interpolated = exp(ortho_interp_obj(para_x))
# para_interp_obj = interp1d(para_x, log(para_y_to_interp), fill_value='extrapolate')
# para_y_interpolated = exp(para_interp_obj(ortho_x))
# # use_finite = isfinite(interp_y) & isfinite(para_y)
# # figure(v)
# # clf()
# # suptitle('v = '+str(v))
# # plot(para_x[use_finite], (interp_y/para_y)[use_finite])
# # ratio_for_this_v = (interp_y/para_y)[use_finite]
# # ratio_variance_for_this_v = (ratio_for_this_v**2 * ((para_y_sigma[use_finite]/para_y[use_finite])**2 + (0.3)**2))
# # ratios = ratios + ratio_for_this_v.tolist()
# # ratio_variances = ratio_variances + ratio_variance_for_this_v.tolist()
# ortho_ratio_for_this_v = (ortho_y/para_y_interpolated)
# para_ratio_for_this_v = (ortho_y_interpolated/para_y)
# ortho_ratio_variance_for_this_v = (ortho_ratio_for_this_v**2 * ((ortho_y_sigma/ortho_y)**2 + (0.2)**2))
# para_ratio_variance_for_this_v = (para_ratio_for_this_v**2 * ((para_y_sigma/para_y)**2 + (0.2)**2))
# ratios = ratios + ortho_ratio_for_this_v.tolist() + para_ratio_for_this_v.tolist()
# ratio_variances = ratio_variances + ortho_ratio_variance_for_this_v.tolist() + para_ratio_variance_for_this_v.tolist()
# print('For v = ', v)
# print('Ratio = ', ortho_ratio_for_this_v.tolist() + para_ratio_for_this_v.tolist())
# weighted_variance = (1.0 / nansum(1.0/array(ratio_variances)))
# weighted_mean = nansum(array(ratios) / array(ratio_variances)) / nansum(1.0 / array(ratio_variances))
# print('Median O/P = ',nanmedian(ratios))
# print('Mean O/P = ', nanmean(ratios))
# print('Stddev O/P = ', nanstd(ratios))
# print('Weighted mean O/P = ', weighted_mean, '+/-', sqrt(weighted_variance))
# h_compare.g[ortho_levels] *= base_op_ratio #Restore base OP ratio now that we are done
# self.g[ortho_levels] *= base_op_ratio #Restore base OP ratio now that we are done
def find_ortho_to_para_ratio(self, s2n_cut = 3.0, bootstrap=False): #Function to find the best fit ortho-to-para ratio
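#Method sketch: grid-search O/P ratios from 1.0 to 4.0 in steps of 0.01, rescaling the
#ortho degeneracies by each trial ratio and accumulating a chi-squared between the para
#points and the ortho ladder interpolated (in log space) onto the para excitation
#energies; the ratio minimizing chi-squared is stored in self.op_ratio and returned.
#Hedged usage sketch (the object name `h` is hypothetical):
#  op = h.find_ortho_to_para_ratio(s2n_cut=3.0)
#  print('Best-fit ortho-to-para ratio:', op)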
h_compare = copy.deepcopy(self) #Find weighted mean of all column densities for a given level
for J in range(0,50):
for V in range(1,15): #Loop through each V ladder
use_these = (h_compare.J.u == J) & (h_compare.V.u==V) & (h_compare.N > 0.)
if any(use_these):
h_compare.N[use_these] = exp(nanmean(log(h_compare.N[use_these])))
ortho_levels = (self.J.u % 2 == 1) #Find all ortho levels
if bootstrap:
n_levels = len(ortho_levels)
para_levels = ~ortho_levels & random.choice([True,False], size=n_levels)
else:
para_levels = ~ortho_levels #Find all para levels
base_op_ratio = 3.0 #Base OP ratio
h_compare.g[ortho_levels] /= base_op_ratio #Remove base OP ratio before doing anything
self.g[ortho_levels] /= base_op_ratio
op_ratios = arange(1.0, 4.0, 0.01)
n_op_ratios = len(op_ratios)
chisq = zeros(n_op_ratios)
#fractional_uncertainity = zeros(n_op_ratios)
n = zeros(n_op_ratios)
for i in range(n_op_ratios):
#ratios = [] #List to hold the ratios
#ratio_variances = []
h_compare.g[ortho_levels] *= op_ratios[i]
self.g[ortho_levels] *= op_ratios[i]
for v in range(14): #Loop through all possible v levels
good_all_levels = (self.V.u == v) & (self.s2n >= s2n_cut)
good_ortho_levels = good_all_levels & ortho_levels #Find all good data points in this
good_para_levels = good_all_levels & para_levels #Find all good data points in this
if (sum(good_ortho_levels) > 2) and (sum(good_para_levels) > 2):
good_all_levels_and_good_s2n = good_all_levels & (self.s2n >= s2n_cut)
x = self.T[good_all_levels_and_good_s2n]
ortho_x = self.T[good_ortho_levels]
ortho_y = (self.N[good_ortho_levels] / self.g[good_ortho_levels])
ortho_y_to_interp = (h_compare.N[good_ortho_levels] / h_compare.g[good_ortho_levels])
#ortho_y_sigma = (self.Nsigma[good_ortho_levels] / self.g[good_ortho_levels])
para_x = self.T[good_para_levels]
para_y = (self.N[good_para_levels] / self.g[good_para_levels])
#para_y_to_interp = (h_compare.N[good_para_levels] / h_compare.g[good_para_levels])
#para_y_sigma = (self.Nsigma[good_para_levels] / self.g[good_para_levels])
order_all = x.argsort()
order_para = para_x.argsort()
order_ortho = ortho_x.argsort()
x = x[order_all]
para_x = para_x[order_para].data
para_y = para_y[order_para].data
#para_y_to_interp = para_y_to_interp[order_para].data
#para_y_sigma = para_y_sigma[order_para].data
ortho_x = ortho_x[order_ortho].data
ortho_y = ortho_y[order_ortho].data
ortho_y_to_interp = ortho_y_to_interp[order_ortho].data
#ortho_y_sigma = ortho_y_sigma[order_ortho].data
ortho_interp_obj = interp1d(ortho_x, log(ortho_y_to_interp), fill_value='extrapolate')
#ortho_y_interpolated = exp(ortho_interp_obj(x))
#ortho_y_interpolated = exp(ortho_interp_obj(ortho_x))
ortho_y_interpolated = exp(ortho_interp_obj(para_x))
#para_interp_obj = interp1d(para_x, log(para_y_to_interp), fill_value='extrapolate')
#para_y_interpolated = exp(para_interp_obj(x))
#para_y_interpolated = exp(para_interp_obj(ortho_x))
#para_y_interpolated = exp(para_interp_obj(para_x))
#chisq[i] += nansum((ortho_y_interpolated - para_y_interpolated)**2 / abs(para_y_interpolated))
chisq[i] += nansum((ortho_y_interpolated - para_y)**2 / abs(para_y))
#chisq[i] += nansum((ortho_y_interpolated - para_y_interpolated)**2 / self.Nsigma[good_all_levels]**2)
n[i] += float(len(ortho_y_interpolated))
h_compare.g[ortho_levels] /= op_ratios[i]
self.g[ortho_levels] /= op_ratios[i]
#print(op_ratios[i], chisq[i])
min_chisq = where(chisq == nanmin(chisq))
#std_dev = sqrt((1.0/(n[min_chisq]-1.0)) * chisq[min_chisq])
#print('Best fit O/P = ', op_ratios[min_chisq], '+/-', std_dev)
#print('Chisq test result = ', chisq[min_chisq])
# weighted_variance = (1.0 / nansum(1.0/array(ratio_variances)))
# weighted_mean = nansum(array(ratios) / array(ratio_variances)) / nansum(1.0 / array(ratio_variances))
# print('Median O/P = ',nanmedian(ratios))
# print('Mean O/P = ', nanmean(ratios))
# print('Stddev O/P = ', nanstd(ratios))
# print('Weighted mean O/P = ', weighted_mean, '+/-', sqrt(weighted_variance))
#h_compare.g[ortho_levels] *= base_op_ratio #Restore base OP ratio now that we are done
self.g[ortho_levels] *= base_op_ratio #Restore base OP ratio now that we are done
self.op_ratio = op_ratios[min_chisq][0] #Save best fit O/P ratio in H2 obj
return(self.op_ratio) #Return best fit O/P ratio
# def find_ortho_to_para_ratio(self, s2n_cut = 3.0): #Function to find the best fit ortho-to-para ratio
# h_compare = copy.deepcopy(self) #Find weighted mean of all column densities for a given level
# for J in range(0,50):
# for V in range(1,15): #Loop through each V ladder
# use_these = (h_compare.J.u == J) & (h_compare.V.u==V) & (h_compare.N > 0.)
# if any(use_these):
# h_compare.N[use_these] = exp(nanmean(log(h_compare.N[use_these])))
# ortho_levels = (self.J.u % 2 == 1) #Find all ortho levels
# para_levels = ~ortho_levels #Find all para levels
# base_op_ratio = 3.0 #Base OP ratio
# h_compare.g[ortho_levels] /= base_op_ratio #Remove base OP ratio before doing anything
# self.g[ortho_levels] /= base_op_ratio
# # op_ratios = arange(1.0, 4.0, 0.01)
# # n_op_ratios = len(op_ratios)
# # chisq = zeros(n_op_ratios)
# # fractional_uncertainity = zeros(n_op_ratios)
# n = 0
# # for i in range(n_op_ratios):
# #ratios = [] #List to hold the ratios
# #ratio_variances = []
# # h_compare.g[ortho_levels] *= op_ratios[i]
# # self.g[ortho_levels] *= op_ratios[i]
# ratios = []
# for v in range(14): #Loop through all possible v levels
# good_all_levels = (self.V.u == v) & (self.s2n >= s2n_cut)
# good_ortho_levels = good_all_levels & ortho_levels #Find all good data points in this
# good_para_levels = good_all_levels & para_levels #Find all good data points in this
# if (sum(good_ortho_levels) > 2) and (sum(good_para_levels) > 2):
# good_all_levels_and_good_s2n = good_all_levels & (self.s2n >= s2n_cut)
# x = self.T[good_all_levels_and_good_s2n]
# ortho_x = self.T[good_ortho_levels]
# ortho_y = (self.N[good_ortho_levels] / self.g[good_ortho_levels])
# ortho_y_to_interp = (h_compare.N[good_ortho_levels] / h_compare.g[good_ortho_levels])
# ortho_y_sigma = (self.Nsigma[good_ortho_levels] / self.g[good_ortho_levels])
# para_x = self.T[good_para_levels]
# para_y = (self.N[good_para_levels] / self.g[good_para_levels])
# para_y_to_interp = (h_compare.N[good_para_levels] / h_compare.g[good_para_levels])
# para_y_sigma = (self.Nsigma[good_para_levels] / self.g[good_para_levels])
# order_all = x.argsort()
# order_para = para_x.argsort()
# order_ortho = ortho_x.argsort()
# x = x[order_all]
# para_x = para_x[order_para].data
# para_y = para_y[order_para].data
# para_y_to_interp = para_y_to_interp[order_para].data
# para_y_sigma = para_y_sigma[order_para].data
# ortho_x = ortho_x[order_ortho].data
# ortho_y = ortho_y[order_ortho].data
# ortho_y_to_interp = ortho_y_to_interp[order_ortho].data
# ortho_y_sigma = ortho_y_sigma[order_ortho].data
# ortho_interp_obj = interp1d(ortho_x, log(ortho_y_to_interp), fill_value='extrapolate')
# #ortho_y_interpolated = exp(ortho_interp_obj(x))
# ortho_y_interpolated = exp(ortho_interp_obj(para_x))
# para_interp_obj = interp1d(para_x, log(para_y_to_interp), fill_value='extrapolate')
# #para_y_interpolated = exp(para_interp_obj(x))
# para_y_interpolated = exp(para_interp_obj(ortho_x))
# ratios = ratios + (ortho_y/para_y_interpolated).tolist() #+ (ortho_y_interpolated/para_y).tolist()
# #chisq[i] += nansum((ortho_y_interpolated - para_y_interpolated)**2 / abs(para_y_interpolated))
# #chisq[i] += nansum((ortho_y_interpolated - para_y_interpolated)**2 / self.Nsigma[good_all_levels]**2)
# n += float(len(ortho_y_interpolated))
# print('Median O/P = ',nanmedian(ratios))
# print('Mean O/P = ', nanmean(ratios))
# print('Stddev O/P = ', nanstd(ratios))
# # print('Weighted mean O/P = ', weighted_mean, '+/-', sqrt(weighted_variance))
# h_compare.g[ortho_levels] *= base_op_ratio #Restore base OP ratio now that we are done
# self.g[ortho_levels] *= base_op_ratio #Restore base OP ratio now that we are done
class transition_node:
def __init__(self, h2_obj, v, J, itercount=0, wave_range=[0.,0.]):
#if itercount < 50:
print('itercount = ', itercount, ' v = ', v, ' J = ', J)
touts = h2_obj.tout(v, J)
n = size(touts)
if n > 0:
children = []
v_out, J_out, wave = h2_obj.V.l[touts], h2_obj.J.l[touts], h2_obj.wave[touts]
for i in range(n):
if (wave_range[0] == 0. and wave_range[1] == 0.) or (wave[i] >= wave_range[0] and wave[i] <= wave_range[1]):
print('found transition ', h2_obj.label[touts][i], ' wavelength=', h2_obj.wave[touts][i])
children.append(transition_node(h2_obj, v_out[i], J_out[i], itercount + 1, wave_range=wave_range))
self.v = v
self.J = J
self.i = touts
self.children = children
self.last = False
else:
self.last = True
print('Looks like that is the last of one set of transitions.')
class density_surface(): #Fit surface in v and J space, save object to store surface
def __init__(self, h2_obj, s2n_cut=-1.0):
i = (h2_obj.s2n > s2n_cut) & (h2_obj.N > 0.) #Filter out low S/N and unused points
v = h2_obj.V.u[i] #Grab the relevant variables to fit
J = h2_obj.J.u[i]
log_N = log(h2_obj.N[i])
v_fit = linregress(v, log_N) #Fit the v and J trends separately with a simple linear regression
J_fit = linregress(J, log_N)
self.v_slope = v_fit.slope
self.v_intercept = v_fit.intercept
self.J_slope = J_fit.slope
self.J_intercept = J_fit.intercept
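#Hedged usage sketch (assumes an existing h2_transitions object `h`; the name is hypothetical):
#  surf = density_surface(h, s2n_cut=3.0)
#  ln_N_at_v5 = surf.v_intercept + surf.v_slope*5 #rough ln(N) trend evaluated at v=5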
#Store upper and lower J (rotational) states
class J:
def __init__(self, u, l):
self.u = u #Store upper J state
self.l = l #Store lower J state
self.label = self.makelabel() #Store portion of spectroscopic notation for J
def diff(self): #Get difference between J upper and lower levels
return self.u - self.l
def makelabel(self): #Make spectroscopic notation label
delta_J = self.diff() #Grab difference between J upper and lower levels
n = len(delta_J) #Number of transitions
J_labels = []
for i in range(n):
if delta_J[i] == -2:
J_labels.append('O(' + str(self.l[i]) + ')') #Create label O(J_l) for transitions where delta-J = -2
elif delta_J[i] == 0:
J_labels.append('Q(' + str(self.l[i]) + ')') #Create label Q(J_l) for transitions where delta-J = 0
elif delta_J[i] == 2:
J_labels.append('S(' + str(self.l[i]) + ')') #Create label S(J_l) for transitions where delta-J = +2
return array(J_labels)
def sort(self, sort_object): #Sort both upper and lower levels for a given sorted object fed to this function (e.g. argsort)
self.u = self.u[sort_object] #Sort upper states
self.l = self.l[sort_object] #Sort lower states
self.label = self.label[sort_object] #Sort labels
#Store upper and lower V (vibrational) states
class V:
def __init__(self, u, l):
self.u = u #Store upper V state
self.l = l #Store lower V state
self.label = self.makelabel() #Store portion of spectroscopic notation for V
def diff(self): #Get difference between V upper and lower levels
return self.u - self.l
def makelabel(self):
n = len(self.u) #Number of transitions
V_labels = []
for i in range(n):
V_labels.append( str(self.u[i]) + '-' + str(self.l[i]) ) #Create label for V transitions of V_u-V_l
return array(V_labels)
def sort(self, sort_object): #Sort both upper and lower levels for a given sorted object fed to this function (e.g. argsort)
self.u = self.u[sort_object] #Sort upper states
self.l = self.l[sort_object] #Sort lower states
self.label = self.label[sort_object] #Sort labels
#Store upper and lower E (energies) of the states
class E:
def __init__(self, u, l):
self.u = u #Store upper state energy
self.l = l #Store lower state energy
#self.wave = self.getwave() #Store wavelength for each line
def diff(self): #Get difference between upper and lower level energies
return self.u - self.l
def getwave(self): #Get wavelength from difference in energy levels
return self.diff()**(-1) * 1e4 #Get wavelength of line from energy [cm ^-1] and convert to um
def sort(self, sort_object): #Sort both upper and lower levels for a given sorted object fed to this function (e.g. argsort)
self.u = self.u[sort_object] #Sort upper states
self.l = self.l[sort_object] #Sort lower states
#@jit
def run_cascade(iterations, time, N, trans_A, upper_states, lower_states, pure_rot_states, rovib_states_per_J, J, V, collisions=False, scale_factor=1e-10): #Speed up radiative cascade with numba
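#Cascade sketch: each call advances the level populations N by `iterations` explicit time
#steps. Per step, a fraction time*scale_factor of each v=0 (pure rotation) population is
#redistributed evenly over the v>0 states of the same J (a crude stand-in for UV pumping),
#then every radiative transition moves N_upper*A*time molecules from its upper to its
#lower state, and an optional crude collisional de-excitation term proportional to V is
#applied at the end of the step.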
transition_amount = trans_A*time
#para = J%1==0
#ortho = J%1==1
#ground_J1 = (J==1) & (V==0)
#ground_J0 = (J==0) & (V==0)
#pure_rot_states = V == 0
#rovib_states = V > 1
#J_pure_rot_states = J[pure_rot_states]
n_states = len(N)
n_lines = len(trans_A)
time_x_scale_factor = time * scale_factor
for k in range(iterations): #loop through however many iterations user specifies
#N[para] += distribution[para]*(N[ground_J0] + 0.5*(1.0-sum(N)))
#N[ortho] += distribution[ortho]*(N[ground_J1] + 0.5*(1.0-sum(N)))
#N += distribution * scale_factor*time
if scale_factor > 0.:
for current_J in range(32): #Loop through each J
#current_rovib_states = (V > 1) & (J==current_J) #Grab index of current pure rotation state
#current_pure_rot_state = (V == 0) & (J==current_J) #Grab indicies of current rovibration states with the same J as the current pure rotation state
#current_pure_rot_state = pure_rot_states[current_J]
#current_rovib_states = rovib_states_per_J[current_J]
#num_current_rovib_states = len(N[current_rovib_states]) #Count number of rovibrational states we are going to redistribute the popluations from v=0 into
delta_N = N[pure_rot_states[current_J]] * time_x_scale_factor #How many molecules out of the pure rotation state to redistribute to higher v
N[rovib_states_per_J[current_J]] += delta_N / len(N[rovib_states_per_J[current_J]]) #Redistribute fraction of pure rotation state molecules to higher v
N[pure_rot_states[current_J]] -= delta_N #Remove molecules in pure rotation state that have now been redistributed
#N[para] += scale_factor*distribution[para]#*N[ground_J0] #+ 0.5*(1.0-sum(N)))
#N[ortho] += scale_factor*distribution[ortho]#*N[ground_J1] #+ 0.5*(1.0-sum(N)))
#N[ground_J0] = 0.
#N[ground_J1] = 0.
#N[pure_rot_states] = pure_rot_pops*sum(N[pure_rot_states]) / sum(pure_rot_pops) #Thermalize pure rotation states
store_delta_N = zeros(n_states) #Set up array to store all the changes in N
for i in range(n_lines):
delta_N = N[upper_states[i]]*transition_amount[i]
store_delta_N[upper_states[i]] -= delta_N
store_delta_N[lower_states[i]] += delta_N
N += store_delta_N #Modify level populations after the effects of all the transitions have been summed up
if collisions: #If user specifies to use collisions
N -= 0.01*N*time*V #Apply this very crude approximation of collisional de-excitation, which favors high V
return N
#Object for storing column densities of individual levels, and performing calculations upon them
class states:
def __init__(self, max_J=99):
ion() #Set up plotting to be interactive
show() #Open a plotting window
V, J = loadtxt(energy_table, usecols=(0,1), unpack=True, dtype='int')#, skiprows=1) #Read in data for H2 ground state rovibrational energy levels
E = loadtxt(energy_table, usecols=(2,), unpack=True, dtype='float')#, skiprows=1)
if max_J < 99: #If user specifies a maximum J, use only states where J <= max_J
use_these_states = J <= max_J
V = V[use_these_states]
J = J[use_these_states]
E = E[use_these_states]
self.n_states = len(V) #Number of levels
self.V = V #Array to store vibration level
self.J = J #Array to store rotation level
self.T = E / k #Excited energy above the ground rovibrational state in units of Kelvin
self.N = zeros(self.n_states) #Array for storing column densities
g_ortho_para = 1 + 2 * (J % 2 == 1) #Calculate the degeneracy for ortho or para hydrogen
self.g = g_ortho_para * (2*J+1) #Store degeneracy
self.tau = zeros(self.n_states) #array for storing radiative lifetime
self.Q = zeros(self.n_states)
self.A_tot_in = zeros(self.n_states) #A tots for radiative transitions
self.A_tot_out = zeros(self.n_states)
self.k_tot_out = zeros(self.n_states) #Estimated k tot (collision rate coeff.) for collisional transitions
self.transitions = make_line_list() #Create transitions list
self.transitions.upper_states = zeros(self.transitions.n_lines, dtype=int) #set up index to upper states
self.transitions.lower_states = zeros(self.transitions.n_lines, dtype=int) #Set up index to lower states
self.transitions.gbar_approx() #Estimate collisional rate coeffs based on Section 2.2.1 of Shaw et al. (2005)
for i in range(self.transitions.n_lines):
if self.transitions.J.u[i] <= max_J and self.transitions.J.l[i] <= max_J:
self.transitions.upper_states[i] = where((J == self.transitions.J.u[i]) & (V == self.transitions.V.u[i]))[0][0] #Find index of upper states
self.transitions.lower_states[i] = where((J == self.transitions.J.l[i]) & (V == self.transitions.V.l[i]))[0][0] #Find index of lower states
for i in range(self.n_states): #Calculate relative lifetime of each level (inverse sum of transition probabilities), see Black & Dalgarno (1976) Eq. 4
transitions_out_of_this_state = (self.transitions.J.l == J[i]) & (self.transitions.V.l == V[i]) #Find transitions out of this state
transitions_into_this_state = (self.transitions.J.u == J[i]) & (self.transitions.V.u == V[i])
self.tau[i] = sum(self.transitions.A[transitions_out_of_this_state])**-1 #Black & Dalgarno (1976) Eq. 4
self.Q[i] = sum(self.transitions.A[transitions_into_this_state])**-1
self.A_tot_out[i] = sum(self.transitions.A[transitions_out_of_this_state]) #Black & Dalgarno (1976) Eq. 4
self.A_tot_in[i] = sum(self.transitions.A[transitions_into_this_state])
self.k_tot_out[i] = sum(self.transitions.k[transitions_out_of_this_state])
self.ncr = self.A_tot_out / self.k_tot_out #Estimate critical densities
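#n_cr = A_tot_out / k_tot_out is the usual critical-density estimate: the collider
#density at which collisional de-excitation out of a level (rate n*k) matches its
#total radiative decay rate A.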
self.test_n = self.Q * self.tau
self.start_cascade = False #Flag if cascade has started or not
#self.convergence = [] #Set up python list that will hold convergence of cascade
#UV pumping from Black & Dalgarno (1976)
self.BD76_cloud_boundary_pumping = array([1.78e-11, 1.32e-11, 1.32e-11, 8.88e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.1e-11, 7.77e-12, 7.81e-12, 5.1e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.06e-11, 7.02e-12, 7.23e-12, 4.64e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 1.07e-11, 6.85e-12, 7.18e-12, 4.55e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.05e-11, 6.65e-12,
6.96e-12, 4.4e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.01e-11, 6.43e-12, 6.7e-12, 4.24e-12, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.71e-12, 5.95e-12, 6.01e-12, 3.91e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 7.47e-12, 5.47e-12, 5.38e-12, 3.58e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.66e-12, 4.86e-12, 4.53e-12, 3.17e-12,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.62e-12, 4.6e-12, 4.1e-12, 2.99e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.36e-12,
3.89e-12, 3.07e-12, 2.51e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.39e-12, 3.66e-12, 2.77e-12, 2.41e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.45e-12, 3.8e-12,
2.88e-12, 2.49e-12, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.36e-12, 3.55e-12, 2.69e-12, 2.29e-12, 0.0, 0.0, 0.0, 0.0, 8.71e-13, 2.29e-12, 1.58e-12, 1.27e-12])
self.BD76_formation_pumping = array([1.12e-14, 1.14e-13, 5.41e-12, 2.53e-13, 9.08e-14, 3.66e-13, 1.19e-13, 4.43e-13, 1.36e-13, 4.83e-13, 1.42e-13, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 7.91e-15, 8.09e-14, 3.84e-14, 1.8e-13, 6.46e-14, 2.59e-13, 8.43e-14, 3.14e-13, 9.6e-14, 3.43e-13, 1.01e-13, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 5.73e-15, 5.86e-14, 2.78e-14, 1.3e-13, 4.68e-14, 1.88e-13, 6.09e-14, 2.27e-13, 6.95e-14, 2.47e-13, 7.27e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.21e-15, 4.31e-14, 2.05e-14, 9.6e-14, 3.44e-14, 1.38e-13, 4.49e-14, 1.67e-13, 5.12e-14, 1.83e-13, 5.37e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 3.17e-15, 3.24e-14, 1.54e-14, 7.19e-14, 2.59e-14, 1.04e-13, 3.36e-14, 1.25e-13, 3.85e-14, 1.37e-13, 4.03e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.42e-15, 2.47e-14, 1.18e-14, 5.5e-14, 1.98e-14, 7.94e-14, 2.58e-14, 9.6e-14, 2.94e-14, 1.05e-13, 3.09e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0, 1.89e-15, 1.93e-14, 9.17e-15, 4.29e-14, 1.54e-14, 6.18e-14, 2.01e-14, 7.48e-14, 2.29e-14, 8.16e-14, 2.4e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
1.5e-15, 1.53e-14, 7.27e-15, 3.41e-14, 1.23e-14, 4.91e-14, 1.59e-14, 5.95e-14, 1.83e-14, 6.49e-14, 1.91e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.22e-15, 1.25e-14, 5.9e-15, 2.76e-14,
9.95e-15, 3.99e-14, 1.3e-14, 4.83e-14, 1.48e-14, 5.26e-15, 1.55e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1e-15, 1.03e-14, 4.88e-15, 2.28e-14, 8.22e-15, 3.3e-14, 1.07e-14, 3.99e-14, 1.22e-14,
4.35e-14, 1.28e-14, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 8.5e-16, 8.74e-15, 4.13e-15, 1.93e-14, 6.95e-15, 2.79e-14, 9.08e-15, 3.37e-14, 1.03e-14, 3.68e-14, 1.08e-14, 0.0, 0.0, 0.0, 0.0, 7.37e-16,
7.53e-15, 3.57e-15, 1.68e-14, 6.02e-15, 2.41e-14, 7.85e-15, 2.92e-14, 9e-15, 3.18e-14, 9.43e-15, 0.0, 0.0, 6.55e-16, 6.7e-15, 3.18e-15,
1.49e-14, 5.35e-15, 2.15e-14, 6.97e-15, 2.6e-14, 7.97e-15, 2.84e-14, 8.35e-15, 6.01e-16, 6.15e-15, 2.92e-15, 1.37e-14, 4.91e-15, 1.94e-14, 6.4e-15, 2.39e-14, 7.31e-15, 2.6e-14, 7.66e-15, 5.7e-16])
#self.BD76_cloud_center_pumping =
pure_rot_states = [] #Save indices of pure rotation states
rovib_states_per_J = [] #Save indices of each set of rovib states of constant J
for current_J in range(32): #Loop through each rotation level
pure_rot_states.append((J == current_J) & (V == 0)) #Store indices for a given J for the pure rotation state
rovib_states_per_J.append((J == current_J) & (V > 0)) #Store indices for a given J for all rovib. states where v>0
self.pure_rot_states = pure_rot_states
self.rovib_states_per_J = rovib_states_per_J
def thermalize(self, temperature, N_tot=1.0): #Set populations to be thermal at the user supplied temperature
exponential = self.g * exp(-self.T/temperature) #Calculate boltzmann distribution for user given temperature, used to populate energy levels
boltzmann_distribution = exponential / nansum(exponential) #Create a normalized boltzmann distribution
self.N = boltzmann_distribution * N_tot #Set column densities to the boltzmann distribution
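#For reference (a hedged note): the populations set above follow the Boltzmann distribution,
#    N_i / N_tot = g_i * exp(-T_i / T) / sum_j( g_j * exp(-T_j / T) )
#where g_i is the statistical weight and T_i is the level energy expressed in Kelvin, matching the code above.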
def generate_synthetic_spectrum(self, wave_range=[1.45,2.45], pixel_size=1e-5, line_fwhm=7.5, centroid=0.): #Generate a synthetic 1D spectrum based on stored flux values in this object, can be used to synthesize spectra from Cloudy models, or thermal gas generated by the "thermalize" command
self.set_transition_column_densities() #Set column densities from the states class object here
self.transitions.calculate_flux()
w, f = self.transitions.generate_synthetic_spectrum(wave_range=wave_range, pixel_size=pixel_size, line_fwhm=line_fwhm, centroid=centroid)
return w, f #Send the wavelength and flux arrays back to you
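#Example usage (a hedged sketch; `h2_states` is a hypothetical name standing in for an instance of this class):
#    h2_states.thermalize(2000.0, N_tot=1.0) #Populate the levels with a 2000 K Boltzmann distribution
#    w, f = h2_states.generate_synthetic_spectrum(wave_range=[1.45, 2.45]) #Synthesize a 1D spectrum from those populations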
def total_N(self): #Grab total column density N and return it
return nansum(self.N) #Return total column density of H2
def cascade(self, time=1.0, temp=250.0, quick=0, showplot=True, iterations=1, scale_factor=1e-10, collisions=False): #Do a step in the radiative cascade
V = self.V #Assign variables to speed up loop
J = self.J
N = self.N
g = self.g
#trans_V_u = self.transitions.V.u
#trans_V_l = self.transitions.V.l
#trans_J_u = self.transitions.J.u
#trans_J_l = self.transitions.J.l
trans_A = self.transitions.A
upper_states = self.transitions.upper_states
lower_states = self.transitions.lower_states
pure_rot_states = self.pure_rot_states
rovib_states_per_J = self.rovib_states_per_J
#if quick != 0: #If user wants to speed up the cascade
# maxthresh = -partsort(-trans_A,quick)[quick-1] #Find threshold for maximum
#else:
# maxthresh = 0. #E
exponential = g * exp(-self.T/temp) #Calculate boltzmann distribution for user given temperature, used to populate energy levels
boltzmann_distribution = exponential / nansum(exponential)
# ground_J1 = (J==1) & (V==0)
# ground_J0 = (J==0) & (V==0)
if not self.start_cascade: #If cascade has not started yet
N = zeros(self.n_states) #Start off with everything = 0.0
N = boltzmann_distribution #Preset the population to be the Boltzmann distribution
# pure_rotation_states = V == 0
# const = 1e-3 #Fraction to populate other states at v > 0 to match the J levels of v=0
# N[pure_rotation_states] = boltmann_distribution[pure_rotation_states] #preset the population to be the boltzmann distribution for only the pure rotation states
# for current_J in J[pure_rotation_states]: #loop through each possible rotation state
# other_vibrational_states_with_same_J = (J==current_J) & (~pure_rotation_states) #Find states at higher V with same J
# if any(other_vibrational_states_with_same_J): #If any J states in v>0 matches the current J
# N[other_vibrational_states_with_same_J] = const * N[pure_rotation_states & (J==current_J)] #Set level populations
# #N = (1.-(J/10.)) + (1.-(V/14.0)) #Set populations based on V and J
# #N[(V==14) & (J==1)] = 1.0
# #N = self.BD76_formation_pumping
# #N = self.BD76_cloud_boundary_pumping
# #N = exp(-(0.5*(J+1)+0.3*(V+1)))
# #N[V>-1] = exp(-(0.25*(J[V>-1]-1)+0.2*(V[V>-1]-1)))
# #N[ground_J0] = 0.
# #N[ground_J1] = 0.
# #N = exp(-J.astype(float)-V.astype(float))
# #N = ones(self.n_states)
# N[V==0] == 0.
N = N/sum(N) #Normalize
self.distribution = copy.deepcopy(N)
self.start_cascade = True #Then flip the flag so that the populations stay as they are
#old_N = copy.deepcopy(self.N)
N = run_cascade(iterations, time, N, trans_A, upper_states, lower_states, pure_rot_states, rovib_states_per_J, J, V, collisions=collisions, scale_factor=scale_factor) #Test cascade with numba
# transition_amount = trans_A*time
# para = J%1==0
# ortho = J%1==1
# for k in range(iterations): #loop through however many iterations user specifies
# #delta_N = N*trans_A[upper_states]*time
# #store_delta_N -= delta_N
# #store_delta_N += delta_N
# #for i in range(self.n_states):
# # u = upper_states==i
# # l = lower_states==i
# # store_delta_N[i] -= N[i]*sum(trans_A[u])*time
# # store_delta_N[i] += sum(N[l]*trans_A[l])*time
# store_delta_N = zeros(self.n_states) #Set up array to store all the changes in N
# delta_N = N[upper_states]*transition_amount #Move this much H2 around with this transition
# for i in range(self.n_states):
# store_delta_N[i] = nansum(delta_N[lower_states == i]) - nansum(delta_N[upper_states == i])
# #store_delta_N[upper_states] -= delta_N
# #store_delta_N[lower_states] += delta_N
# #for i in range(self.transitions.n_lines): #Loop through each transition
# # #if trans_A[i] > maxthresh: #Select only certain transitions below a certain A to be important, to optimize code
# # #Ju = self.transitions.J.u[i] #Grab upper and lower J levels for this transition
# # #Jl = self.transitions.J.l[i]
# # #Vu = self.transitions.V.u[i] #Grab upper and lower V levels for this transition
# # #Vl = self.transitions.V.l[i]
# # #upper_state = logical_and(V == trans_V_u[i], J == trans_J_u[i])#Finder upper state of transition
# # #lower_state = logical_and(V == trans_V_l[i], J == trans_J_l[i])#Find loer state of transition
# # #upper_state = (V == trans_V_u[i]) & (J == trans_J_u[i])#Finder upper state of transition
# # #lower_state = (V == trans_V_l[i]) & (J == trans_J_l[i])#Find loer state of transition
# #store_delta_N[upper_states[i], lower_states[i]] += [-delta_N, delta_N] #Try some vectorization
# #print('delta_N=', delta_N)
# # #delta_N = self.N[upper_state]*(1.0 - exp(-self.transitions.A[i]*time) )#Move this much H2 around with this transition
# # store_delta_N[upper_states[i]] -= delta_N[i] #Store change in N taken out of upper state by this transition
# # store_delta_N[lower_states[i]] += delta_N[i] #Store change in N put into lower state by this transition
# N += store_delta_N #Modfiy level populations after the effects of all the transitions have been summed up
# #self.N[1:] = self.N[1:] + self.N[0] / (float(self.n_states)-1.0) #Crudely redistribute everything in the ground state back to all other states
# #N[1:] = N[1:] + boltmann_distribution[1:]*N[0]
# #N[j] = N[j] + boltmann_distribution*N[0]
# #stop()
# N[para] += self.distribution[para]*(N[ground_J0] + 0.5*(1.0-nansum(N)))
# N[ortho] += self.distribution[ortho]*(N[ground_J1] + 0.5*(1.0-nansum(N)))
# #N[J%1==0] += boltmann_distribution[J%1==0]*N[ground_J0]
# #N[J%1==1] += boltmann_distribution[J%1==1]*N[ground_J1]
# N[ground_J0] = 0.
# N[ground_J1] = 0.
# #stop()
# #N[V>0] += self.distribution*sum(N[V==0])
# #N[V==0] = 0.
#N[285] = N[285] + N[0] #Test just dumping everything into the final level and let everything cascade out of it.
#N[0] = 0.0 #Empty out ground state after redistributing all the molecules in the ground
#convergence_measurement = (nansum((N-old_N)))**2
#print('convergence = ', convergence_measurement)
#self.convergence.append(convergence_measurement)#Calculate convergence from one step to the
self.N = N
if showplot:
self.set_transition_column_densities()
self.transitions.v_plot(s2n_cut=-1.0, savepdf=False)
#stop()
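#Example usage (a hedged sketch; `h2_states` is a hypothetical name standing in for an instance of this class):
#    h2_states.cascade(time=1.0, temp=250.0, iterations=100, showplot=False) #Advance the radiative cascade 100 steps without plotting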
def set_transition_column_densities(self): #Put column densities from the states object into the transitions object
for i in range(self.n_states): #Loop through each level
upper_states = (self.transitions.J.u == self.J[i]) & (self.transitions.V.u == self.V[i]) #Find all transitions with upper states in a level
self.transitions.N[upper_states] = self.N[i] #Set new column densities to the transitions object
| 160,758
| 64.243101
| 323
|
py
|
plotspec
|
plotspec-master/run_plotspec_test.py
|
#Test script for plotspec.py library to demonstrate what library can do
from plotspec import * #Import plotspec library
import h2 #Import H2 library
#~~~~~~~~~~~~~~~~~~~~SCIENCE TARGET INFORMATION~~~~~~~~~~~~~~~~~~~~~~~~~~~~
save.name('NGC 7027')
date = 20141023 #Date of observation
frameno = 51 #Frame number for science target
stdno = 59 #Frame number for standard star
B = 5.734 #B band magnitude for std star HD 205314
V = 5.766 #V band magnitude for std star HD 205314
waveno = 118 #Frame number for wavelength solution (usually sky frame)
#waveno = 59
skyno = 120 #Frame number for difference between first and last offs, used for automated removal of OH sky lines
HI_lines = lines('HI_ngc7027.dat', delta_v = 43.0)
ncapture_lines = lines('neutron_capture_species_ngc7027.dat', delta_v = 33.0)
#~~~~~~~~~~~~~~~~~~~~SET UP H2 TRANSITIONS OBJECT IF USING A LINE LIST WITH H2~~~~~~~~~~~~~~~~~~~~~~~~~~~~
h2_transitions = h2.make_line_list() #Set up object for storing H2 transitions
#~~~~~~~~~~~~~~~~~~~~SCRIPT FOR ANALYSING SPECTRA~~~~~~~~~~~~~~~~~~~~~~~~~~~~
spec1d, spec2d = getspec(date, waveno, frameno, stdno, B=B, V=V, y_scale=1.0, wave_smooth=0.0, oh=skyno) #Create 1D and 2D spectra objects for all orders combining both H and K bands (easy eh?), also input B & V mags for std. star, y_scale scales A0V H I line fit, wave_smooth smooths A0V H I line fit, delta_v moves A0V H I lines in velocity space
#spec1d.subtract_continuum() #Subtract continuum from 1D spectrum, comment out to not subtract continuum
spec2d.subtract_continuum() #Subtract continuum from 2D spectrum, comment out to not subtract continuum
spec1d.combine_orders() #Combine all orders in 1D spectrum into one very long spectrum
spec2d.combine_orders() #Combine all orders in 2D spectrum into one very long spectrum
#spec1d.plot() #Plot 1D spectrum
#spec1d.plotlines(spectral_lines, rows = 2, ymax=1e7, fontsize=14)
spec2d.plot(ncapture_lines, pause = True, close = True, label_OH = True, num_wave_labels = 1000) #Plot 2D spectrum in DS9
pv = position_velocity(spec1d.combospec, spec2d.combospec, HI_lines) #Extract and create a datacube in position-velocity space of all lines in line list(s) found in spectrum
test_integrate_region = region(pv, file='n7027_HI.reg', background='all', name='HI') #Grab line fluxes from a user specified region, here defined in this script
pv = position_velocity(spec1d.combospec, spec2d.combospec, ncapture_lines) #Extract and create a datacube in position-velocity space of all lines in line list(s) found in spectrum
pv.view(line = '1-0 S(1)', pause=True, close=False, printlines=True) #View position-velocity datacube of all lines in DS9
test_integrate_region = region(pv, file='n7027_ncapture.reg', background='all', name='ncapture') #Grab line fluxes from a user specified region, here defined in this script
#h2_transitions.set_flux(test_integrate_region) #Read fluxes into H2 transition object
#h2_transitions.calculate_column_density() #Calculate column density of H2 transition upper states from fluxes
#h2_transitions.v_plot(plot_single_temp = True, show_upper_limits = False) #Plot Boltzmann Diagram of H2 transition upper states labeled with upper V states
| 3,188
| 78.725
| 348
|
py
|
plotspec
|
plotspec-master/plotspec_demo.py
|
#2D demo - IGRINS Conference 2015 in Korea
#by Kyle Kaplan
#~~~~~~~~~~~~~~~~~~~~IMPORT LIBRARIES~~~~~~~~~~~~~~~~~~~~~~~~~~~
from plotspec import * #Import plotspec library
import h2 #Import H2 library
#~~~~~~~~~~~~~~~~~~~~SCIENCE TARGET INFORMATION~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##M 1-11
save.name('Demo')
date = 20141204 #Date of IGRINS observations
frameno = 152 #First frame # for science target
stdno = 164 #First frame # for A0V standard star
B = 4.714 #B magnitude for A0V std.
V = 4.669 #V magnitude for A0V std.
waveno = 153 #Frame # of sky frame for wavelength calibration
ohno = 162 #Frame # for sky difference, used for automated removal of OH sky lines
demo_lines = lines('demo.dat', delta_v = 30.0) #Emission line list + velocity
h2_lines = lines('demo_H2.dat', delta_v=30.0) #Load specific H2 line list for M 1-11
#~~~~~~~~~~~~~~~~~~~~PROCESS SCIENCE DATA~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
spec1d, spec2d = getspec(date, waveno, frameno, stdno, B=B, V=V, y_scale=0.6, oh=ohno, oh_scale=0.2) #Create 1D and 2D spectra objects for all orders combining both H and K bands (easy eh?)
spec1d.combine_orders() #Combine all orders in 1D spectrum into one very long spectrum
spec2d.combine_orders() #Combine all orders in 2D spectrum into one very long spectrum
spec2d.plot(demo_lines, pause=True, close=True) #View long 2D spectrum
#~~~~~~~~~~~~~~~~~~~~SUBTRACT CONTINUUM~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
spec1d.subtract_continuum() #Subtract continuum in 1D
#spec1d.subtract_continuum(lines=demo_lines, vrange=[-50.0,50.0]) #Subtract continuum in 1D
spec1d.combine_orders() #Combine all orders in 1D spectrum into one very long spectrum
spec2d.subtract_continuum() #Subtract continuum in 2D
#spec2d.subtract_continuum(lines=demo_lines, vrange=[-50.0,50.0]) #Subtract continuum in 2D
spec2d.combine_orders() #Combine all orders in 2D spectrum into one very long spectrum
spec2d.plot(demo_lines, pause=True, close=True) #View long 2D spectrum
#~~~~~~~~~~~~~~~~~~~~POSITION-VELOCITY DIAGRAMS AND ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
pv = position_velocity(spec1d.combospec, spec2d.combospec, demo_lines) #Extract and create a datacube in position-velocity space of all lines in line list(s) found in spectrum
pv.view(line='H2 1-0 S(1)', printlines=True, pause=True, close=False) #View extracted lines and draw circle around them.
pv = position_velocity(spec1d.combospec, spec2d.combospec, h2_lines) #Extract and create a datacube in position-velocity space of all lines in line list(s) found in spectrum
demo_extract_region = region(pv, file='demo.reg', background='all', name='Demo Region', show_regions=True) #Extract flux for region defined in DS9
show()
ans = input('Press any key to continue.')
#~~~~~~~~~~~~~~~~~~~~~~~S/N EXTRACTION~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# demo_extract_sn = region(pv, name='SN_Demo', background='all', optimal_extraction=True, line='1-0 S(1)', pixel_range=[-10,10], weight_threshold=0.5, savepdf=True) #Grab line fluxes from a user specified region, here defined in this script
# #demo_extract_sn = region(pv, name='SN_Demo', background='all', s2n_cut = 0.0, s2n_mask = 5.0, line='1-0 S(1)', pixel_range=[-10,10]) #Grab line fluxes from a user specified region, here defined in this script
# ans = input('Press any key to continue.')
#~~~~~~~~~~~~~~~~~~~~MAKE A PLOT~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
h2_demo = h2.make_line_list() #Set up object for storing H2 transitions
#h2_demo.set_flux(demo_extract_sn) #Read H2 line fluxes into object to calculate column density of H2
h2_demo.set_flux(demo_extract_region) #Read H2 line fluxes into object to calculate column density of H2
h2_demo.calculate_column_density() #Calculate column density of H2
#Test making a plot
use_V_ring = [1,2,3,4,5,6,7,8,9,10,11] #Plot the following vibration states
h2_demo.v_plot(plot_single_temp=False, show_upper_limits=False, s2n_cut = 3.0, show_labels=False, V=use_V_ring, savepdf=True) #Plot Boltzmann Diagram
show()
| 3,923
| 73.037736
| 240
|
py
|
plotspec
|
plotspec-master/datacube_demo.py
|
#Test demo script for make_datacube.py library
from scipy import *
import make_datacube as cubelib #Import library to make datacubes
workdir = '/Volumes/IGRINS_Data/datacube_demo/' #Set to where you want to save resulting fits files
vrange = [-10.0,10.0] #Velocity range
#Demo of saving files from datacube
cube = cubelib.data() #Create datacube object
cube.fill_gaps() #Fill in nans; this is optional, comment it out if you don't want to do this
cube.savecube('1-0 S(1)', workdir+'1-0_S(1)_cube.fits') #Save datacube of an emission line
cube.saveimage('1-0 S(1)', workdir+'1-0_S(1)_img.fits', vrange=vrange) #Save image of an emission line in the velocity range "vrange", here set to +/- 10 km/s
cube.saveratio('2-1 S(1)', '1-0 S(1)', vrange=vrange, fname=workdir+'ratio_21s1_10s1.fits') #Test save ratio maps
| 812
| 61.538462
| 158
|
py
|
plotspec
|
plotspec-master/plotspec.py
|
#This library will eventually be the ultimate IGRINS emission line viewability/analysis code
#
#Started as test_new_plotspec.py
#Set matplotlib backend to get around freezing plot windows; first try TkAgg
import matplotlib
#Import libraries
import os #Import OS library for checking and creating directories
import json #For reading in json files, ie. wavelength solutions given by the PLP not in fits files
from astropy.io import fits #Use astropy for processing fits files
from astropy.modeling import models, fitting #import the astropy model fitting package
import pyregion #For reading in regions from DS9 into python
from pylab import * #Always import pylab because we use it for everything
from scipy.interpolate import interp1d, UnivariateSpline, griddata #For interpolating
#from scipy.ndimage import zoom #Was used for continuum subtraction at one point, commented out for now
import ds9 #For scripting DS9
#import h2 #For dealing with H2 spectra
import copy #Allow objects to be copied
from scipy.ndimage import median_filter #For cosmic ray removal
from astropy.convolution import convolve, Gaussian1DKernel, Gaussian2DKernel, interpolate_replace_nans #For smoothing and interpolating over nans
from astropy.stats import biweight_location
from astropy.nddata import StdDevUncertainty
from pdb import set_trace as stop #Use stop() for debugging
#ion() #Turn on interactive plotting for matplotlib
from matplotlib.colors import LogNorm #For plotting PV diagrams with imshow
#from numba import jit #Import numba for speeding up some definitions, commented out for now since there is a major error importing numba
from matplotlib.backends.backend_pdf import PdfPages #For outputting a pdf with multiple pages (or one page)
from pylab import size #For some reason size was not working, so I will import it last
#For creating synthetic spectra for standard stars using Phoenix model atmospheres, Gollum, and muler
from astropy import units as u
from dust_extinction.averages import GCC09_MWAvg #Dust_extinction: https://dust-extinction.readthedocs.io/en/latest/index.html#
import matplotlib.gridspec as grd
from tynt import FilterGenerator
from astropy.visualization import ImageNormalize, ZScaleInterval, LogStretch, SinhStretch, AsinhStretch, AsymmetricPercentileInterval
try: #Try to import bottleneck library, this greatly speeds up things such as nanmedian, nanmax, and nanmin
from bottleneck import * #Library to speed up some numpy routines
except ImportError:
print("Bottleneck library not installed. Code will still run but might be slower. You can try to bottleneck with 'pip install bottleneck' or 'sudo port install bottleneck' for a speed up.")
try:
from gollum.phoenix import PHOENIXSpectrum #Gollum: https://gollum-astro.readthedocs.io/en/latest/
from specutils.manipulation import LinearInterpolatedResampler #Specutils: https://specutils.readthedocs.io/en/stable/
LinInterpResampler = LinearInterpolatedResampler()
from muler.utilities import resample_list #Muler:
from muler.echelle import EchelleSpectrum, EchelleSpectrumList
except:
print('Specutils, muler, and/or gollum not installed. Legacy code should still run but PHOENIX stellar model atmospheres or absolute flux calibration will not be usable. Please raise a github issue if you need help with this.')
#Global variables user should set
#pipeline_path = '/Volumes/home/plp/'
#save_path = '/Volumes/home/results/'
#pipeline_path = '/Volumes/IGRINS_Data/plp/' #Paths for running on linux laptop
#save_path = '/Volumes/IGRINS_Data/results/'
#save_path = '/home/kfkaplan/Desktop/results/'
#pipeline_path = '/Volumes/IGRINS_Data_Backup/plp/'
#save_path = '/Volumes/IGRINS_Data_Backup/results/' #Define path for saving temporary files'
#pipeline_path = '/Users/kk25239/Desktop/plp-update-test/plp/'
pipeline_path = '/Users/kk25239/Desktop/plp/'
save_path = '/Users/kk25239/Desktop/results/'
path_to_pheonix_models = '/Users/kk25239/Box/phoenix_standard_star_models'
scratch_path = save_path + 'scratch/' #Define a scratch path for saving some temporary files
if not os.path.exists(scratch_path): #Check if directory exists
print('Directory '+ scratch_path + ' does not exist. Making new directory.')
os.mkdir(scratch_path) #If path does not exist, make directory
#default_wave_pivot = 0.625 #Scale where overlapping orders (in wavelength space) get stitched (0.0 is blue side, 1.0 is red side, 0.5 is in the middle)
default_wave_pivot = 0.85 #Scale where overlapping orders (in wavelength space) get stitched (0.0 is blue side, 1.0 is red side, 0.5 is in the middle)
set_velocity_range = 100.0 # +/- km/s for interpolated velocity grid
set_velocity_res = 1.0 #Resolution of velocity grid
#slit_length = 62 #Number of pixels along slit in both H and K bands
slit_length = 100 #Number of pixels along slit in both H and K bands
block = 750 #Block of pixels used for median smoothing, using iteratively bigger multiples of block
cosmic_horizontal_mask = 5 #Number of pixels to median smooth horizontally (in wavelength space) when searching for cosmics
cosmic_horizontal_limit = 3.0 #Number of times the data must be above it's own median smoothed self to find cosmic rays
cosmic_s2n_min = 5.0 #Minimum S/N needed to flag a pixel as a cosmic ray
#Global variables, should remain untouched
data_path = pipeline_path + 'outdata/'
calib_path = pipeline_path + 'calib/primary/'
OH_line_list = 'OH_Rousselot_2000.dat' #Read in OH line list
c = 2.99792458e5 #Speed of light in km/s
half_block = block // 2 #Half of the block used for running median smoothing (integer division so it stays usable as a pixel index)
#slit_length = slit_length - 1 #This is necessary to get the proper indexing
# vega_radius = 1.8019e+11 #cm. average of polar and equitorial radii from Yoon et al. (2010) Table 1 column 2
# vega_distance = 2.36940603e+19 #cm, based on parallax from Leeuwen (2007) which is an updated Hipparcos catalog
# vega_R_over_D_squared = (vega_radius/vega_distance)**2 #(Radius/Distance)^2, used for magnitude estimates from synthetic standard star spectra and absolute flux calibration
vega_V_flambdla_zero_point = 363.1e-7 #Vega flux zero point for V band from Bessell et al. (1998) in erg cm^-2 s^-1 um^-1
V_band_effective_lambda = 0.545 #Effective central wavelength for V band in microns
#Definition takes a high resolution spectrum and rebins it (via interpolation and integration) onto a smaller grid
#while conserving flux, based on Chad Bender's idea for "srebin"
def srebin(oldWave, newWave, oldFlux, kind='linear'):
nPix = len(newWave) #Number of pixels in new binned spectrum
newFlux = zeros(len(newWave)) #Set up array to store rebinned fluxes
interpObj = interp1d(oldWave, oldFlux, kind=kind, bounds_error=False) #Create a 1D linear interpolation object for finding the flux density at any given wavelength
#wavebindiffs = newWave[1:] - newWave[:-1] #Calculate difference in wavelengths between each pixel on the new wavelength grid
wavebindiffs = diff(newWave) #Calculate difference in wavelengths between each pixel on the new wavelength grid
wavebindiffs = hstack([wavebindiffs, wavebindiffs[-1]]) #Reflect last difference so that wavebindiffs is the same size as newWave
wavebinleft = newWave - 0.5*wavebindiffs #Get left side wavelengths for each bin
wavebinright = newWave + 0.5*wavebindiffs #get right side wavelengths for each bin
fluxbinleft = interpObj(wavebinleft)
fluxbinright = interpObj(wavebinright)
for i in range(nPix): #Loop through each pixel on the new wavelength grid
useOldWaves = (oldWave >= wavebinleft[i]) & (oldWave <= wavebinright[i]) #Find old wavelength points that are inside the new bin
nPoints = sum(useOldWaves)
wavePoints = zeros(nPoints+2)
fluxPoints = zeros(nPoints+2)
wavePoints[0] = wavebinleft[i]
wavePoints[1:-1] = oldWave[useOldWaves]
wavePoints[-1] = wavebinright[i]
fluxPoints[0] = fluxbinleft[i]
fluxPoints[1:-1] = oldFlux[useOldWaves]
fluxPoints[-1] = fluxbinright[i]
newFlux[i] = 0.5 * nansum((fluxPoints[:-1]+fluxPoints[1:])*diff(wavePoints)) / wavebindiffs[i]
return newFlux
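#Example usage (a hedged sketch with made-up grids): rebin a finely sampled spectrum onto a coarser wavelength grid while conserving flux
#    old_wave = arange(1.4, 2.5, 1e-5)
#    new_wave = arange(1.4, 2.5, 1e-4)
#    new_flux = srebin(old_wave, new_wave, exp(-0.5*((old_wave-2.0)/0.01)**2)) #A gaussian test line profile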
#~~~~~~~~~~~~~~~~~~~~~~~~Make a simple contour plot given three lists~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def contour_plot(x, y, z, nx=100, ny=100, levels=[3.,4.,5.,6.,7.,8.,9.,10.,15.,20.,30.,40.]): #Canned definition to make interpolated contour plots with three lists, based off of http://stackoverflow.com/questions/9008370/python-2d-contour-plot-from-3-lists-x-y-and-rho1
#if z_range[1] == 0: #Automatically set z range if not provided by user
# z_range = [min(z), max(z)]
xmin, xmax = min(x), max(x)
ymin, ymax = min(y), max(y)
#zmin, zmax = min(z), max(z)
#Set up grid of interpolated points
xi, yi = linspace(xmin, xmax, nx), linspace(ymin, ymax, ny)
xi, yi = meshgrid(xi, yi, copy=False)
#Interpolate
#rbf = Rbf(x, y, z, function='linear')
#zi = rbf(xi, yi)
zi = griddata((x,y), z, (xi, yi), method='linear')
#imshow(zi, vmin=z_range[0], vmax=z_range[1], origin='lower',
# extent=[xmin, xmax, ymin, ymax], aspect='auto')
#colorbar()
scatter(x, y, color='grey', s=7)
cs = contour(xi,yi,zi, levels=levels, colors='black')
clabel(cs, inline=1, fontsize=12, fmt='%1.0f')
#scatter(x, y, color='grey')
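#Example usage (a hedged sketch with made-up data):
#    x, y = rand(200), rand(200) #Random scatter of sample points
#    contour_plot(x, y, 10.0*(x + y), levels=[3., 5., 10., 15.]) #Contours of a surface interpolated through the points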
#~~~~~~~~~~~~~~~~~~~~~~~~Code for storing information on saved data directories under save_path~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class save_class: #Class stores information on what path to save files to
def __init__(self):
self.object = 'scratch' #If no name is set, by default save in the "scratch" directory
self.set_path()
def name(self, input): #User can change name of science target in script by saying 'save.name('NGC XXXX')
self.object = input.replace(' ', '_')
self.set_path()
def set_path(self): #Update path to directory to save results in
self.path = save_path + self.object + '/'
if not os.path.exists(self.path): #Check if directory exists
print('Directory '+ self.path+ ' does not exist. Making new directory.')
os.mkdir(self.path) #If path does not exist, make directory
save = save_class() #Create object user can change the name to
#~~~~~~~~~~~~~~~Optimized pre-compiled functions ~~~~~~~~~~~~~~~~~~~
#@jit #Fast precompiled function for nanmax for using whole array (no specific axis)
def flat_nanmax(input):
max = -1e99
f = input.flat
for i in f:
if i > max:
max = i
if max==-1e99:
return nan
else:
return max
#@jit #Fast precompiled function for nanmin for using whole array (no specific axis)
def flat_nanmin(input):
min = 1e99
f = input.flat
for i in f:
if i < min:
min = i
if min == 1e99:
return nan
else:
return min
#~~~~~~~~~~~~~~~~~~~~~~~~~Code for modifying spectral data~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Roll an array (typically an order) an arbitrary number of pixels (via interpolation for fractions of a pixel)
#@jit #Compile Just In Time using numba, for speed up
def roll_interp(array_to_correct, correction, axis=0):
integer_correction = round(correction) #grab whole number component of correction
fractional_correction = correction - float(integer_correction) #Grab fractional component of correction (remainder after grabbing whole number out)
rolled_array = roll(array_to_correct, integer_correction, axis=axis) #Roll array the number of pixels matching the integer correction
if fractional_correction > 0.: #For a positive correction
rolled_array_plus_one = roll(array_to_correct, integer_correction+1, axis=axis) #Roll array an extra one pixel to the right
else: #For a negative correction
rolled_array_plus_one = roll(array_to_correct, integer_correction-1, axis=axis) #Roll array an extra one pixel to the left
corrected_array = rolled_array*(1.0-abs(fractional_correction)) + rolled_array_plus_one*abs(fractional_correction) #interpolate over the fraction of a pixel
#stop()
return corrected_array
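#Worked example (a hedged note): a shift of 0.5 pixels linearly blends the 0 and 1 pixel rolls,
#    roll_interp(array([0., 1., 2., 3.]), 0.5) #-> array([1.5, 0.5, 1.5, 2.5])
#since round(0.5) gives an integer roll of 0 and the remaining fraction weights the roll-by-one array equally.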
#Do a quick preview of a 2D array in DS9
def quicklook(arr, pause=False, close=False):
spec_fits = fits.PrimaryHDU(arr) #Create FITS object
spec_fits.writeto(save.path + 'quicklook.fits', overwrite=True) #Save temporary fits files for later viewing in DS9
ds9.open() #Display spectrum in DS9
ds9.show(save.path + 'quicklook.fits', new=False)
ds9.set('zoom to fit')
ds9.set('scale log') #Set view to log scale
ds9.set('scale ZScale') #Set scale limits to Zscale, looks okay
#Pause for viewing if user specified
if pause:
wait()
#Close DS9 after viewing if user specified (pause should be true or else DS9 will open then close)
if close:
ds9.close()
#For interpolating an order (or orders) to a new number of pixels in the y direction along the slit
##@jit #Fast precompiled function for nanmax for using whole array (no specific axis)
def regrid_slit(ungridded_spectrum, size=slit_length):
len_y, len_x = shape(ungridded_spectrum) #Get x and y shape of ungridded spectrum
ungridded_y = arange(len_y) #Get y size of ungridded spectrum
gridded_y = arange(size) * (float(len_y)/float(size)) #Get size of gridded y
interp_spectrum = interp1d(ungridded_y, ungridded_spectrum, axis=0, bounds_error=False, kind='nearest') #Create interpolation object
gridded_spectrum = interp_spectrum(gridded_y) #Create interpolated spectrum (along the slit axis)
scale = float(len_y) / float(size) #Find factor to scale spectrum down by to account for the fact we have spread it out over a larger area
gridded_spectrum = gridded_spectrum * scale #Do the actual scaling
return gridded_spectrum #Send the now interpolated stretched spectrum back to where it came from
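#For reference (a hedged note): the len_y/size scaling above compensates for spreading the same light over a different number of pixels, so the flux summed along the slit is approximately conserved after regridding.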
#Roll an array (typically an order) an arbitrary number of pixels to correct flexure
#@jit #Compile Just In Time using numba, for speed up
def flexure(array_to_correct, correction):
integer_correction = int(correction) #grab whole number component of correction
fractional_correction = correction - float(integer_correction) #Grab fractional component of correction (remainder after grabbing whole number out)
rolled_array = roll(array_to_correct, integer_correction) #Roll array the number of pixels matching the integer correction
if fractional_correction > 0.: #For a positive correction
rolled_array_plus_one = roll(array_to_correct, integer_correction+1) #Roll array an extra one pixel to the right
else: #For a negative correction
rolled_array_plus_one = roll(array_to_correct, integer_correction-1) #Roll array an extra one pixel to the left
corrected_array = rolled_array*(1.0-abs(fractional_correction)) + rolled_array_plus_one*abs(fractional_correction) #interpolate over the fraction of a pixel
#stop()
return corrected_array
#Artificially redden a spectrum
#@jit #Compile Just In Time using numba, for speed up
def redden(B, V, waves, flux):
alpha = 2.14 #Slope of near-infrared extinction law from Stead & Hoare (2009)
#alpha = 1.75 #Slope from older literature
#lambda_H = 1.651 #Effective wavelength of H band determiend from Stead & Hoare (2009)
#lambda_K = 2.159 #Effective wavelength of K band determiend from Stead & Hoare (2009)
#lambda_H = 1.662 #Effective wavelength of H band filter given by 2MASS (http://www.ipac.caltech.edu/2mass/releases/allsky/doc/sec6_4a.html)
#lambda_K = 2.159 #Effective wavelength of K band filter given by 2MASS (http://www.ipac.caltech.edu/2mass/releases/allsky/doc/sec6_4a.html)
vega_B = 0.03 #Vega B band mag, from Simbad
vega_V = 0.03 #Vega V band mag, from simbad
#vega_H = -0.03 #Vega H band magnitude, from Simbad
#vega_K = 0.13 #Vega K band magnitude, from Simbad
#E_HK = (H-K) - (vega_H-vega_K) #Calculate E(H-K) = (H-K)_observed - (H-K)_intrinsic, intrinsic = Vega in this case
E_BV = (B-V) - (vega_B-vega_V) #Calculate E(B-V) = (B-V)_observed - (B-V)_intrinsic, intrinsic = Vega in this case
R = 3.09 #Ratio of total/selective extinction from Rieke & Lebofsky (1985)
A_V = R * E_BV #Calculate extinction A_V for standard star
A_lambda = array([ 0.482, 0.282, 0.175, 0.112, 0.058]) #(A_lambda / A_V) extinction curve from Rieke & Lebofsky (1985) Table 3
l = array([ 0.806, 1.22 , 1.63 , 2.19 , 3.45 ]) #Wavelengths for extinction curve from Rieke & Lebofsky (1985)
extinction_curve = interp1d(l, A_lambda, kind='quadratic') #Create interpolation object for extinction curve from Rieke & Lebofsky (1985)
reddened_flux = flux * 10**(-0.4*extinction_curve(waves)*A_V) #Apply artificial reddening
#reddened_flux = flux * 10**( -0.4 * (E_HK/(lambda_H**(-alpha)-lambda_K**(-alpha))) * waves**(-alpha) ) #Apply artificial reddening
#stop()
return reddened_flux
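#For reference (a hedged note restating the function above): the reddening applied is
#    F_red(lambda) = F(lambda) * 10**(-0.4 * (A_lambda/A_V)(lambda) * A_V), with A_V = R * E(B-V)
#where (A_lambda/A_V) is interpolated from the Rieke & Lebofsky (1985) extinction curve.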
#Mask Hydrogen absorption lines in A0V standard star continuum, used during relative flux calibration
def mask_hydrogen_lines(wave, flux):
H_lines = [2.1661, 1.9451, 1.8181, 1.7367, 1.6811, 1.6412, 1.6114, 1.5885, 1.5705, 1.5561, 1.5443, 1.5346, 1.5265, 1.5196] #Wavelengths of H I lines
d_range = [-0.002 , 0.002] #Wavelength range for masking H I lines
for H_wave in H_lines: #For each hydrogen line that might be in the flux array
mask = (wave > H_wave + d_range[0]) & (wave < H_wave + d_range[1]) #Find pixels in flux array on top of H I line
flux[mask] = nan #Apply mask
goodpix = flux > -9e99 #Read in indicies of mask
#stop()
#print min(wave), max(wave), len(flux[goodpix])
if len(flux[goodpix]) < 2048: #If any mask is applied (ie. if any H I lines are found in order)
interpolated_flux = interp1d(wave[goodpix], flux[goodpix], bounds_error = False) #Interpolate over only unmasked pixels
flux_to_return = interpolated_flux(wave) #Replace masked pixels with a linear interpolation around them
return flux_to_return #Return now masked pixels
else:
return flux #If nothing is masked, return the flux unmodified
def absolute_flux_calibration(std_date, std_frameno, sci, sci2d=None, t_std=1.0, t_obj=1.0, V=0.03, slit_length_arcsec=14.8, PA=90.0, guiding_error=1.5, per_solid_angle=True):
#Calculate the fraction of starlight (usually used for A0V standards for absolute flux calibration) through the IGRINS slit
#Based on https://github.com/kfkaplan/estimate_IGRINS_std_star_light_through_slit/blob/main/estimate_IGRINS_std_star_light_through_slit.ipynb
#Read in slit profile file outputted by IGRINS PLP
#Note by default per_solid_angle=True will give results in erg s^-1 cm^-2 sr^-1 and the flux will be averaged over the solid angle subtended by the IGRINS slit,
#if per_solid_angle=False, the results will be in erg s^-1 cm^-2, and will be the flux THROUGH the slit, this result does NOT account for light from the science target outside the slit
print('slit_length_arcsec = ', slit_length_arcsec)
#magnitude_scale = 10**(0.4*(0.03 - V)) #Scale flux by difference in V magnitude between standard star and Vega (V for vega = 0.03 in Simbad)
magnitude_scale = 10**(0.4*(-V))
f_through_slit_H = 0.
f_through_slit_K = 0.
for band in ['H', 'K']:
json_file = open(data_path+str(std_date)+'/SDC'+band+'_'+str(std_date)+'_'+'%.4d' % int(std_frameno)+'.slit_profile.json')
json_obj = json.load(json_file)
x = array(json_obj['profile_x']) * slit_length_arcsec
y = array(json_obj['profile_y'])
#Fit 2 Moffat distributions to the psfs from A and B positions (see https://docs.astropy.org/en/stable/modeling/compound-models.html)
g1 = models.Moffat1D(amplitude=0.5, x_0=slit_length_arcsec*0.33333, alpha=1.0, gamma=1.0)
g2 = models.Moffat1D(amplitude=-0.5, x_0=slit_length_arcsec*0.66666, alpha=1.0, gamma=1.0)
gg_init = g1 + g2
#fitter = fitting.SLSQPLSQFitter()
fitter = fitting.TRFLSQFitter()
gg_fit = fitter(gg_init, x, y)
print('FWHM A beam:', gg_fit[0].fwhm)
print('FWHM B beam:', gg_fit[1].fwhm)
#breakpoint()
#Numerically estimate light through slit
g1_fit = models.Moffat2D(amplitude=abs(gg_fit[0].amplitude) , x_0=gg_fit[0].x_0 - 0.5*slit_length_arcsec, alpha=gg_fit[0].alpha, gamma=gg_fit[0].gamma)
g2_fit = models.Moffat2D(amplitude=abs(gg_fit[1].amplitude), x_0=gg_fit[1].x_0 - 0.5*slit_length_arcsec, alpha=gg_fit[1].alpha, gamma=gg_fit[1].gamma)
#Generate a 2D grid in x and y for numerically calculating slit loss
n_axis = 5000
half_n_axis = n_axis / 2
dx = 1.2 * (slit_length / n_axis)
dy = 1.2 * (slit_length / n_axis)
y2d, x2d = meshgrid(arange(n_axis), arange(n_axis))
x2d = (x2d - half_n_axis) * dx
y2d = (y2d - half_n_axis) * dy
#Perform numerical integration for total flux ignoring slit losses
#Test simulating guiding error
position_angle_in_radians = PA * (pi)/180.0 #PA in radians
fraction_guiding_error = cos(position_angle_in_radians)*guiding_error #arcsec, estimated by doubling average fwhm of Moffat functions
diff_x0 = fraction_guiding_error * cos(position_angle_in_radians)
diff_y0 = fraction_guiding_error * sin(position_angle_in_radians)
g1_fit.x_0 += 0.5*diff_x0
g2_fit.x_0 += 0.5*diff_x0
g1_fit.y_0 += 0.5*diff_y0
g2_fit.y_0 += 0.5*diff_y0
profiles_2d = zeros(shape(x2d))
n = 5
for i in range(n):
profiles_2d += (1/n)*(g1_fit(x2d, y2d) + g2_fit(x2d, y2d))
g1_fit.x_0 -= (1/(n-1))*diff_x0
g2_fit.x_0 -= (1/(n-1))*diff_x0
g1_fit.y_0 -= (1/(n-1))*diff_y0
g2_fit.y_0 -= (1/(n-1))*diff_y0
profiles_2d = profiles_2d / nansum(profiles_2d) #Normalize each pixel by fraction of starlight and area in sterradians per pixel
slit_width_to_length_ratio = 1.0/14.8
slit_width_arcsec = slit_length_arcsec * slit_width_to_length_ratio
outside_slit = (y2d <= -0.5*slit_width_arcsec) | (y2d >= 0.5*slit_width_arcsec) | (x2d <= -0.5*slit_length_arcsec) | (x2d >= 0.5*slit_length_arcsec)
profiles_2d[outside_slit] = nan
f_through_slit = nansum(profiles_2d)
if (band == 'H'):
f_through_slit_H = f_through_slit
elif (band == 'K'):
f_through_slit_K = f_through_slit
# flux_total = nansum(profiles_2d) * dx * dy
# profiles_2d = profiles_2d / flux_total #Normalize
#Perform numerical integration for flux through slit by masking out pixels outside of the slit
#area_flat_2d = ones(shape(profiles_2d)) * (slit_length_arcsec * slit_width_arcsec) / size(profiles_2d) #Area per pixel in arcsec^-2
# slit_area = (slit_length_arcsec * slit_width_arcsec)
# area_profiles = slit_area / nansum(profiles_2d) #Calculate area on sky through slit covered by Std Star PSF in arcsec^2, later used for calibration
# area_per_pixel = slit_area / 100 * (100 * slit_width_to_length_ratio)
#f_through_slit = nansum(profiles_2d) * dx * dy
#f_through_slit = flux_inside_slit / flux_total
#breakpoint()
#ster_per_slit = (slit_wid) / 4.25e10 #Sterradians covered by the IGRINS slit.
#pixels_per_slit = 100 * (100 * slit_width_to_length_ratio)
arcsec_squared_per_pixel = (slit_length_arcsec * slit_width_arcsec) / (100.0 * (100 * slit_width_to_length_ratio))
ster_per_pixel = arcsec_squared_per_pixel / 4.25e10 #Sterradians per pixel
w = (100 * slit_width_to_length_ratio) #Pixels per slit
#breakpoint()
# combined_abs_flux_scale = magnitude_scale * (t_std / t_obj) * f_through_slit * (1/100.0) #* (ster_per_pixel / w)
#combined_abs_flux_scale = magnitude_scale * (t_std/t_obj) * (area_profiles/area_per_pixel) #Related to eqn. 10 in Lee & Pak (2006)
# combined_abs_flux_scale = magnitude_scale * f_through_slit * (t_std/t_obj) * (1.0 / pixels_per_slit) #Related to eqn. 10 in Lee & Pak (2006)
# #Apply absolute flux calibration to each order seperately
# for order in sci.orders:
# if (band == 'H' and order.wave[0] < 1.85) or (band == 'K' and order.wave[0] >= 1.85):
# order.flux *= combined_abs_flux_scale
# order.noise *= combined_abs_flux_scale
# if sci2d is not None:
# for order in sci2d.orders:
# if (band == 'H' and order.wave[0] < 1.85) or (band == 'K' and order.wave[0] >= 1.85):
# order.flux *= combined_abs_flux_scale
# order.noise *= combined_abs_flux_scale
# print('Band', band)
# print('combined_abs_flux_scale', combined_abs_flux_scale)
# print('f_through_slit', f_through_slit)
#Fit a linear trend to the slit throughput as a function of wavelength by fitting a line through two points
m = (f_through_slit_K - f_through_slit_H) / ((1/2.2) - (1/1.65))
b = f_through_slit_H - m*(1/1.65)
print('f_through_slit_K', f_through_slit_K)
print('f_through_slit_H', f_through_slit_H)
print('m', m)
print('b', b)
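#For reference (a hedged note): m and b above are the exact two-point solution of f = m*(1/lambda) + b
#through (1/1.65 um, f_through_slit_H) and (1/2.2 um, f_through_slit_K), so each order's throughput below is interpolated linearly in inverse wavelength.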
# print('combined_abs_flux_scale', combined_abs_flux_scale)
# print('f_through_slit', f_through_slit)
# combined_abs_flux_scale = magnitude_scale * (t_std / t_obj) * f_through_slit * (1/100.0) #* (ster_per_pixel / w)
for order in sci.orders:
f_through_slit = m*(1/order.wave) + b
if per_solid_angle: #units of erg s^-1 cm^-2 sr^-1
combined_abs_flux_scale = magnitude_scale * (t_std / t_obj) * f_through_slit * (1.0/(ster_per_pixel * w * 100))
else: #units of erg s^-1 cm^-2
combined_abs_flux_scale = magnitude_scale * (t_std / t_obj) * f_through_slit
order.flux *= combined_abs_flux_scale
order.noise *= combined_abs_flux_scale
if sci2d is not None:
for order in sci2d.orders:
f_through_slit = m*(1/order.wave) + b
if per_solid_angle: #units of erg s^-1 cm^-2 sr^-1
combined_abs_flux_scale = magnitude_scale * (t_std / t_obj) * f_through_slit * (1.0/(ster_per_pixel * w * 100))
else: #units of erg s^-1 cm^-2
combined_abs_flux_scale = magnitude_scale * (t_std / t_obj) * f_through_slit
order.flux *= combined_abs_flux_scale
order.noise *= combined_abs_flux_scale
#Function normalizes A0V standard star spectrum, for later telluric correction, or relative flux calibration
def telluric_and_flux_calib(sci, std, std_flattened, calibration=[], B=0.0, V=0.0, y_scale=1.0, y_power=1.0, y_sharpen=0., wave_smooth=0.0, delta_v=0.0, quality_cut = False, no_flux = False, savechecks=True, telluric_power=1.0, telluric_spectrum=[], std_shift=0.0, current_frame=''):
# #Read in Vega Data
std.combine_orders() #Combine orders for standard star spectrum for later plotting
vega_file = pipeline_path + 'master_calib/A0V/vegallpr25.50000resam5' #Directory storing Vega standard spectrum #Set up reading in Vega spectrum
vega_wave, vega_flux, vega_cont = loadtxt(vega_file, unpack=True) #Read in Vega spectrum
vega_wave = (vega_wave / 1e3)*(1.0 + std_shift/c) #convert angstroms to microns and shift wavelengths if a velocity correction is given by the user
vega_flux = vega_flux * 1e3 #Convert per nm to per um for the flux
vega_cont = vega_cont * 1e3
interp_vega_flux = interp1d(vega_wave, vega_flux)
scale_vega_flux = vega_V_flambdla_zero_point / interp_vega_flux(V_band_effective_lambda)
print('vega zero point divided by model vega flux (scale_vega_flux) = ',scale_vega_flux)
#breakpoint()
############scale_vega_flux = 1.0 #Used only for testing
vega_flux *= scale_vega_flux #Scale vega flux to match V band zero point
vega_cont *= scale_vega_flux
waves = arange(1.4, 2.5, 0.000005) #Wavelength grid for building the synthetic H I lines
HI_line_profiles = ones(len(waves)) #Array to store synthetic (ie. scaled vega) H I lines
x = array([1.4, 1.5, 1.6, 1.62487, 1.66142, 1.7, 1.9, 2.0, 2.1, 2.2, 2.3, 2.4, 2.5]) #Coordinates tracing continuum of Vega, taken between H I lines in the model spectrum vegallpr25.50000resam5
y = array([2493670., 1950210., 1584670., 1512410., 1406170. , 1293900., 854857., 706839., 589023., 494054., 417965., 356822., 306391.]) * scale_vega_flux * 1e3
interpolate_vega_continuum = interp1d(x, y, kind='cubic', bounds_error=False) #Create interpolation object for Vega continuum defined by coordinates above
interpolated_vega_continuum = interpolate_vega_continuum(waves) #Grab interpolated continuum once so we don't have to interpolate it again
#scale_vega_continuum = vega_V_flambdla_zero_point/interpolate_vega_continuum(V_band_effective_lambda)#Scale continuum estimate to match V band zero point
#interpolated_vega_continuum *= scale_vega_continuum
continuum_normalized_vega_flux = vega_flux / interpolate_vega_continuum(vega_wave) #Normalize synthetic Vega spectrum by its own continuum
#interpolate_regular_vega_spectrum = interp1d(vega_wave, 1.0 + (continuum_normalized_vega_flux-1.0), bounds_error=False) #Divide out continnum and interpolate H I lines, allowing for the H I lines to scale by y_scale
if size(y_scale) == 2 or size(y_power) == 2 or size(y_sharpen) == 2 or size(wave_smooth) == 2: #if there are two sets of inputs for modifying the H I lines in the synthetic Vega spectrum, run twice and average the two together
if size(y_scale) == 1: y_scale = [y_scale, y_scale] #If there is only one input for these parameters, just make two identical versions of it to run it twice easily
if size(y_power) == 1: y_power = [y_power, y_power]
if size(y_sharpen) == 1: y_sharpen = [y_sharpen, y_sharpen]
if size(wave_smooth) == 1: wave_smooth = [wave_smooth, wave_smooth]
if y_sharpen[0] > 0.: #If user specifies they want to sharpen the H I lines in the synthetic Vega spectrum, smaller sharp numbers will sharpen the lines more, best to start with a very large number
g = Gaussian1DKernel(stddev = y_sharpen[0]) #Set up gaussian smoothing for Vega H I lines, here sharp = std deviation in pixels of gaussian used for smoothing
smooothed_interpolated_vega_lines = convolve(continuum_normalized_vega_flux, g) - 1.0 #Smooth the vega lines, subtract one to put the continuum for the smoothed lines on the x axis
intepolate_vega_lines = interp1d(vega_wave, 1.0 + y_scale[0] * ((continuum_normalized_vega_flux-smooothed_interpolated_vega_lines)**y_power[0]-1.0), bounds_error=False) #Divide out continuum and interpolate H I lines, allowing for the H I lines to scale by y_scale
else: #Ignore sharpening and just use the old method (most common action taken)
intepolate_vega_lines = interp1d(vega_wave, 1.0 + y_scale[0] * (continuum_normalized_vega_flux**y_power[0]-1.0), bounds_error=False) #Divide out continuum and interpolate H I lines, allowing for the H I lines to scale by y_scale
interpolated_vega_lines_0 = intepolate_vega_lines(waves) #Grab interpolated lines once
if y_sharpen[1] > 0.: #If user specifies they want to sharpen the H I lines in the synthetic Vega spectrum, smaller sharp numbers will sharpen the lines more, best to start with a very large number
g = Gaussian1DKernel(stddev = y_sharpen[1]) #Set up gaussian smoothing for Vega H I lines, here sharp = std deviation in pixels of gaussian used for smoothing
smooothed_interpolated_vega_lines = convolve(continuum_normalized_vega_flux, g) - 1.0 #Smooth the vega lines, subtract one to put the continuum for the smoothed lines on the x axis
intepolate_vega_lines = interp1d(vega_wave, 1.0 + y_scale[1] * ((continuum_normalized_vega_flux-smooothed_interpolated_vega_lines)**y_power[1]-1.0), bounds_error=False) #Divide out continuum and interpolate H I lines, allowing for the H I lines to scale by y_scale
else: #Ignore sharpening and just use the old method (most common action taken)
intepolate_vega_lines = interp1d(vega_wave, 1.0 + y_scale[1] * (continuum_normalized_vega_flux**y_power[1]-1.0), bounds_error=False) #Divide out continuum and interpolate H I lines, allowing for the H I lines to scale by y_scale
interpolated_vega_lines_1 = intepolate_vega_lines(waves) #Grab interpolated lines a second time
interpolated_vega_lines = interpolated_vega_lines_0 + interpolated_vega_lines_1 - 1.0 #Combine the two sets of modified Vega H I lines into one synthetic set of lines
else: #If there is only one set of inputs for modifying the Vega synthetic spectrum H I lines, just run the single input (this is generally the default; the branch above is an added complication)
if y_sharpen > 0.: #If user specifies they want to sharpen the H I lines in the synthetic Vega spectrum, smaller sharp numbers will sharpen the lines more, best to start with a very large number
g = Gaussian1DKernel(stddev = y_sharpen) #Set up gaussian smoothing for Vega H I lines, here sharp = std deviation in pixels of gaussian used for smoothing
smooothed_interpolated_vega_lines = convolve(continuum_normalized_vega_flux, g) - 1.0 #Smooth the vega lines, subtract one to put the continuum for the smoothed lines on the x axis
intepolate_vega_lines = interp1d(vega_wave, 1.0 + y_scale * ((continuum_normalized_vega_flux-smooothed_interpolated_vega_lines)**y_power-1.0), bounds_error=False) #Divide out continuum and interpolate H I lines, allowing for the H I lines to scale by y_scale
else: #Ignore sharpening and just use the old method (most common action taken)
intepolate_vega_lines = interp1d(vega_wave, 1.0 + y_scale * (continuum_normalized_vega_flux**y_power-1.0), bounds_error=False) #Divide out continuum and interpolate H I lines, allowing for the H I lines to scale by y_scale
interpolated_vega_lines = intepolate_vega_lines(waves) #Grab interpolated lines once
a0v_synth_cont = interp1d(waves, redden(B, V, waves, interpolated_vega_continuum), kind='linear', bounds_error=False) #Create the artificially reddened synthetic A0V continuum interpolation object
#STOP
if wave_smooth > 0.: #If user specifies they want to gaussian smooth the synthetic spectrum
g = Gaussian1DKernel(stddev = wave_smooth) #Set up gaussian smoothing for Vega H I lines, here wave_smooth = std deviation in pixels of gaussian used for smoothing
a0v_synth_spec = interp1d(waves, redden(B, V, waves, convolve(interpolated_vega_lines*interpolated_vega_continuum, g)), kind='linear', bounds_error=False) #Artificially redden synthetic A0V spectrum to match standard star observed
else: #If no smoothing
a0v_synth_spec = interp1d(waves, redden(B, V, waves, interpolated_vega_lines*interpolated_vega_continuum), kind='linear', bounds_error=False) #Artificially redden model Vega spectrum to match A0V star observed
#Onto calibrations...
num_dimensions = ndim(sci.orders[0].flux) #Store number of dimensions
if num_dimensions == 2: #If number of dimensions is 2D
slit_pixel_length = len(sci.orders[0].flux[:,0]) #Height of slit in pixels for this target and band
if savechecks: #If user specifies saving pdf check files
with PdfPages(save.path + 'check_flux_calib_'+current_frame+'.pdf') as pdf: #Load pdf backend for saving multipage pdfs
#Plot easy preview check of how well the H I lines are being corrected
clf() #Clear page first
expected_continuum = copy.deepcopy(std_flattened) #Create object to store the "expected continuum" which will end up being the average of each order's adjacent blaze functions from what the PLP thinks the blaze is for the standard star
g = Gaussian1DKernel(stddev=5.0) #Do a little bit of smoothing of the blaze functions
for i in range(2,std.n_orders-2): #Loop through each order
adjacent_orders = array([convolve(std.orders[i-1].flux/std_flattened.orders[i-1].flux, g), #Combine the order before and after the current order, while applying a small amount of smoothing
convolve(std.orders[i+1].flux/std_flattened.orders[i+1].flux, g),])
mean_order = nanmean(adjacent_orders, axis=0) #Smooth the before and after order blazes together to estimate what we think the continuum/blaze should be
expected_continuum.orders[i].flux = mean_order #Save the expected continuum
expected_continuum.combine_orders()#Combine all the orders in the expected continuum
HI_line_waves = [2.166120, 1.7366850, 1.6811111, 1.5884880] #Wavelengths of H I lines we will be previewing
HI_line_labels = ['Br-gamma','Br-10','Br-11', 'Br-14'] #Names of H I lines we will be previewing
delta_wave = 0.012 # +/- wavelength range to plot on the xaxis of each line preview
n_HI_lines = len(HI_line_waves) #Count up how many H I lines we will be plotting
subplots(nrows=2, ncols=2) #Set up subplots
figtext(0.02,0.5,r"Flux", fontsize=20,rotation=90) #Set shared y-axis label
figtext(0.4,0.02,r"Wavelength [$\mu$m]", fontsize=20,rotation=0) #Set shared x-axis label
figtext(0.05,0.95,r"Check A0V H I line fits (y-scale: "+str(y_scale)+", y-power: "+str(y_power)+", y_sharpen: "+str(y_sharpen)+", wave_smooth: "+str(wave_smooth)+", std_shift: "+str(std_shift)+")", fontsize=12,rotation=0) #Shared title
waves = std.combospec.wave #Wavelength array to interpolate to
normalized_HI_lines = a0v_synth_cont(waves)/a0v_synth_spec(waves) #Get normalized lines to the wavelength array
for i in range(n_HI_lines): #Loop through each H I line we want to preview
subplot(2,2,i+1) #Set up current line's subplot
#tight_layout(pad=5) #Use tightlayout so things don't overlap
fig = gcf()#Adjust aspect ratio
fig.set_size_inches([15,10]) #Adjust aspect ratio
plot(std.combospec.wave, std.combospec.flux, label='H I Uncorrected', color='gray') #Plot raw A0V spectrum, no H I correction applied
plot(std.combospec.wave, std.combospec.flux*normalized_HI_lines, label='H I Corrected',color='black') #Plot raw A0V spectrum with H I correction applied
plot(expected_continuum.combospec.wave, expected_continuum.combospec.flux, label='Expected Continuum', color='blue') #Plot expected continuum, which is the average of each order's adjacent A0V continua
xlim(HI_line_waves[i]-delta_wave, HI_line_waves[i]+delta_wave) #Set x axis range
j = (std.combospec.wave > HI_line_waves[i]-delta_wave) & (std.combospec.wave < HI_line_waves[i]+delta_wave) #Find only pixels in window of x-axis range for automatically determining y axis range
max_flux = nanmax(std.combospec.flux[j]*normalized_HI_lines[j]) #Max y axis range
min_flux = nanmin(std.combospec.flux[j]*normalized_HI_lines[j]) #Min y axis range
ylim([0.9*min_flux,1.02*max_flux]) #Set y axis range
title(HI_line_labels[i]) #Set title
if i==n_HI_lines-1: #If last line is being plotted
legend(loc='lower right') #plot the legend
tight_layout(pad=4)
pdf.savefig() #Save plots showing how well the H I correction (scaling H I lines from Vega) fits
clf() #Plot Vega model spectrum on second page
plot(vega_wave, vega_flux, '--', color='blue', label='Model Vega Spectrum') #Plot vega model
premake_a0v_synth_cont = a0v_synth_cont(waves) #Load interpolated synthetic A0V spectrum into memory
plot(waves,premake_a0v_synth_cont, color='black', label='Synthetic A0V Continuum') #Plot synthetic A0V continuum
xlim([flat_nanmin(waves),flat_nanmax(waves)]) #Set limits on plot
ylim([0., flat_nanmax(premake_a0v_synth_cont)])
xlabel(r'Wavelength [$\mu$m]')
ylabel(r'Relative Flux')
title('Check A0V Reddening (B='+str(B)+', V='+str(V)+')')
legend(loc="upper right")
tight_layout()
pdf.savefig() #Save showing synthetic A0V spectrum that the data will be divided by to do relative flux calibration & telluric correction on second page of PDF
for i in range(std.n_orders): #Loop through and plot each order for the observed A0V, along with the corrected H I absorption to see how well the synthetic A0V spectrum fits
if quality_cut: #Generally we throw out bad pixels, but the user can turn this feature off by setting quality_cut = False
std.orders[i].flux[std_flattened.orders[i].flux <= .1] = nan #Mask out bad pixels
waves = std.orders[i].wave #Std wavelengths
std_flux = std.orders[i].flux #Std flux
if telluric_spectrum == []: #If user does not specify a telluric spectrum directly
telluric_flux = std_flattened.orders[i].flux #Use the flattened standard flux given by the PLP, used for scaling telluric lines
else: #But if the user does specify a telluric spectrum object
telluric_flux = telluric_spectrum.orders[i].flux #use that object given by the user instead
interpolated_a0v_synth_spec = a0v_synth_spec(waves) #Grab synthetic A0V spectrum across current order
if calibration != []: #If user specifies they are using their own calibration: WARNING FOR TESTING PURPOSES ONLY
relative_flux_calibration = calibration.orders[i].flux #Then use the calibration given by the user
else: #Or else use the default calibration
#relative_flux_calibration = (std_flux * (telluric_flux**(telluric_power-1.0))/ interpolated_a0v_synth_spec)
relative_flux_calibration = std_flux / interpolated_a0v_synth_spec
#s2n = 1.0/sqrt(sci.orders[i].s2n()**-2 + std.orders[i].s2n()**-2) #Error propogation after telluric correction, see https://wikis.utexas.edu/display/IGRINS/FAQ or http://chemwiki.ucdavis.edu/Analytical_Chemistry/Quantifying_Nature/Significant_Digits/Propagation_of_Error#Arithmetic_Error_Propagation
#s2n = 1.0/sqrt((1.0/sci.orders[i].s2n()**2) + (1.0/std.orders[i].s2n()**2)) #Error propogation after telluric correction, see https://wikis.utexas.edu/display/IGRINS/FAQ or http://chemwiki.ucdavis.edu/Analytical_Chemistry/Quantifying_Nature/Significant_Digits/Propagation_of_Error#Arithmetic_Error_Propagation
		s2n = ((1.0/sci.orders[i].s2n()**2) + (1.0/std.orders[i].s2n()**2))**-0.5 #Error propagation after telluric correction, see https://wikis.utexas.edu/display/IGRINS/FAQ or http://chemwiki.ucdavis.edu/Analytical_Chemistry/Quantifying_Nature/Significant_Digits/Propagation_of_Error#Arithmetic_Error_Propagation
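		#Worked example of the quadrature formula above (values hypothetical): if the science
		#spectrum has S/N = 100 and the standard has S/N = 50 at some pixel, the combined
		#S/N = (1/100**2 + 1/50**2)**-0.5 ~= 44.7, always lower than the worse of the two inputs.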
if not no_flux: #As long as user does not specify doing a flux calibration
sci.orders[i].flux /= relative_flux_calibration #Apply telluric correction and flux calibration
			sci.orders[i].noise = sci.orders[i].flux / s2n #It's easiest to just work back the noise from S/N after calculating S/N, plus it is now properly scaled to match the (relative) flux calibration
#Print estimated J,H,K magnitudes as a sanity check to compare to 2MASS
bands = ['J', 'H', 'Ks']
	f0_lambda = array([3.129e-13, 1.133e-13, 4.283e-14]) * 1e7 #Convert units from W cm^-2 um^-1 to erg s^-1 cm^-2 um^-1
x = arange(1.0, 3.0, 1e-6)
delta_lambda = abs(x[1]-x[0])
magnitude_scale = 10**(0.4*(0.03 - V)) #Scale flux by difference in V magnitude between standard star and Vega (V for vega = 0.03 in Simbad)
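	#Worked example (V value hypothetical): for a standard star with V = 7.00,
	#magnitude_scale = 10**(0.4*(0.03 - 7.00)) ~= 1.6e-3, i.e. the Vega model is scaled
	#down by a factor of ~600 to match the fainter star before the synthetic photometry is done.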
resampled_synthetic_spectrum = a0v_synth_spec(x) * magnitude_scale #* 4 * pi * vega_R_over_D_squared
for i in range(len(bands)):
tcurve_wave, tcurve_trans = loadtxt(path_to_pheonix_models + '/2MASS_transmission_curves/'+bands[i]+'.dat', unpack=True) #Read in 2MASS band filter transmission curve
#tcurve_trans[tcurve_trans < 0] = 0.0 #Zero out negative values
tcurve_interp = interp1d(tcurve_wave, tcurve_trans, kind='cubic', fill_value=0.0, bounds_error=False) #Create interp obj for the transmission curve
tcurve_resampled = tcurve_interp(x)
f_lambda = nansum(resampled_synthetic_spectrum * tcurve_resampled * x * delta_lambda) / nansum(tcurve_resampled * x * delta_lambda)
magnitude = -2.5 * log10(f_lambda / f0_lambda[i])# - (0.03 - V)
print('For band '+bands[i]+' the estimated magnitude is '+str(magnitude))
return(sci) #Return the spectrum object (1D or 2D) that is now flux calibrated and telluric corrected
#Class creates, stores, and displays lines as position velocity diagrams, one of the main tools for analysis
class position_velocity:
def __init__(self, input_spec1d, input_spec2d, line_list, make_1d=False, make_1d_y_range=[0,0], shift_lines='', velocity_range=set_velocity_range, velocity_res=set_velocity_res):
spec1d = copy.deepcopy(input_spec1d)
spec2d = copy.deepcopy(input_spec2d)
slit_pixel_length = shape(spec2d.flux)[0] #Height of slit in pixels for this target and band
wave_pixels = spec2d.wave #Extract 1D wavelength for each pixel
x = arange(len(wave_pixels)) + 1.0 #Number of pixels across detector
interp_velocity = arange(-velocity_range, velocity_range, velocity_res) #Velocity grid to interpolate each line onto
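		#For example, if velocity_range = 500 km/s and velocity_res = 1 km/s (the actual
		#defaults come from set_velocity_range and set_velocity_res), interp_velocity runs
		#from -500 to +499 km/s in 1000 steps; every line is resampled onto this common grid.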
		show_lines = line_list.parse(flat_nanmin(wave_pixels), flat_nanmax(wave_pixels)) #Only grab lines within the wavelength range of the current order
n_lines = len(show_lines.wave) #Number of spectral lines
n_velocity = len(interp_velocity) #Number of velocity points
flux = empty([n_lines, n_velocity]) #Set up list of arrays to store 1D fluxes
var1d = empty([n_lines, n_velocity])
pv = empty([n_lines, slit_pixel_length, n_velocity])
var2d = empty([n_lines, slit_pixel_length, n_velocity])
if shift_lines != '': #If user wants to apply a correction in velocity space to a set of lines, use this file shift_lines
shift_labels = loadtxt(shift_lines, usecols=[0,], dtype=str, delimiter='\t') #Load line labels to ID each line
			shift_v = loadtxt(shift_lines, usecols=[1,], dtype=float, delimiter='\t') #Load km/s to artificially Doppler shift spectral lines
save_shift_wave = zeros(n_lines) #Create arrays to store shifted wavelengths and velocities
save_shift_v = zeros(n_lines)
			for i in range(len(shift_labels)): #Go through each line and shift its wavelength
find_line = show_lines.label == shift_labels[i]
if any(find_line): #Only run if a line is found
					pre_shifted_wavelength = copy.deepcopy(show_lines.wave[find_line]) #Save pre-shifted wavelength
					show_lines.wave[find_line] = show_lines.wave[find_line] * (-(shift_v[i]/c)+1.0) #Artificially Doppler shift the line
					save_shift_v[find_line] = shift_v[i] #Save velocity shift
					save_shift_wave[find_line] = show_lines.wave[find_line] - pre_shifted_wavelength #Save wavelength shift
#print shift_labels[i]
		for i in range(n_lines): #Loop through each line and resample it onto the velocity grid
pv_velocity = c * ( (wave_pixels / show_lines.wave[i]) - 1.0 ) #Calculate velocity offset for each pixel from c*delta_wave / wave
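			#Worked example of the Doppler formula above (wavelengths hypothetical): a pixel at
			#2.1220 um measured against a line at rest wavelength 2.1218 um gives
			#v = c*(2.1220/2.1218 - 1) ~= +28.3 km/s, with c in km/s.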
pixel_cut = abs(pv_velocity) <= velocity_range #Find only pixels in the velocity range, this is for conserving flux
ungridded_wavelengths = wave_pixels[pixel_cut]
ungridded_velocities = pv_velocity[pixel_cut]
			ungridded_flux_1d = spec1d.flux[pixel_cut] #PV diagram ungridded on original pixels
			ungridded_flux_2d = spec2d.flux[:,pixel_cut] #PV diagram ungridded on original pixels
			ungridded_variance_1d = spec1d.noise[pixel_cut]**2 #PV diagram variance ungridded on original pixels
ungridded_variance_2d = spec2d.noise[:,pixel_cut]**2 #PV diagram variance ungridded on original pixels
interp_wave = interp1d(ungridded_velocities, ungridded_wavelengths, kind='linear', bounds_error=False) #Create interp obj for wavelengths
interp_flux_2d = interp1d(ungridded_velocities, ungridded_flux_2d, kind='linear', bounds_error=False) #Create interp obj for 2D flux
interp_variance_2d = interp1d(ungridded_velocities, ungridded_variance_2d, kind='linear', bounds_error=False) #Create interp obj for 2D variance
gridded_wavelengths = interp_wave(interp_velocity) #Get wavelengths as they appear on the velocity grid
			dl_dv = (gridded_wavelengths[1:] - gridded_wavelengths[:len(gridded_wavelengths)-1]) / velocity_res #Calculate scale factor delta-lambda/delta-velocity for conserving flux when interpolating from the wavelength grid to the velocity grid
			dl_dv = hstack([dl_dv, dl_dv[len(dl_dv)-1]]) #Duplicate the last element of the delta-lambda/delta-velocity array so it has the same number of elements as everything else here
gridded_flux_2d = interp_flux_2d(interp_velocity) * dl_dv #PV diagram velocity gridded
gridded_variance_2d = interp_variance_2d(interp_velocity) * (dl_dv)**2 #PV diagram variance velocity gridded
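			#The dl_dv Jacobian conserves flux when regridding from wavelength to velocity space:
			#F(v)*dv = F(lambda)*dlambda with dlambda/dv = lambda/c, so near 2.12 um (hypothetical
			#wavelength) a 1 km/s velocity element spans ~2.12/299792.458 ~= 7.1e-6 um.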
if not make_1d: #By default use the 1D spectrum outputted by the pipeline, but....
gridded_flux_1d = interp(interp_velocity, ungridded_velocities, ungridded_flux_1d) * dl_dv
gridded_variance_1d = interp(interp_velocity, ungridded_velocities, ungridded_variance_1d) * (dl_dv)**2
elif make_1d_y_range[1] > 0: #... if user sets make_1d = True, then we will create our own 1D spectrum by collapsing the 2D spectrum
gridded_flux_1d = nansum(gridded_flux_2d[make_1d_y_range[0]:make_1d_y_range[1],:], 0) * dl_dv #Create 1D spectrum by collapsing 2D spectrum
gridded_variance_1d = nansum(gridded_variance_2d[make_1d_y_range[0]:make_1d_y_range[1],:], 0) * (dl_dv)**2 #Create 1D variance spectrum by collapsing 2D variance
else:
gridded_flux_1d = nansum(gridded_flux_2d, 0) * dl_dv #Create 1D spectrum by collapsing 2D spectrum
gridded_variance_1d = nansum(gridded_variance_2d, 0) * (dl_dv)**2 #Create 1D variance spectrum by collapsing 2D variance
badpix = (gridded_flux_1d==0.0) | (gridded_variance_1d==0.0)#nan out zeros
gridded_flux_1d[badpix] = nan
gridded_variance_1d[badpix] = nan
flux[i,:] = gridded_flux_1d# * scale_flux_1d #Append 1D flux array with line
			var1d[i,:] = gridded_variance_1d# * scale_flux_1d #Append 1D variance array with line
pv[i,:,:] = gridded_flux_2d# * scale_flux_2d #Stack PV spectrum of lines into a datacube
var2d[i,:,:] = gridded_variance_2d# * scale_variance_2d #Stack PV variance of lines into a datacube
		if shift_lines != '': #If lines were shifted, save the velocities and wavelengths that were shifted in this PV object
self.shift_v = save_shift_v
self.shift_wave = save_shift_wave
self.flux = flux #Save 1D PV fluxes
self.var1d = var1d #Save 1D PV variances
self.pv = pv #Save datacube of stack of 2D PV diagrams for each line
self.var2d = var2d #Save 2D PV variance
		self.velocity = interp_velocity #Save array storing velocity grid all lines were interpolated onto
self.label = show_lines.label #Save line labels
self.lab_wave = show_lines.lab_wave #Save lab wavelengths for all the lines
self.wave = show_lines.wave #Save line wavelengths
		self.n_lines = len(self.flux) #Count number of individual spectral lines stored in position velocity object
self.slit_pixel_length = slit_pixel_length #Store number of pixels along slit
self.velocity_range = velocity_range #Store velocity range of PV diagrams
		self.velocity_res = velocity_res #Store velocity resolution (km/s)
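		#Minimal usage sketch (spec1d_obj, spec2d_obj, and my_line_list are hypothetical
		#placeholders for objects created elsewhere with this module):
		#pv = position_velocity(spec1d_obj, spec2d_obj, my_line_list)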
def view(self, line='', wave=0.0, pause = False, close = False, printlines=False, name='pv'): #Function loads 2D PV diagrams in DS9 and plots 1D diagrams
self.save_fits() #Save a fits file of the pv diagrams for opening in DS9
ds9.open() #Open DS9
ds9.show(save.path + name + '.fits', new = False) #Load PV diagrams into DS9
ds9.set('zoom to fit') #Zoom PV diagram to fit ds9 window
ds9.set('zoom 0.9') #Zoom out a little bit to see the coordinate grid
ds9.set('scale log') #Set view to log scale
ds9.set('scale Zscale') #Set scale limits to Zscale, looks okay
ds9.set('grid on') #Turn on coordinate grid to position velocity coordinates
ds9.set('grid type publication') #Set to the more aesthetically pleasing publication type grid
ds9.set('grid system wcs') #Set to position vs. velocity coordinates
ds9.set('grid axes type exterior') #Set grid axes to be exterior
		ds9.set('grid axes style 1') #Set grid axes to be "publication" type
ds9.set('grid numerics type exterior') #Put numbers external to PV diagram
ds9.set('grid numerics color black') #Make numbers black for easier reading
if printlines: #If user sets printlines = True, list lines and their index in the command line
print('Lines are....')
for i in range(self.n_lines): #Loop through each line
print(i+1, self.label[i]) #Print index and label for line in terminal
		if line != '': #If user specifies line name, find index of that line and display it
self.goline(line)
if wave != 0.0: #If user specifies wavelength, find nearest wavelength for that line being specified and display it
self.gowave(wave)
#Pause for viewing if user specified
if pause:
wait()
#Close DS9 after viewing if user specified (pause should be true or else DS9 will open then close)
if close:
ds9.close()
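		#Example usage sketch (the line label must exist in the loaded line list; 'H I 4-7'
		#is the Brackett-gamma label used elsewhere in this module):
		#pv.view(line='H I 4-7', printlines=True, pause=True, close=True)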
	def goline(self, line): #Function causes DS9 to display a specified line (PV diagram must already be loaded up using self.view())
try:
if line != '': #If user specifies line name, find index of that line
i = 1 + where(self.label == line)[0][0] #Find instance of line matching the provided name
self.display_line(i)
else: #If index not provided
print('ERROR: No line label specified')
except IndexError: #If line is unable to be found (ie. not in current band) catch and print the following error...
print('ERROR: Unable to find the specified line in this spectrum. Please try again.')
	def gowave(self, wave): #Function causes DS9 to display the line nearest a specified wavelength (PV diagram must already be loaded up using self.view())
if wave != 0.0: #If user specifies line name, find index of that line
nearest_wave = abs(self.lab_wave - wave).min() #Grab nearest wavelength
i = 1 + where(abs(self.lab_wave-wave) == nearest_wave)[0][0] #Grab index for line with nearest wavelength
self.display_line(i)
else:
print('ERROR: No line wavelength specified')
def display_line(self, i): #Moves DS9 to display correct 2D PV diagram of line, and also displays 1D line
label_string = self.label[i-1]
wave_string = "%12.5f" % self.lab_wave[i-1]
		title = label_string + ' ' + wave_string + r' $\mu$m'
ds9.set('cube '+str(i)) #Go to line in ds9 specified by user in
self.plot_1d_velocity(i-1, title = title)
def make_1D_postage_stamps(self, pdf_file_name): #Make a PDF showing all 1D lines in a single PDF file
		with PdfPages(save.path + pdf_file_name) as pdf: #Make a multipage pdf
for i in range(self.n_lines):
label_string = self.label[i]
wave_string = "%12.5f" % self.lab_wave[i]
				title = label_string + ' ' + wave_string + r' $\mu$m'
self.plot_1d_velocity(i, title=title) #Make 1D plot postage stamp of line
pdf.savefig() #Save as a page in a PDF file
def make_2D_postage_stamps(self, pdf_file_name): #Make a PDF showing all 2D lines in a single PDF file
#figure(figsize=(2,1), frameon=False)
		with PdfPages(save.path + pdf_file_name) as pdf: #Make a multipage pdf
for i in range(self.n_lines):
label_string = self.label[i]
wave_string = "%12.5f" % self.lab_wave[i]
				title = label_string + ' ' + wave_string + r' $\mu$m'
#self.plot_1d_velocity(i, title=title) #Make 1D plot postage stamp of line
frame = gca() #Turn off axis number labels
frame.axes.get_xaxis().set_ticks([]) #Turn off axis number labels
frame.axes.get_yaxis().set_ticks([]) #Turn off axis number labels
ax = subplot(111)
suptitle(title)
imshow(self.pv[i,:,:], cmap='gray')
pdf.savefig() #Save as a page in a PDF file
	def plot_1d_velocity(self, line_index, title='', clear=True, fontsize=18, show_zero=True, show_x_label=True, show_y_label=True, uncertainity_color='red', y_max=0., scale_flux=1.0/1e3, uncertainity_line='solid'): #Plot 1D spectrum in velocity space (corresponding to a PV diagram), called when viewing a line
		if clear: #Clear plot space, unless user sets clear=False
clf() #Clear plot space
velocity = self.velocity
flux = self.flux[line_index] * scale_flux #Scale flux so numbers are not so big
noise = self.var1d[line_index]**0.5 * scale_flux
max_flux = nanmax(flux + noise, axis=0) #Find maximum flux in slice of spectrum
		fill_between(velocity, flux - noise, flux + noise, facecolor = uncertainity_color, linestyle=uncertainity_line) #Fill in space between data and +/- 1 sigma uncertainty
plot(velocity, flux, color='black') #Plot 1D spectrum slice
#plot(velocity, flux + noise, ':', color='red') #Plot noise level for 1D spectrum slice
#plot(velocity, flux - noise, ':', color='red') #Plot noise level for 1D spectrum slice
if show_zero: #Normally show the zero point line, but if user does not want it, don't plot it
plot([0,0], [-0.2*max_flux, max_flux], '--', color='black') #Plot velocity zero point
xlim([-self.velocity_range, self.velocity_range]) #Set xrange to be +/- the velocity range set for the PV diagrams
if y_max == 0.: #If user specifies no maximum y scale
ylim([-0.20*max_flux, 1.2*max_flux]) #Set yrange automatically
		else: #If user specifies a maximum y scale
ylim([-0.10*y_max, y_max]) #Base y range on what the user set the y_max to be
if title != '': #Add title to plot showing line name, wavelength, etc.
suptitle(title, fontsize=20)
#if label != '' and wave > 0.0:
#title(label + ' ' + "%12.5f" % wave + '$\mu$m')
#elif label != '':
#title(label)
#elif wave > 0.0:
#title("%12.5f" % wave + '$\mu$m')
		if show_x_label: #Let user specify showing the x axis label
xlabel('Velocity [km s$^{-1}$]', fontsize=fontsize) #Label x axis
#if self.s2n:
# ylabel('S/N per resolution element (~3.3 pixels)', fontsize=18) #Label y axis as S/N for S/N spectrum
#else:
# ylabel('Relative Flux', fontsize=18) #Or just label y-axis as relative flux
if show_y_label:
ylabel('Relative Flux', fontsize=fontsize) #Or just label y-axis as relative flux
#draw()
#show()
def save_fits(self, name='pv', dim=2, type='flux'): #Save fits file of PV diagrams
if type == 'flux' and dim == 2: #If user specifies 2D flux (default)
pv_file = fits.PrimaryHDU(self.pv) #Set up fits file object to hold 2D flux
elif type == 'var' and dim == 2: #If user specifies 2D variance
pv_file = fits.PrimaryHDU(self.var2d) #Set up fits file object to hold 2D variance
elif type == 'flux' and dim == 1: #If user specifies 1D flux
pv_file = fits.PrimaryHDU(self.flux)
elif type == 'var' and dim == 1: #If user specifies 1D variance
pv_file = fits.PrimaryHDU(self.var1d)
else: #Report error
			print('ERROR: Type ' + type + ' or dimension ' + str(dim) + ' for saving fits file not correctly specified.')
#Add WCS for linear interpolated velocity
		pv_file.header['CTYPE1'] = 'VRAD' #Set axis type to radial velocity
		pv_file.header['CRPIX1'] = (self.velocity_range / self.velocity_res) + 1 #Set reference pixel to where v=0 km/s (middle of stamp)
		pv_file.header['CDELT1'] = self.velocity_res #Set velocity step per pixel
		pv_file.header['CUNIT1'] = 'km/s' #Set unit for x axis to be km/s
		pv_file.header['CRVAL1'] = 1 #Velocity value at the reference pixel
if dim == 2:
pv_file.header['CTYPE2'] = 'PIXEL' #Set unit for slit length to something generic
pv_file.header['CRPIX2'] = 1 #Set zero point to 0 pixel for slit length
			pv_file.header['CDELT2'] = 14.8 / self.slit_pixel_length #Set the pixel scale along the slit so the full slit spans 14.8 arcsec
pv_file.header['CUNIT2'] = 'arcsec'
pv_file.header['CRVAL2'] = 1
pv_file.writeto(save.path + name +'.fits', overwrite = True) #Save fits file
# s2n_file = fits.PrimaryHDU(self.s2n) #Set up fits file object
# #Add WCS for linear interpolated velocity
# s2n_file.header['CTYPE1'] = 'km/s' #Set unit to "Optical velocity" (I know it's really NIR but whatever...)
# s2n_file.header['CRPIX1'] = (self.velocity_range / self.velocity_res) + 1 #Set zero point to where v=0 km/s (middle of stamp)
# s2n_file.header['CDELT1'] = self.velocity_res #Set zero point to where v=0 km/s (middle of stamp)
# s2n_file.header['CUNIT1'] = 'km/s' #Set label for x axis to be km/s
# s2n_file.header['CTYPE2'] = 'Slit Position' #Set unit for slit length to something generic
# s2n_file.header['CRPIX2'] = 1 #Set zero point to 0 pixel for slit length
# s2n_file.header['CDELT2'] = 1.0 / self.slit_pixel_length #Set slit length to go from 0->1 so user knows what fraction from the bottom they are along the slit
# s2n_file.writeto(scratch_path + 'pv_s2n.fits', overwrite = True) #Save fits file
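		#Example usage sketch: save the 2D variance cube alongside the default 2D flux cube,
		#both written to save.path:
		#pv.save_fits(name='pv') #2D flux (default)
		#pv.save_fits(name='pv_var2d', dim=2, type='var') #2D variance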
def save_var(self, name='pv_var2d'): #Save fits file of PV diagrams variance, OLD! USE def save_fits above, kept here for compatibility
pv_file = fits.PrimaryHDU(self.var2d) #Set up fits file object
#Add WCS for linear interpolated velocity
pv_file.header['CTYPE1'] = 'km/s' #Set unit to "Optical velocity" (I know it's really NIR but whatever...)
pv_file.header['CRPIX1'] = (self.velocity_range / self.velocity_res) + 1 #Set zero point to where v=0 km/s (middle of stamp)
pv_file.header['CDELT1'] = self.velocity_res #Set zero point to where v=0 km/s (middle of stamp)
pv_file.header['CUNIT1'] = 'km/s' #Set label for x axis to be km/s
pv_file.header['CTYPE2'] = 'Slit Position' #Set unit for slit length to something generic
pv_file.header['CRPIX2'] = 1 #Set zero point to 0 pixel for slit length
pv_file.header['CDELT2'] = 1.0 / self.slit_pixel_length #Set slit length to go from 0->1 so user knows what fraction from the bottom they are along the slit
pv_file.writeto(save.path + name + '.fits', overwrite = True) #Save fits file
	def read_fits(self, filename='pv.fits', dim=2, type='flux'): #Read in a saved pv.fits file (written with save_fits) that has been modified externally and overwrite the flux/variance variable in this object
input_data = fits.getdata(filename)
if type == 'flux' and dim == 2: #If user specifies 2D flux (default)
self.pv = input_data
elif type == 'var' and dim == 2: #If user specifies 2D variance
self.var2d = input_data
elif type == 'flux' and dim == 1: #If user specifies 1D flux
self.flux = input_data
elif type == 'var' and dim == 1: #If user specifies 1D variance
self.var1d = input_data
else: #Report error
			print('ERROR: Type ' + type + ' or dimension ' + str(dim) + ' for reading fits file not correctly specified.')
def getline(self, line): #Grabs PV diagram for a single line given a line label
i = where(self.label == line)[0][0] #Search for line by label
return self.pv[i] #Return line found
	def getvariance(self,line): #Grabs PV diagram variance for a single line given a line label
i = where(self.label == line)[0][0] #Search for line by label
return self.var2d[i] #Return variance of line found
def getline1d(self, line): #Grabs 1D flux in velocity space for a single line given a line label
i = where(self.label == line)[0][0] #Search for line by label
return self.flux[i] #Return line found
def getvariance1d(self,line): #Grabs 1D variance in velocity space for a single line given a line label
i = where(self.label == line)[0][0] #Search for line by label
return self.var1d[i] #Return variance of line found
def ratio(self, numerator, denominator): #Returns PV diagram of a line ratio
return self.getline(numerator) / self.getline(denominator)
def normalize(self, line): #Normalize all PV diagrams by a single line
norm_flux_2d = self.getline(line) #Grab flux (in PV space) of line to normalize by
norm_var_2d = self.getvariance(line) #Grab variance (in PV space) of line to normalize by
norm_flux_1d = self.getline1d(line) #Grab 1D flux (in vel. space) of line to normalize by
norm_var_1d = self.getvariance1d(line) #Grab 1D variance (in vel. space) of line to normalize by
		self.var2d = (self.pv/norm_flux_2d)**2 * ((self.var2d/self.pv**2) + (norm_var_2d/norm_flux_2d**2)) #Propagate uncertainty and store the new variance after normalizing to the chosen line
self.var1d = (self.flux/norm_flux_1d)**2 * ((self.var1d/self.flux**2) + (norm_var_1d/norm_flux_1d**2))
		self.pv = self.pv / norm_flux_2d #Normalize all lines to the selected line in 2D PV space
		self.flux = self.flux / norm_flux_1d #Normalize all lines to the selected line in 1D velocity space
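		#Example usage sketch: normalize everything to the H2 5-3 O(3) line (the same label
		#used as the default by region.make_latex_table below); the label must match the
		#loaded line list:
		#pv.normalize('5-3 O(3)')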
def basic_flux(self, x_range, y_range):
sum_along_x = nansum(self.pv[:, y_range[0]:y_range[1], x_range[0]:x_range[1]], axis=2) #Collapse along velocity space
total_sum = nansum(sum_along_x, axis=1) #Collapse along slit space
return(total_sum) #Return the integrated flux found for each line in the box defined by the user
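		#Example usage sketch (pixel ranges hypothetical): integrate each line over velocity
		#pixels 80-120 and slit pixels 10-50:
		#fluxes = pv.basic_flux([80,120], [10,50])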
	def inspection(self): #Interactively loop through and view each line to construct a pared-down line list for a target, for later reading back in
#ioff()# Turn off interactive plotting mode
self.view() #Load up DS9 and the 1D view of the PV diagrams
save_text = [] #Set up array for ascii text to save new pared down line list
for i in range(self.n_lines): #Loop through to read in each line
clf() #Clear figure for looking at 1D
self.goline(self.label[i]) #View this line
#show()
pause(0.001)
print('Line = ', self.label[i]) #Print info about line in command line
print('Wave = ', self.lab_wave[i])
answer = input('Include in list? (y/n) ') #Ask user if they want to include this line in the line list
if answer == 'Y' or answer == 'y':
save_text.append('%1.10f' % self.lab_wave[i] + '\t' + self.label[i])
print('DONE WITH LIST!')
output_filename = 'line_lists/' + input('Please give a filename for the line list: ')
savetxt(output_filename, save_text, fmt="%s") #Output line list
#ion() #Turn interactive plotting mode back on 0
	def calculate_moments(self, vrange=[-set_velocity_range, set_velocity_range], prange=[0,0], s2n_cut=0.0, s2n_smooth=0.): #Calculate (mathematical) moments of the flux in velocity and position space; explicitly calculates moments 0, 1, 2 = flux, mean, variance
pv = copy.deepcopy(self.pv)
if s2n_cut > 0.:
#g = Gaussian2DKernel(stddev=s2n_smooth)
g = Gaussian2DKernel(s2n_smooth)
for i in range(len(pv)):
low_s2n_mask = convolve(pv[i], g) / self.var2d[i]**0.5 < s2n_cut
pv[i][low_s2n_mask] = nan
		if prange[0] == 0 and prange[1] == 0: #If user does not specify prange explicitly
prange = [0, self.slit_pixel_length] #Set to use the whole slit by default
		use_velocities = (self.velocity >= vrange[0]) & (self.velocity <= vrange[1]) #Find indices within velocity range specified by the variable vrange and only use those pixels, masking everything outside that range out
position = arange(self.slit_pixel_length) #Set up an array for position along the slit (in pixel space, not in arcseconds)
velocity_flux = nansum(pv[:,:,use_velocities] * self.velocity_res, axis=2) #Calculate moment 0 (the flux) along the velocity axis
velocity_mean = nansum(pv[:,:,use_velocities] * self.velocity[use_velocities] * self.velocity_res, axis=2) / velocity_flux
		velocity_variance = nansum(pv[:,:,use_velocities] * (self.velocity[newaxis,newaxis,use_velocities] - velocity_mean[:,:,newaxis])**2 * self.velocity_res, axis=2) / velocity_flux
position_flux = nansum(pv[:,prange[0]:prange[1],:], axis=1)
position_mean = nansum(pv[:,prange[0]:prange[1],:] * position[prange[0]:prange[1],newaxis], axis=1) / position_flux
position_variance = nansum(pv[:,prange[0]:prange[1],:] * (position[newaxis,prange[0]:prange[1],newaxis]-position_mean[:,newaxis,:])**2, axis=1) / position_flux
self.velocity_flux = velocity_flux #Store all the moments in the position_velocity object as these variables for later use
self.velocity_mean = velocity_mean
self.velocity_variance = velocity_variance
self.position_flux = position_flux
self.position_mean = position_mean
self.position_variance = position_variance
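		#The moments computed above follow the standard definitions along each axis, e.g. in velocity:
		#	M0 (flux)     = sum(F * dv)
		#	M1 (mean)     = sum(F * v * dv) / M0
		#	M2 (variance) = sum(F * (v - M1)**2 * dv) / M0
		#so velocity_mean is an intensity-weighted centroid and velocity_variance**0.5 gives a
		#characteristic line width in km/s.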
	def create_moment_mask(self, sigma=1.0): #Create a mask +/- sigma (default 1.0) around the position and velocity moments, used for plotting positions of lines in a simplified way
velocity_moment_mask = zeros(shape(self.pv)) #Set up array that will hold masks
position_moment_mask = zeros(shape(self.pv)) #Set up array that will hold masks
combined_moment_mask = zeros(shape(self.pv)) #Set up array that will hold masks
position = arange(self.slit_pixel_length)
for i in range(len(self.pv)): #Loop through each line
			for j in range(len(self.velocity)): #Loop through each velocity resolution element
				sigma_range = (position < self.position_mean[i,j]+sigma*self.position_variance[i,j]**0.5) & (position > self.position_mean[i,j]-sigma*self.position_variance[i,j]**0.5) #Find +/- sigma from the mean position
				position_moment_mask[i,sigma_range,j] = 1.0 #Apply mask
				combined_moment_mask[i,sigma_range,j] = 1.0 #Apply mask
			for k in range(len(position)): #Loop through each position along the slit
				sigma_range = (self.velocity < self.velocity_mean[i,k]+sigma*self.velocity_variance[i,k]**0.5) & (self.velocity > self.velocity_mean[i,k]-sigma*self.velocity_variance[i,k]**0.5) #Find +/- sigma from the mean velocity
				velocity_moment_mask[i,k,sigma_range] = 1.0 #Apply mask
				combined_moment_mask[i,k,sigma_range] = 1.0 #Apply mask
self.velocity_moment_mask = velocity_moment_mask #Store the moment masks
self.position_moment_mask = position_moment_mask
self.combined_moment_mask = combined_moment_mask
	def fitmodel(self, fitter, model, slit_length=15.0): #For fitting 2D astropy models to a position_velocity object; outputs include model fit parameters, residuals, fluxes, and uncertainties in the fits
model_fits = [] #Array to store model fits
model_results = zeros(shape(self.pv))
#x = self.velocity
#y = arange(self.slit_pixel_length, dtype=float)/slit_length
x, y = meshgrid(self.velocity, arange(self.slit_pixel_length, dtype=float)/slit_length)
gaussian_2d_kernel_for_replacing_nans = Gaussian2DKernel(0.5)
for i in range(self.n_lines): #Loop through each line and attempt to fit the model
data = interpolate_replace_nans(self.pv[i,:,:], gaussian_2d_kernel_for_replacing_nans) #Fill all nans, or else the model fitting does not work (nans screw it up)
weights = interpolate_replace_nans(data/ self.var2d[i,:,:]**0.5, gaussian_2d_kernel_for_replacing_nans)
goodpix = isfinite(data) & isfinite(weights)
data[~goodpix] = 0.0 #Catch pixels that went bad anyway
try:
model_fit = fitter(model, x, y, data, weights=weights) #Fit the model
for j in range(10): #Iterate on the model fit a bit to improve the fit
model_fit = fitter(model_fit, x, y, data)
model_fits.append(model_fit) #Add results from the model fit to an array that stores the model fit for each line
model_results[i,:,:] = model_fit(x, y)
except:
model_fits.append(None)
print('WARNING: Line '+self.label[i]+' had a bad model fit. Moving on.')
self.model_fits = array(model_fits)
self.model_residuals = self.pv - model_results
self.model_results = model_results
self.model_flux = nansum(model_results, axis=0)
	def print_fitmodel(self, pdffilename, percentile_interval=[2.0, 98.0]): #Create a pdf showing the data, model fit, and residuals for each line
pv_data = self.pv
pv_models = self.model_results
pv_residuals = self.model_residuals
line_labels = self.label
line_wave = self.lab_wave
with PdfPages(save.path + pdffilename) as pdf: #Load pdf backend for saving multipage pdfs
for i in range(self.n_lines): #Loop through each line and attempt to fit the model
gs = grd.GridSpec(3, 1)
ax=subplot(gs[0])
norm = ImageNormalize(pv_data[i,:,:], interval=AsymmetricPercentileInterval(percentile_interval[0], percentile_interval[1]), stretch=LogStretch())
imshow(pv_data[i,:,:], cmap='gray', interpolation='Nearest', origin='lower', norm=norm, aspect='auto') #Plot data
suptitle(line_labels[i] +' '+str(line_wave[i]))
colorbar()
ax=subplot(gs[1])
imshow(pv_models[i,:,:], cmap='gray', interpolation='Nearest', origin='lower', norm=norm, aspect='auto') #Plot model
colorbar()
ax=subplot(gs[2])
imshow(pv_residuals[i,:,:], cmap='gray', interpolation='Nearest', origin='lower', norm=norm, aspect='auto') #Plot residuals
colorbar()
pdf.savefig()
	def get_fitmodel_attribute(self, attribute_strs, filter='', return_labels=True): #Returns an array of attributes from the astropy models fit with def fitmodel
return_this = []
if return_labels:
labels = []
for i in range(self.n_lines):
				if filter in self.label[i]:
labels.append(self.label[i])
return_this.append(array(labels))
if size(attribute_strs) == 1:
attribute_strs = [attribute_strs]
for attribute_str in attribute_strs:
attribute = []
for i in range(self.n_lines):
				if filter in self.label[i] and self.model_fits[i] is not None:
attribute.append(getattr(self.model_fits[i], attribute_str).value)
return_this.append(attribute)
return return_this
def get_median_fitmodel_attribute(self, attribute_strs, filter=''):
n_attributes = len(attribute_strs)
median_attributes = zeros(n_attributes)
results = self.get_fitmodel_attribute(attribute_strs, filter, return_labels=False)
median_results = []
for i in range(n_attributes):
median_results.append(nanmedian(results[i]))
return median_results
# def get_moment(self, moment, line): #Specify desired moment and line and return the result
# if not hasattr(self, 'moments'): #Check if moments have been calculated yet
# print('Moments not yet calculated. Claculating now.')
# self.calculate_moments() #Calculate moments, if not done already
# i = where(self.label == line)[0][0] #Search for line by label
# return self.moments[moment, i, :] #Return moment
#@jit #Compile JIT using numba
def fit_mask(mask_contours, data, variance, pixel_range=[-10,10]): #Find optimal position (in velocity space) for mask for extracting
smoothed_data = median_filter(data, size=[5,5])
shift_pixels = arange(pixel_range[0], pixel_range[1]) #Set up array for rolling mask
s2n = zeros(shape(shift_pixels)) #Set up array to store S/N of each shift
for i in range(len(shift_pixels)):
shifted_mask_contours = roll(mask_contours, shift_pixels[i], 1) #Shift the mask contours by a certain number of pixels
		shifted_mask = shifted_mask_contours == 1.0 #Create new mask from the shifted mask contours
flux = nansum(smoothed_data[shifted_mask]) - nanmedian(smoothed_data[~shifted_mask])*size(smoothed_data[shifted_mask]) #Calculate flux from shifted mask, do simple background subtraction
sigma = nansum(variance[shifted_mask])**0.5 #Calculate sigma from shifted_mask
s2n[i] = flux/sigma #Store S/N of mask in this position
if all(isnan(s2n)): #Check if everything in the s2n array is nan, if so this is a bad part of the spectrum
return 0 #so return a zero and move along
else: #Otherwise we got something decent so...
return shift_pixels[s2n == flat_nanmax(s2n)][0] #Return pixel shift that maximizes the s2n
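#Example usage sketch (mask_contours, data_2d, and variance_2d are hypothetical 2D arrays,
#e.g. slices from a position_velocity object):
#best_shift = fit_mask(mask_contours, data_2d, variance_2d, pixel_range=[-10,10])
#The returned integer is the roll along the velocity axis that maximizes the extracted S/N.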
#@jit #Compile JIT using numba
def fit_weights(weights, data, variance, pixel_range=[-10,10]): #Find optimal position for an optimal extraction
#median_smoothed_data = median_filter(data, [5,5])
#median_smoothed_variance = median_filter(variance, [5,5])
shift_pixels = arange(pixel_range[0], pixel_range[1]) #Set up array for rolling weights
s2n = zeros(shape(shift_pixels)) #Set up array to store S/N of each shift
#max_weight = nanmax(weights) #Find maximum of weights
#background_weight = max_weight * background_threshold_scale #Set weight below which will be used as background, typicall 1000x less than the peak signal
for i in range(len(shift_pixels)): #Loop through each position in velocity space to test the optimal extraction
shifted_weights = roll(weights, shift_pixels[i], 1) #Shift weights by some amount of km/s for searching for the optimal shift
background = nanmedian(data[shifted_weights == 0.0]) #Calculate typical background per pixel
		flux = nansum((data-background)*shifted_weights) #Calculate weighted flux
sigma = nansum(variance*shifted_weights**2)**0.5 #Calculate weighted sigma
#flux = nansum((median_smoothed_data-background)*shifted_weights) #Calcualte weighted flux
#sigma = sqrt( nansum(median_smoothed_variance*shifted_weights**2) ) #Calculate weighted sigma
		if flux == 0. or sigma == 0.: #Divide by zero error catch
			s2n[i] = 0.
else:
s2n[i] = flux / sigma
if all(isnan(s2n)): #Check if everything in the s2n array is nan, if so this is a bad part of the spectrum
return 0 #so return a zero and move along
else: #Otherwise we got something decent so...
return shift_pixels[s2n == flat_nanmax(s2n)][0] #Return pixel shift that maximizes the s2n
class region: #Class for reading in a DS9 region file, and applying it to a position_velocity object
def __init__(self, pv, name='flux', file='', background='', s2n_cut = -99.0, show_regions=True, s2n_mask = 0.0, line='', pixel_range=[-10,10],
savepdf=True, optimal_extraction=False, weight_threshold=1e-3, systematic_uncertainity=0.0):
path = save.path + name #Store the path to save files in so it can be passed around, eventually to H2 stuff
use_background_region = False
line_labels = pv.label #Read out line labels
line_wave = pv.lab_wave #Read out (lab) line wavelengths
mask_shift = zeros(len(line_wave)) #Array to store shift (in pixels) of mask for s2n mask fitting
pv_data = pv.pv #Holder for flux datacube
pv_variance = pv.var2d #holder for variance datacube
dv = pv.velocity[1]-pv.velocity[0] #delta-velocity
print('dv = ', dv)
#bad_data = pv_data < -10000.0 #Mask out bad pixels and cosmic rays that somehow made it through, commented out for now since it doesn't seem to help with anything
#pv_data[bad_data] = nan
#pv_variance[bad_data] = nan
pv_shape = shape(pv_data[0,:,:]) #Read out shape of a 2D slice of the pv diagram cube
n_lines = len(pv_data[:,0,0]) #Read out number of lines
velocity_range = [flat_nanmin(pv.velocity), flat_nanmax(pv.velocity)]
if file == '' and line == '': #If no region file is specified by the user, prompt user for the path to the region file
file = input('What is the name of the region file? ')
if background == '': #If no background region file is specified by the user, ask if user wants to specify region, and if so ask for path
answer = input('Do you want to designate a specific region to measure the median background (y) or just use the whole postage stamp (n)? ')
if answer == 'y':
print('Draw DS9 region around part(s) of line you want to measure the median background for and save it as a .reg file in the scratch directory.')
				background = input('What is the name of the region file? ')
use_background_region = True
else:
use_background_region = False
		if background == 'all': #If user specifies 'all', use the whole postage stamp for the background
use_background_region = False
if optimal_extraction: #If user specifies optimal extraction, we will weight each pixel by the signal of a bright line
line_for_weighting = line_labels == line #Find index of line to weight by
signal = copy.deepcopy(pv_data[line_for_weighting,:,:][0])-nanmedian(pv_data[line_for_weighting,:,:][0])
			signal = median_filter(signal, size=[5,5]) #Median filter signal before calculating weights to get rid of noise spikes, cosmic rays, etc.
			signal[signal < nanmax(signal) * weight_threshold] = 0. #Zero out pixels below the weight threshold
weights = signal**2.0 #Grab signal of line to weight by, this signal is what will be used for the optimal extraction
weights = weights / nansum(weights) #Normalize weights
#weights[weights < weight_threshold]
elif s2n_mask == 0.0: #If user specifies to use a region
on_region = pyregion.open(file) #Open region file for reading flux
on_mask = on_region.get_mask(shape = pv_shape) #Make mask around region file
else: #If user specifies to mask with a specific spectral line's S/N
line_for_masking = line_labels == line #Find index of line to weight by
s2n = pv_data[line_for_masking,:,:][0] / pv_variance[line_for_masking,:,:][0]**0.5
if any(isfinite(s2n)):
on_mask = s2n > s2n_mask #Set on mask to be where line is above some s2n threshold
if nansum(on_mask)>1:
off_mask = ~on_mask
#stop()
mask_contours = zeros(shape(s2n)) #Set up 2D array of 1s and 0s that store the mask, 0 = outside mask, 1 = inside mask
mask_contours[on_mask] = 1.0
if use_background_region: #If you want to use another region to designate the background, read it in here
off_region = pyregion.open(background) #Read in background region file
off_mask = off_region.get_mask(shape = pv_shape) #Set up mask
#figure(figsize=(4.0,3.0), frameon=False) #Set up figure check size
#if weight != '': #If user specifies a line to weight by
# g = Gaussian2DKernel(stddev=5) #Set up gaussian smoothing to get rid of any grainyness between pixels
# line_for_weighting = line_labels == weight #Find index of line to weight by
# weights = convolve(pv_data[line_for_weighting,:,:][0] / pv_variance[line_for_weighting,:,:][0], g) [on_mask] #Weight by the S/N ratio of that line
#
#else: #If user specifies no weighting shoiuld be used
# weights = ones(shape(pv_variance[0,:,:])) #Give everything inside the region equal weight
line_flux = zeros(n_lines) #Set up array to store line fluxes
line_s2n = zeros(n_lines) #Set up array to store line S/N, set = 0 if no variance is found
		line_sigma = zeros(n_lines) #Set up array to store 1 sigma uncertainty
if s2n_mask > 0.0: #If user is using a s2n mask...
rolled_masks = zeros(shape(pv_data)) #Create array for storing rolled masks for later plotting, to save time computing the roll
elif optimal_extraction: #If user wants an optimal extraction
rolled_weights = zeros(shape(pv_data)) #Create array for storing rolled weights for later plotting, to save time computing the roll
for i in range(n_lines): #Loop through each line
if s2n_mask > 0.0: #If user specifies a s2n mask
#shift_mask_pixels = self.fit_mask(mask_contours, pv_data[i,:,:], pv_variance[i,:,:], pixel_range=pixel_range) #Try to find the best shift in velocity space to maximize S/N
shift_mask_pixels = fit_mask(mask_contours, pv_data[i,:,:], pv_variance[i,:,:], pixel_range=pixel_range) #Try to find the best shift in velocity space to maximize S/N
try:
use_mask = roll(on_mask, shift_mask_pixels, 1) #Set mask to be shifted to maximize S/N
mask_shift[i] = shift_mask_pixels #Store how many pixels the mask has been shifted for later readout
rolled_masks[i,:,:] = use_mask #store rolled mask for later plotting
except:
stop()
			elif optimal_extraction: #If user wants to use optimal extraction
shift_weight_pixels = fit_weights(weights, pv_data[i,:,:], pv_variance[i,:,:], pixel_range=pixel_range)
mask_shift[i] = shift_weight_pixels
shifted_weights = roll(weights, shift_weight_pixels, 1) #Set mask to be shifted to maximize S/N
#print('SUM SHIFTED WEIGHTS = ', nansum(shifted_weights))
rolled_weights[i,:,:] = shifted_weights #store shifted weights for later plotting the contours of
else:
use_mask = on_mask
if "use_mask" in locals(): #If mask is valid run the code, otherwise ignore code to skip errors
on_data = pv_data[i,:,:][use_mask] #Find data inside the region for grabbing the flux
on_variance = pv_variance[i,:,:][use_mask]
				if use_background_region: #If a background region is specified
off_data = pv_data[i,:,:][~use_mask] #Find data in the background region for calculating the background
					background = nanmedian(off_data) * size(on_data) #Calculate background from median of data in region and multiply by area of region used for summing flux
#background = biweight_location(off_data, ignore_nan=True) * size(on_data) #Calculate backgorund from median of data in region and multiply by area of region used for summing flux
else: #If no background region is specified by the user, use the whole field
background = nanmedian(pv_data[i,:,:]) * size(on_data) #Get background from median of all data in field and multiply by area of region used for summing flux
#background = biweight_location(pv_data[i,:,:], ignore_nan=True) * size(on_data) #Get background from median of all data in field and multiply by area of region used for summing flux
line_flux[i] = (nansum(on_data) - background)*dv #Calculate flux from sum of pixels in region minus the background (which is the median of some region or the whole field, multiplied by the area of the flux region)
				line_sigma[i] = (nansum(on_variance)*dv)**0.5 #Store 1 sigma uncertainty for line
line_s2n[i] = line_flux[i] / line_sigma[i] #Calculate the S/N in the region of the line
#print('i = ', i)
#print('nansum(on_data) = ', nansum(on_data))
#print('background = ', background)
elif optimal_extraction: #Okay if the user specifies to use optimal extraction now that we know how the weights have been shifted to maximize S/N
				### Horne 1986 optimal extraction method (earlier tests favored the simpler weighting scheme, kept commented out below in case I ever want to revive it, but this version is currently active)
p = (shifted_weights)**0.5
p = p - nanmedian(p)
p[p < 0.] = 0.
p = p / nansum(p)
v = pv_variance[i,:,:]
f = pv_data[i,:,:]
s = nanmedian(f[p == 0.0])
m = ones(pv_shape)
sigma_clip = 7.5
sigma_clip_bad_pix = (f - s - nansum(f-s)*p)**2 > sigma_clip**2 * v
m[sigma_clip_bad_pix] = 0.
p_squared_divided_by_v = nansum(m * p**2 / v)
try:
line_flux[i] = nansum((m * p * (f-s)) / v) / p_squared_divided_by_v
line_sigma[i] = sqrt( nansum((m*p)) / p_squared_divided_by_v )
except:
line_flux[i] = nan
line_sigma[i] = nan
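				#The flux and sigma lines above implement the Horne (1986) optimal estimator:
				#	f     = sum(M*P*(D-S)/V) / sum(M*P**2/V)
				#	sigma = ( sum(M*P) / sum(M*P**2/V) )**0.5
				#where P is the normalized spatial profile, D the data, S the background,
				#V the variance, and M the sigma-clip mask.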
# ### Current version of the extraction, appears to work best
# background = nanmedian(pv_data[i,:,:][shifted_weights == 0.0]) #Find background from all pixels below the background thereshold
# weighted_data = (pv_data[i,:,:]-background) * shifted_weights #Extract the weighted data, while subtracting the background from each pixel
# weighted_variance = pv_variance[i,:,:] * shifted_weights**2 #And extract the weighted variance
# line_flux[i] = nansum(weighted_data)*dv #Calculate flux sum of weighted pixels
# line_sigma[i] = (nansum(weighted_variance) * dv)**0.5 #Store 1 sigma uncertainity for line
# line_s2n[i] = line_flux[i] / line_sigma[i] #Calculate the S/N in the region of the line
# New version of "optimal extraction" to use chisq minimization
# background = nanmedian(pv_data[i,:,:][shifted_weights == 0.0]) #Find background from all pixels below the background thereshold
# p = copy.deepcopy(shifted_weights**0.5)
# use_pix = shifted_weights != 0.0
# p[shifted_weights == 0.0] = 0.0
# mean_flux = nansum(p * (pv_data[i,:,:] - background) * dv) #/ nansum(p)
# mean_sigma = nansum(p**2 * pv_variance[i,:,:] * dv**2)**0.5 #/ nansum(p**2))**0.5
# line_flux[i] = mean_flux
# line_sigma[i] = mean_sigma
# line_s2n[i] = mean_flux / mean_sigma
if savepdf: #If user specifies to save a PDF of the PV diagram + flux results
with PdfPages(save.path + name + '.pdf') as pdf: #Make a multipage pdf
figure(figsize=[11.0,8.5])
for i in range(n_lines): #Loop through each line
#subplot(n_subfigs, n_subfigs, i+1)
clf() #Clear plot field
gs = grd.GridSpec(2, 1, wspace = 0.2, hspace=0.05, width_ratios=[1], height_ratios=[0.5,0.5]) #Set up a grid of stacked plots for putting the excitation diagrams on
subplots_adjust(hspace=0.05, left=0.08,right=0.96,bottom=0.08,top=0.93) #Set all plots to have no space between them vertically
#ax = subplot(211) #Turn on "ax", set first subplot
fig = gcf()#Adjust aspect ratio
fig.set_size_inches([11.0,8.5]) #Adjust aspect ratio
if line_s2n[i] > s2n_cut: #If line is above the set S/N threshold given by s2n_cut, plot it
ax=subplot(gs[0])
frame = gca() #Turn off axis number labels
frame.axes.get_xaxis().set_ticks([]) #Turn off axis number labels
frame.axes.get_yaxis().set_ticks([]) #Turn off axis number labels
#if not optimal_extraction: #if not optimal extraction just show the results
imshow(pv_data[i,:,:]+1e7, cmap='gray', interpolation='Nearest', origin='lower', norm=LogNorm(), aspect='auto') #Save preview of line and region(s)
						suptitle('i = ' + str(i+1) + ', '+ line_labels[i] +' '+str(line_wave[i])+', Flux = ' + '%.3e' % line_flux[i] + r', $\sigma$ = ' + '%.3e' % line_sigma[i] + ', S/N = ' + '%.1f' % line_s2n[i], fontsize=14)
#ax[0].set_title('i = ' + str(i+1) + ', '+ line_labels[i] +' '+str(line_wave[i])+', Flux = ' + '%.3e' % line_flux[i] + ', S/N = ' + '%.1f' % line_s2n[i])
#xlabel('Velocity [km s$^{-1}$]')
#ylabel('Along slit')
ylabel('Position', fontsize=12)
#xlabel('Velocity [km s$^{-1}$]')
						if show_regions and s2n_mask == 0.0 and not optimal_extraction: #By default show the DS9 extraction region(s) overlaid on the PV diagram
							on_patch_list, on_text_list = on_region.get_mpl_patches_texts() #Convert the DS9 region into matplotlib patches and text artists
for p in on_patch_list: #Display DS9 regions in matplotlib
try:
ax.add_patch(p)
except:
print('Glitch plotting pyregion. Weird.')
for t in on_text_list:
ax.add_artist(t)
if use_background_region:
								off_patch_list, off_text_list = off_region.get_mpl_patches_texts() #Convert the background DS9 region into matplotlib patches and text artists
for p in off_patch_list:
ax.add_patch(p)
for t in off_text_list:
ax.add_artist(t)
if s2n_mask > 0.: #Plot s2n mask if user sets it
try:
#contour(roll(mask_contours, line_shift[i], 1))
contour(rolled_masks[i,:,:])
except:
stop()
elif optimal_extraction: #Plot weight contours if user specifies using optimal extraction
#try:
contour(rolled_weights[i,:,:]**0.5, linewidths=0.5) #Plot weight contours
background_mask = rolled_weights[i,:,:] == 0.0 #Find pixels used for background
							find_background = ones(shape(rolled_weights[i,:,:])) #Set up array storing 0 where the background is and 1 where it is not
							find_background[background_mask] = 0.0 #Zero out the background pixels so the contour below traces the boundary
							contour(find_background, colors='red', linewidths=0.25) #Plot the background boundary with a thin red line
#stop()
#except:
# stop
#ax = subplot(212) #Turn on "ax", set first subplot
ax = subplot(gs[1])
pv.plot_1d_velocity(i, clear=False, fontsize=10) #Test plotting 1D spectrum below 2D spectrum
# print('SAVING PLOT OF ', line_labels[i])
pdf.savefig() #Add figure as a page in the pdf
#figure(figsize=(11, 8.5), frameon=False) #Reset figure size
		if systematic_uncertainity > 0.: #If user specifies some fractional systematic uncertainty
			line_sigma = (line_sigma**2 + (line_flux*systematic_uncertainity)**2)**0.5 #Combine the statistical uncertainty with the systematic uncertainty
			line_s2n = line_flux / line_sigma #And then recalculate the S/N based on the new value
self.wave = line_wave #Save wavelength of lines
self.label = line_labels #Save labels of lines
self.flux = line_flux #Save line fluxes
self.s2n = line_s2n #Save line S/N
self.sigma = line_sigma #Save the 1 sigma limit
if hasattr(pv, 'shift_v'): #Check if the pv object has a stored shift_v variable
self.shift_v = pv.shift_v #Carry over velocity shifts from position_velocity (if they exist) into the region object for later tabulation
self.shift_wave = pv.shift_wave #Carry over wavelength shifts from position_velocity (if they exist) into the region object for later tabulation
if s2n_mask: #If user uses masked equal weighted extraction save the following....
			self.mask_contours = mask_contours #Store mask contours for later inspection or plotting if needed (for making advanced 2D figures in papers)
self.mask_shift = mask_shift #Store mask shift (in pixels) to later recall what the S/N maximization routine found
elif optimal_extraction: #else if the user uses optimal extraction save the following
self.weights = weights #Store weights used in extraction
			self.rolled_weights = rolled_weights #Store pixel shifts in weights used for extraction
self.mask_shift = mask_shift
		elif s2n_mask == 0.0 and not optimal_extraction: #If a user specified DS9 region is used
self.on_region = on_region #Store the region data for later plotting if necessary
		self.path = path #Save the path for later use (e.g. passing results on to the H2 analysis)
def make_latex_table(self, output_filename, s2n_cut = 3.0, normalize_to='5-3 O(3)'): #Make latex table of line fluxes
lines = []
#lines.append(r"\begin{table}") #Set up table header
lines.append(r"\begin{longtable}{rrlrr}")
lines.append(r"\caption{Line Fluxes}{} \label{tab:fluxes} \\")
#lines.append("\begin{scriptsize}")
#lines.append(r"\begin{tabular}{cccc}")
lines.append(r"\hline")
lines.append(r"$\lambda_{\mbox{\tiny vacuum}}$ & $\Delta\lambda$ & Line ID & $\log_{10} \left(F_i / F_{\mbox{\tiny "+normalize_to+r"}}\right)$ & S/N \\")
lines.append(r"\hline\hline")
lines.append(r"\endfirsthead")
lines.append(r"\hline")
lines.append(r"$\lambda_{\mbox{\tiny vacuum}}$ & $\Delta\lambda$ & Line ID & $\log_{10} \left(F_i / F_{\mbox{\tiny "+normalize_to+r"}}\right)$ & S/N \\")
lines.append(r"\hline\hline")
lines.append(r"\endhead")
lines.append(r"\hline")
lines.append(r"\endfoot")
lines.append(r"\hline")
lines.append(r"\endlastfoot")
flux_norm_to = self.flux[self.label == normalize_to]
for i in range(len(self.label)):
if self.s2n[i] > s2n_cut:
lines.append(r"%1.6f" % self.wave[i] + " & " + "%1.6f" % self.shift_wave[i] + " & " + self.label[i] + " & $" + "%1.2f" % log10(self.flux[i]/flux_norm_to) +
r"^{+%1.2f" % (-log10(self.flux[i]/flux_norm_to) + log10(self.flux[i]/flux_norm_to+self.sigma[i]/flux_norm_to))
+r"}_{%1.2f" % (-log10(self.flux[i]/flux_norm_to) + log10(self.flux[i]/flux_norm_to-self.sigma[i]/flux_norm_to)) +r"} $ & %1.1f" % self.s2n[i] + r" \\")
#lines.append(r"\hline\hline")
#lines.append(r"\end{tabular}")
lines.append(r"\end{longtable}")
#lines.append(r"\end{table}")
savetxt(output_filename, lines, fmt="%s") #Output table
def normalize(self, normalize_to): #Normalize all line fluxes by dividing by this number
self.flux = self.flux / normalize_to
self.sigma = self.sigma / normalize_to
def save_table(self, output_filename, s2n_cut = 3.0): #Output simple text table of wavelength, line label, flux
lines = []
		lines.append('#Label\tWave [um]\tFlux\t1 sigma uncertainty')
for i in range(len(self.label)):
lines.append(self.label[i] + "\t%1.5f" % self.wave[i] + "\t%1.3e" % self.flux[i] + "\t%1.3e" % self.sigma[i])
savetxt(save.path + output_filename, lines, fmt="%s") #Output Table
#~~~~~~~save line ratios~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def save_ratios(self, to_line='', factor=1.0):
if to_line == '': #If user does not specify line to take ratio relative to
			to_line = self.label[0] #Take the ratio relative to the first line in the list
ratios = factor * self.flux / self.flux[self.label == to_line] #Take ratios, multiply by some factor if comparing to some other table (ie. compared to H-beta as found in Osterbrock & Ferland 2006)
ratios_sigma = (ratios**2 *((self.sigma/self.flux)**2+(self.sigma[self.label == to_line]/self.flux[self.label == to_line])**2))**0.5
fname = save.path + 'line_ratios_relative_to_' + to_line + '.dat' #Set up file path name to save
		printme = [] #Array that will hold the file output
		for i in range(len(self.label)): #Loop through each line
printme.append(self.label[i] + '/'+ to_line + '\t%1.5f' % ratios[i] + '\t%1.5f' % ratios_sigma[i]) #Save ratio to an array that will be outputted as the .dat file
savetxt(fname, printme, fmt="%s") #Output Table, and we're done
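	#Example usage sketch (the factor is a hypothetical external scaling, e.g. for comparing
	#against a table normalized to some other line):
	#myregion.save_ratios(to_line='H I 4-7', factor=1.0)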
# def calculate_HI_extinction(self, s2n_cut=3.0, max_n=16): #Calculate extinction by comparing measured H I line fluxes to theory predicted by pyneb
# data_file = 'data/predicted_HI_fluxes.dat' #File storing predicted H I line fluxes
# state_i, state_j = loadtxt(data_file, unpack=True, dtype='i', delimiter='\t', usecols=(0,1,)) #read in states
# wave, flux = loadtxt(data_file, unpack=True, dtype='f', delimiter='\t', usecols=(2,3,)) #Read in wavelength and predicted fluxes
# #construct a list of all found H I lines
# found_wavelengths = []
# found_observed_fluxes = []
# found_predicted_fluxes = []
# found_labels = []
# #loop through each possible H I line in our predicted flux list and see if they exist in this region object, if they do, append the founds lists
# for i in range(len(state_is[] # label = 'H I '+str(state_j[i])+'-'+str(state_i[i]) #construct a string to match the line label strings in the H I line list
# find_line = where(self.label == label)[0] #Check if line exists in this region object
# if len(find_line)==1 and state_i[i] < max_n and state_j[i] < max_n: #If it is found
# find_line = find_line[0] #Get found line index out of array and into a simple integer
# if self.s2n[find_line] > s2n_cut:
# found_labels.append(label)
# found_wavelengths.append(self.wave[find_line]) #Store waveneghts...
# found_observed_fluxes.append(self.flux[find_line]) #observed fluxes...
# found_predicted_fluxes.append(flux[i]) #and predicted fluxes for every H I line found
# found_wavelengths = array(found_wavelengths) #Convert everything to numpy arrays for easy processing
# found_observed_fluxes = array(found_observed_fluxes)
# found_predicted_fluxes = array(found_predicted_fluxes)
# #Now wee construct the extinction curve
# A_lambda = array([ 0.482, 0.282, 0.175, 0.112, 0.058]) #(A_lambda / A_V) extinction curve from Rieke & Lebofsky (1985) Table 3
# l = array([ 0.806, 1.22 , 1.63 , 2.19 , 3.45 ]) #Wavelengths for extinction curve from Rieke & Lebofsky (1985)
# extinction_curve = interp1d(l, A_lambda, kind='quadratic') #Create interpolation object for extinction curve from Rieke & Lebofsky (1985)
# AVs = arange(0,20.0,0.1) #Create array of exctinations to test
# n_AVs = len(AVs)
# chisq = zeros(n_AVs)
# br14_brgamma_chisq = zeros(n_AVS)
# br14 = found_labels == 'H I 4-14'
# brgamma = found_labels == 'H I 4-7'
# for i in range(n_AVs):
# reddened_predicted_flux = found_predicted_fluxes * 10**(-0.4*extinction_curve(found_wavelengths)*AVs[i]) #Apply artificial def to predicted line fluxes
# ratio = found_observed_fluxes / reddened_predicted_flux #Calculate ratio to observed to artificially reddened predicted line fluxes
# median_ratio = nanmedian(ratio) #Find the median ratio
# chisq[i] = nansum(log10(ratio/median_ratio)**2) #calculate the chisq'
# chisq_br14_brgamma = nansum(log10((ratio[])/))
# print 'Best A_V for all lines used (minus cuts) = ', AVs[chisq==nanmin(chisq)]
# #return AVs[chisq==nanmin(chisq)] #return the AV that matches the minimum chisq
def calculate_HI_extinction(self, plot_result=True): #Calculate extinction by comparing measured H I line fluxes to theory predicted by pyneb
data_file = 'data/predicted_HI_fluxes.dat' #File storing predicted H I line fluxes
state_i, state_j = loadtxt(data_file, unpack=True, dtype='i', delimiter='\t', usecols=(0,1,)) #read in states
wave, flux = loadtxt(data_file, unpack=True, dtype='f', delimiter='\t', usecols=(2,3,)) #Read in wavelength and predicted fluxes
#construct a list of all found H I lines
predicted_br14_flux = (flux[(state_i==14) & (state_j==4)])[0]
predicted_brgamma_flux = (flux[(state_i==7) & (state_j==4)])[0]
		#Now we construct the extinction curve
A_lambda = array([ 0.482, 0.282, 0.175, 0.112, 0.058]) #(A_lambda / A_V) extinction curve from Rieke & Lebofsky (1985) Table 3
l = array([ 0.806, 1.22 , 1.63 , 2.19 , 3.45 ]) #Wavelengths for extinction curve from Rieke & Lebofsky (1985)
extinction_curve = interp1d(l, A_lambda, kind='quadratic') #Create interpolation object for extinction curve from Rieke & Lebofsky (1985)
		AVs = arange(0,20.0,0.01) #Create array of extinctions to test
n_AVs = len(AVs)
chisq = zeros(n_AVs)
observed_br14_brgamma_ratio = (self.flux[self.label=='H I 4-14'] / self.flux[self.label=='H I 4-7'])[0]
br14_extinction_curve = extinction_curve(1.5884880)
brgamma_extinction_curve = extinction_curve(2.166120)
for i in range(n_AVs):
			reddened_predicted_br_14_flux = predicted_br14_flux * 10**(-0.4*br14_extinction_curve*AVs[i]) #Apply artificial reddening to predicted line fluxes
			reddened_predicted_br_gamma_flux = predicted_brgamma_flux * 10**(-0.4*brgamma_extinction_curve*AVs[i]) #Apply artificial reddening to predicted line fluxes
			reddened_predicted_ratio = reddened_predicted_br_14_flux / reddened_predicted_br_gamma_flux #Calculate the ratio of the artificially reddened predicted line fluxes
			chisq[i] = (observed_br14_brgamma_ratio-reddened_predicted_ratio)**2 #Calculate the chi-squared
best_AV = AVs[chisq==nanmin(chisq)]
print('AV = ', best_AV) #print the AV that matches the minimum chisq
if plot_result:
found_wavelengths = []
found_observed_fluxes = []
found_predicted_fluxes = []
found_labels = []
#loop through each possible H I line in our predicted flux list and see if they exist in this region object, if they do, append the founds lists
for i in range(len(state_i)):
label = 'H I '+str(state_j[i])+'-'+str(state_i[i]) #construct a string to match the line label strings in the H I line list
find_line = where(self.label == label)[0] #Check if line exists in this region object
				if len(find_line)==1: #If it is found
					find_line = find_line[0] #Get found line index out of array and into a simple integer
					found_labels.append(label)
					found_wavelengths.append(self.wave[find_line]) #Store wavelengths...
					found_observed_fluxes.append(self.flux[find_line]) #observed fluxes...
					found_predicted_fluxes.append(flux[i]) #and predicted fluxes for every H I line found
found_wavelengths = array(found_wavelengths) #Convert everything to numpy arrays for easy processing
found_observed_fluxes = array(found_observed_fluxes)
found_predicted_fluxes = array(found_predicted_fluxes)
clf()
plot(found_wavelengths, found_observed_fluxes / found_predicted_fluxes, 'o', label='Reddening Uncorrected')
plot(found_wavelengths, found_observed_fluxes * 10**(0.4*extinction_curve(found_wavelengths)*best_AV) / found_predicted_fluxes, 'o', label='Reddening Corrected')
xlabel('Wavelength')
			ylabel('Dereddened fluxes / predicted fluxes')
			suptitle(r'Br-14/Br-$\gamma$ A$_V$ = '+str(best_AV))
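	#Hedged usage sketch (assumes this object already holds measured 'H I 4-14' and 'H I 4-7' fluxes; names are illustrative, not a definitive recipe):
	# my_extracted_fluxes.calculate_HI_extinction(plot_result=True) #Prints the best-fit A_V from the Br-14/Br-gamma ratio and plots observed/predicted flux ratios
	#The dereddening in the plot above follows 10**(0.4*(A_lambda/A_V)*A_V), with A_lambda/A_V interpolated from Rieke & Lebofsky (1985)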
def combine_regions(region_A, region_B, name='combined_region'): #Definition to combine two regions by adding their fluxes and variances together
combined_region = copy.deepcopy(region_A) #Start by created the combined region
combined_region.flux += region_B.flux #Add fluxes together
	combined_region.sigma = (combined_region.sigma**2 + region_B.sigma**2)**0.5 #Add uncertainty in quadrature
combined_region.s2n = combined_region.flux / combined_region.sigma #Recalculate new S/N
combined_region.path = save.path + name #Store the path to save files in so it can be passed around, eventually to H2 stuff
	return(combined_region) #Return the combined region
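#Hedged usage sketch for combine_regions (region names are illustrative):
# combined = combine_regions(region_A, region_B, name='regions_A_plus_B')
#Fluxes add linearly while 1 sigma uncertainties add in quadrature, so combining two similar regions improves the S/N by roughly sqrt(2)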
class extract: #Class for extracting fluxes in 1D from a position_velocity object
	def __init__(self, pv, name='flux_1d', file='', background=True, s2n_cut = -99.0, vrange=[0,0], use2d=False, show_extraction=True, systematic_uncertainty=0.0):
path = save.path + name #Store the path to save files in so it can be passed around, eventually to H2 stuff
line_labels = pv.label #Read out line labels
line_wave = pv.lab_wave #Read out (lab) line wavelengths
if use2d: #By default use the 1D spectrum (for ABBA observations), but use 2D for extended objects
flux = nansum(pv.pv, 1) #Collapse flux along slit
var = nansum(pv.var2d, 1)#Collapse variance along slit
else:
flux = pv.flux #Holder for flux datacube
var = pv.var1d #holder for variance datacube
velocity = pv.velocity
dv = velocity[1]-velocity[0] #Chunk of velocity space
#bad_data = pv_data < -10000.0 #Mask out bad pixels and cosmic rays that somehow made it through
#pv_data[bad_data] == nan
#pv_variance[bad_data] == nan
#pv_shape = shape(pv_data[0,:,:]) #Read out shape of a 2D slice of the pv diagram cube
n_lines = pv.n_lines #Read out number of lines
		if vrange == [0,0]: #If user does not specify velocity range, ask for it from user
			low_range = float(input('What is blue velocity limit? '))
			high_range = float(input('What is red velocity limit? '))
			vrange = [low_range, high_range] #Store the user supplied velocity limits
on_target = (velocity > vrange[0]) & (velocity < vrange[1]) #Find points inside user chosen velocity range
off_target = ~on_target #Find points outside user chosen velocity range
#figure(figsize=(4.0,3.0), frameon=False) #Set up figure check size
figure(0)
with PdfPages(save.path + name + '.pdf') as pdf: #Make a multipage pdf
line_flux = zeros(n_lines) #Set up array to store line fluxes
line_s2n = zeros(n_lines) #Set up array to store line S/N, set = 0 if no variance is found
			line_sigma = zeros(n_lines) #Set up array to store 1 sigma uncertainty
for i in range(n_lines): #Loop through each line
clf() #Clear plot field
data = flux[i]
variance = var[i]
				if background: #If user specifies background subtraction, estimate the per-pixel background level from outside the velocity range
background_level = nanmedian(data[off_target]) #Calculate level (per pixel) of the background level
else: #If no background region is specified by the user, use the whole field
background_level = 0.0 #Or if you don't want to subtract the background, just make the level per pixel = 0
line_flux[i] = (nansum(data[on_target]) - background_level * size(data[on_target]))*dv #Calculate flux from sum of pixels in region minus the background (which is the median of some region or the whole field, multiplied by the area of the flux region)
				line_sigma[i] = (nansum(variance[on_target])*dv)**0.5 #Store 1 sigma uncertainty for line
line_s2n[i] = line_flux[i] / line_sigma[i] #Calculate the S/N in the region of the line
if line_s2n[i] > s2n_cut: #If line is above the set S/N threshold given by s2n_cut, plot it
suptitle('i = ' + str(i+1) + ', '+ line_labels[i] +' '+str(line_wave[i])+', Flux = ' + '%.3e' % line_flux[i] + ', $\sigma$ = ' + '%.3e' % line_sigma[i] + ', S/N = ' + '%.1f' % line_s2n[i] ,fontsize=10)
pv.plot_1d_velocity(i, clear=False, fontsize=16, show_zero=show_extraction) #Test plotting 1D spectrum below 2D spectrum
if show_extraction: #By default, the extraction velocity limits and background level are shown. If user sets show_extraction = False, these are not shown
plot([vrange[0], vrange[0]], [-1e50,1e50], linestyle='--', color = 'blue') #Plot blueshifted velocity limits
plot([vrange[1], vrange[1]], [-1e50,1e50], linestyle='--', color = 'blue') #Plot redshifted velocity limits
plot([flat_nanmin(velocity), flat_nanmax(velocity)], [background_level/1e3, background_level/1e3], linestyle='--', color = 'blue') #Plot background level
#print("background_level = ",background_level)
pdf.savefig() #Add figure as a page in the pdf
#dfigure(figsize=(11, 8.5), frameon=False) #Reset figure size
		if systematic_uncertainty > 0.: #If user specifies some fractional systematic uncertainty
			line_sigma = (line_sigma**2 + (line_flux*systematic_uncertainty)**2)**0.5 #Combine the statistical uncertainty with the systematic uncertainty
line_s2n = line_flux / line_sigma
self.velocity = velocity #Save velocity grid
self.wave = line_wave #Save wavelength of lines
self.label = line_labels #Save labels of lines
self.flux = line_flux #Save line fluxes
self.s2n = line_s2n #Save line S/N
self.sigma = line_sigma #Save the 1 sigma limit
		self.path = path #Save the path to save files in so it can be passed around
def normalize(self, normalize_to): #Normalize all line fluxes by dividing by this number
self.flux = self.flux / normalize_to
self.sigma = self.sigma / normalize_to
def save_table(self, output_filename, s2n_cut = 3.0): #Output simple text table of wavelength, line label, flux
lines = []
		lines.append('#Label\tWave [um]\tFlux\t1 sigma uncertainty')
for i in range(len(self.label)):
lines.append(self.label[i] + "\t%1.5f" % self.wave[i] + "\t%1.3e" % self.flux[i] + "\t%1.3e" % self.sigma[i])
savetxt(save.path + output_filename, lines, fmt="%s") #Output Table
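	#Hedged usage sketch for the extract class (name and velocity range are illustrative assumptions):
	# line_fluxes = extract(pv, name='my_fluxes', vrange=[-20.0, 20.0]) #Integrate each line between -20 and +20 km/s
	# line_fluxes.normalize(line_fluxes.flux[line_fluxes.label == 'H I 4-7'][0]) #Normalize all fluxes to Br-gamma
	# line_fluxes.save_table('my_fluxes.dat') #Write label, wavelength, flux, and 1 sigma uncertainty to a text table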
#~~~~~~~~~~~~~~~~~~~~~~~~~Code for reading in and analyzing spectral data~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Convenience function for making a single spectrum object in 1D or 2D that combines both H & K bands while applying telluric correction and flux calibration
#The idea is that the user can call a single line and get a single spectrum ready to go
def getspec(date, waveno, frameno, stdno, oh=0, oh_scale=0.0, oh_flexure=0., std_flexure=0., B=0.0, V=0.0, y_scale=1.0, wave_smooth=0.0, y_power=1.0, y_sharpen=0.0,
twodim=True, usestd=True, no_flux=False, make_1d=False, median_1d=False, tellurics=False, savechecks=True, mask_cosmics=False,
telluric_power=1.0, telluric_spectrum=[], calibration=[], telluric_quality_cut=False, interpolate_slit=False, std_shift=0.0,
phoenix_model=''):
if usestd or tellurics:
#Make 1D spectrum object for standard star
H_std_obj = makespec(date, 'H', waveno, stdno) #Read in H-band
		K_std_obj = makespec(date, 'K', waveno, stdno) #Read in K-band
		std_obj = H_std_obj #Create master object
std_obj.orders = K_std_obj.orders + H_std_obj.orders #Combine orders
std_obj.n_orders = K_std_obj.n_orders + H_std_obj.n_orders #Find new total number of orders
#Made 1D spectrum for flattened standard star (used for telluric correction)
H_stdflat_obj = makespec(date, 'H', waveno, stdno, std=True) #Read in H-band
K_stdflat_obj = makespec(date, 'K', waveno, stdno, std=True) #Read in K-band
stdflat_obj = H_stdflat_obj #Create master object
stdflat_obj.orders = K_stdflat_obj.orders + H_stdflat_obj.orders #Combine orders
stdflat_obj.n_orders = K_stdflat_obj.n_orders + H_stdflat_obj.n_orders #Find new total number of orders
if std_flexure != 0.: #If user specifies a flexure correction
if size(std_flexure) == 1: #If the correction is only one number, correct all orders
for i in range(std_obj.n_orders): #Loop through each order
std_obj.orders[i].flux = flexure(std_obj.orders[i].flux, std_flexure) #Apply flexure correction to 1D array
stdflat_obj.orders[i].flux = flexure(stdflat_obj.orders[i].flux, std_flexure) #Apply flexure correction to 1D array
			else: #Else if correction has two numbers, the first number is the H band and the second number is the K band
for i in range(std_obj.n_orders):#Loop through each order
if std_obj.orders[i].wave[0] < 1.85: #check which band we are in, index=0 is H band, 1 is K band
flexure_index = 0
else:
flexure_index = 1
std_obj.orders[i].flux = flexure(std_obj.orders[i].flux, std_flexure[flexure_index]) #Apply flexure correction to 1D array
stdflat_obj.orders[i].flux = flexure(stdflat_obj.orders[i].flux, std_flexure[flexure_index]) #Apply flexure correction to 1D array
#Make 1D spectrum object
H_sci1d_obj = makespec(date, 'H', waveno, frameno) #Read in H-band
K_sci1d_obj = makespec(date, 'K', waveno, frameno) #Read in K-band
sci1d_obj = H_sci1d_obj #Create master object
sci1d_obj.orders = K_sci1d_obj.orders + H_sci1d_obj.orders #Combine orders
sci1d_obj.n_orders = K_sci1d_obj.n_orders + H_sci1d_obj.n_orders #Find new total number of orders
if twodim: #If user specifies also to make a 2D spectrum object
#Make 2D spectrum object
H_sci2d_obj = makespec(date, 'H', waveno, frameno, twodim=True, mask_cosmics=mask_cosmics, interpolate_slit=interpolate_slit) #Read in H-band
K_sci2d_obj = makespec(date, 'K', waveno, frameno, twodim=True, mask_cosmics=mask_cosmics, interpolate_slit=interpolate_slit) #Read in K-band
#if H_sci2d_obj.slit_pixel_length != K_sci2d_obj.slit_pixel_length:
#print('H slit length: ', H_sci2d_obj.slit_pixel_length)
#print('K slit length: ', K_sci2d_obj.slit_pixel_length)
sci2d_obj = H_sci2d_obj #Create master object
sci2d_obj.orders = K_sci2d_obj.orders + H_sci2d_obj.orders #Combine orders
sci2d_obj.n_orders = K_sci2d_obj.n_orders + H_sci2d_obj.n_orders #Find new total number of orders
if make_1d: #If user specifies they want to make a 1D spectrum, we will overwrite the spec1d
for i in range(sci2d_obj.n_orders): #Loop through each order to....
sci1d_obj.orders[i].flux = nansum(sci2d_obj.orders[i].flux, 0) #Collapse 2D spectrum into 1D
sci1d_obj.orders[i].noise = nansum(sci2d_obj.orders[i].noise**2, 0)**0.5 #Collapse 2D noise in 1D
elif median_1d: #If user specifies they want to make a 1D spectrum by median collapsing, overwrite the old spec1d, for now we calculate uncertainity by summing variance
for i in range(sci2d_obj.n_orders): #Loop through each order to....
sci1d_obj.orders[i].flux = nanmedian(sci2d_obj.orders[i].flux, 0) #Collapse 2D spectrum into 1D
sci1d_obj.orders[i].noise = nansum(sci2d_obj.orders[i].noise**2, 0)**0.5 #Collapse 2D noise in 1D
#Read in sky difference frame to correct for OH lines, with user interacting to set the scaling
if oh != 0: #If user specifies a sky correction image number
		oh1d = getspec(date, waveno, oh, oh, usestd=False, median_1d=True, twodim=False) #Create a 1D spectrum object of the OH sky difference frame combining both H and K bands
if (oh_scale == 0.0) or (oh_scale == [0.0,0.0]): #If scale is not specified by user find it automatically, along with flexure, independently for H and K bands
oh1d.combine_orders() #Combine OH sky difference orders so we can examine the entire H and K bands
sci_obj = copy.deepcopy(sci1d_obj) #Make copy of science 1D object so we don't accidently modify the original data
sci_obj.combine_orders() #Combine the science data orders
oh_flux = oh1d.combospec.flux #Grab OH sky difference 1D flux
wave = oh1d.combospec.wave #Grab wavelength array
			oh_flux[isinf(oh_flux)] = nan #Turn infinite oh values into nan to fix errors
flux = sci_obj.combospec.flux #Grab science 1D flux
g = Gaussian1DKernel(stddev=20) #Prepare to smooth the OH sky difference data to find where there are OH residuals and where there are none
oh_smoothed = abs(convolve(oh_flux,g)) #Smooth OH sky difference frame
oh_smoothed = oh_smoothed / nanmax(oh_smoothed) #Normalize to brightest OH residual
#oh_mask = oh_smoothed > 0.05 #Find all the smoothed OH sky difference residuals above 1/20th the brightness of the smoothed brightest residual
oh_mask = zeros(len(oh_smoothed)) #Set up oh mask as an array of numbers
			oh_mask[(oh_smoothed/nanmedian(oh_smoothed) > 10.0)] += 1 #Find all pixels above 10x the median of the smoothed residuals
OH_lines = lines(OH_line_list, delta_v=0.0) #Load OH line list
parsed_OH_lines = OH_lines.parse( flat_nanmin(wave), flat_nanmax(wave))
width=0.00006
for i in range(len(parsed_OH_lines.wave)): #Loop through each line
oh_mask[abs(wave-parsed_OH_lines.wave[i]) < width] += 1 #Create mask of OH lines...
g = Gaussian1DKernel(stddev=200) #Prepare to smooth the science data to zero out any continuum
flux = flux - convolve(flux, g) #Subtract a crude fit to the continuum so that most of the OH residuals start around 0 flux
flex = arange(-4.0, 4.0, 0.05) #Range of flexure shifts to test
scales = arange(-4.0,4.0,0.01) #Range of OH scales to test
in_h_band = (sci_obj.combospec.wave < 1.85) & (oh_mask == 2) #Find only pixels in the H band and near a bright OH residual
in_k_band = (sci_obj.combospec.wave > 1.85) & (oh_mask == 2) #Find only pixels in the K band and near a bright OH residual
h_store_chi_sq = zeros([len(scales), len(flex)]) #Array for storing chi-sq for h band
k_store_chi_sq = zeros([len(scales), len(flex)]) #Array for storing chi-sq for k band
for i in range(len(scales)): #Loop through each possible scaling of the OH residuals
for k in range(len(flex)): #Look through each possible flexure value of the OH residuals
tweaked_oh = flexure(oh_flux*scales[i], flex[k]) #Apply the flexure and scaling to the OH sky difference residuals
diff = (flux - tweaked_oh) #Subtract the tweaked OH sky difference residuals from the residuals in the science flux
diff[~isfinite(diff)] = nan #Turn all values for diff that are infinite into nans so the nansum doesn't sum to infinity
h_store_chi_sq[i,k] = nansum((diff[in_h_band])**2) #Calculate chisq for H band
k_store_chi_sq[i,k] = nansum((diff[in_k_band])**2) #Calculate chisq for K band
h_store_chi_sq[h_store_chi_sq==0.] = nan #Nan out zeros to correct error in finding the real minimum chisq
k_store_chi_sq[k_store_chi_sq==0.] = nan
			best_h_band_indices = where(h_store_chi_sq == flat_nanmin(h_store_chi_sq)) #Find best fit by finding the minimum chi-sq in the H band
			best_k_band_indices = where(k_store_chi_sq == flat_nanmin(k_store_chi_sq)) #Find best fit by finding the minimum chi-sq in the K band
			oh_scale = [scales[best_h_band_indices[0][0]], scales[best_k_band_indices[0][0]]] #Save OH scaling best fit
			oh_flexure = [flex[best_h_band_indices[1][0]], flex[best_k_band_indices[1][0]]] #Save OH flexure best fit
			print('No oh_scale specified by user, using automated chi-sq minimization routine.')
print('OH residual scaling found to be: ', oh_scale)
print('OH residual flexure found to be: ', oh_flexure)
if oh_flexure != 0.: #If user specifies a flexure correction
if size(oh_flexure) == 1: #If the correction is only one number, correct all orders
for i in range(sci1d_obj.n_orders): #Loop through each order
oh1d.orders[i].flux = flexure(oh1d.orders[i].flux, oh_flexure) #Apply flexure correction to 1D array
#oh2d.orders[i].flux = flexure(oh1d.orders[i].flux, oh_flexure) #Apply flexure correction to 2D array
			else: #Else if correction has two numbers, the first number is the H band and the second number is the K band
for i in range(sci1d_obj.n_orders):#Loop through each order
if oh1d.orders[i].wave[0] < 1.85: #check which band we are in, index=0 is H band, 1 is K band
flexure_index = 0
else:
flexure_index = 1
oh1d.orders[i].flux = flexure(oh1d.orders[i].flux, oh_flexure[flexure_index]) #Apply flexure correction to 1D array
#oh2d.orders[i].flux = flexure(oh1d.orders[i].flux, oh_flexure[flexure_index]) #Apply flexure correction to 2D array
		if size(oh_scale) == 1: #If user specifies only one oh scale for the H and K band, use the same scale in both bands, else use the scale for each band separately if the user provides two oh_scales
oh_scale = [oh_scale, oh_scale]
if savechecks: #If user specifies to save checks as a pdf
with PdfPages(save.path + 'check_OH_correction.pdf') as pdf: #Create PDF showing OH correction for user inspection
clf()
for i in range(sci1d_obj.n_orders): #Save whole spectrum at once
plot(oh1d.orders[i].wave, oh1d.orders[i].flux, color='red', label='Differential Sky Subtraction', linewidth=0.1)
plot(sci1d_obj.orders[i].wave, sci1d_obj.orders[i].flux, ':', color='black', label='Uncorrected Science Data', linewidth=0.1)
if oh1d.orders[i].wave[0] < 1.85: #check which band we are in, index=0 is H band, 1 is K band
plot(oh1d.orders[i].wave, sci1d_obj.orders[i].flux - oh1d.orders[i].flux*oh_scale[0], color='black', label='OH Corrected Science Data', linewidth=0.1)
else:
plot(oh1d.orders[i].wave, sci1d_obj.orders[i].flux - oh1d.orders[i].flux*oh_scale[1], color='black', label='OH Corrected Science Data', linewidth=0.1)
if i==0:
legend(loc='upper right', fontsize=9) #Only plot legend for first set
xlabel('$\lambda$ [$\mu$m]')
ylabel('Relative Flux')
title('Check whole spectrum')
tight_layout()
pdf.savefig()
for i in range(sci1d_obj.n_orders): #Then save each order for closer inspection
clf()
plot(oh1d.orders[i].wave, oh1d.orders[i].flux, color='red', label='Differential Sky Subtraction', linewidth=0.1)
plot(sci1d_obj.orders[i].wave, sci1d_obj.orders[i].flux, ':', color='black', label='Uncorrected Science Data', linewidth=0.1)
if oh1d.orders[i].wave[0] < 1.85: #check which band we are in, index=0 is H band, 1 is K band
plot(oh1d.orders[i].wave, sci1d_obj.orders[i].flux - oh1d.orders[i].flux*oh_scale[0], color='black', label='OH Corrected Science Data', linewidth=0.1)
else:
plot(oh1d.orders[i].wave, sci1d_obj.orders[i].flux - oh1d.orders[i].flux*oh_scale[1], color='black', label='OH Corrected Science Data', linewidth=0.1)
xlabel('$\lambda$ [$\mu$m]')
ylabel('Relative Flux')
legend(loc='upper right',fontsize=9)
title('Check individual orders')
tight_layout()
pdf.savefig()
for i in range(sci1d_obj.n_orders):
if oh1d.orders[i].wave[0] < 1.85: #check which band we are in, index=0 is H band, 1 is K band
use_oh_scale = oh_scale[0]
else:
use_oh_scale = oh_scale[1]
sci1d_obj.orders[i].flux -= oh1d.orders[i].flux * use_oh_scale
if twodim: #If user specifies a two dimensional object
sci2d_obj.orders[i].flux -= oh1d.orders[i].flux * use_oh_scale / float(sci2d_obj.slit_pixel_length)
# if twodim: #If user specifies a two dimensional object
# #sci2d_obj.orders[i].flux = sci2d_obj.orders[i].flux - tile(nanmedian(oh2d.orders[i].flux, 0), [slit_length,1]) * oh_scale
# sci2d_obj.orders[i].flux -= nanmedian(oh2d.orders[i].flux, 0) * use_oh_scale
#Apply telluric correction & relative flux calibration
if tellurics: #If user specifies "tellurics", return only flattened standard star spectrum
return stdflat_obj
elif usestd: #If user wants to use standard star (True by default)
if phoenix_model == '': #If using the old standard star correction...
spec1d = telluric_and_flux_calib(sci1d_obj, std_obj, stdflat_obj, B=B, V=V, no_flux=no_flux, y_scale=y_scale, y_power=y_power, y_sharpen=y_sharpen, wave_smooth=wave_smooth, savechecks=savechecks,
telluric_power=telluric_power, telluric_spectrum=telluric_spectrum, calibration=calibration, quality_cut=telluric_quality_cut, current_frame=str(date)+'_'+str(frameno)) #For 1D spectrum
if twodim: #If user specifies this object has a 2D spectrum
spec2d = telluric_and_flux_calib(sci2d_obj, std_obj, stdflat_obj, B=B, V=V, no_flux=no_flux, y_scale=y_scale, y_power=y_power, y_sharpen=y_sharpen, wave_smooth=wave_smooth, savechecks=savechecks,
telluric_power=telluric_power, telluric_spectrum=telluric_spectrum, calibration=calibration, quality_cut=telluric_quality_cut, current_frame=str(date)+'_'+str(frameno)) #Run for 2D spectrum
		else: #Else if using the new Phoenix stellar models for standard star correction...
			print('YOU HAVE SPECIFIED YOU WANT TO USE THE PHOENIX STELLAR MODEL '+phoenix_model)
spec1d = process_standard_star_with_phoenix_model(sci1d_obj, std_obj, stdflat_obj, B, V, phoenix_model, std_shift)
if twodim: #If user specifies this object has a 2D spectrum
spec2d = process_standard_star_with_phoenix_model(sci2d_obj, std_obj, stdflat_obj, B, V, phoenix_model, std_shift)
#Return either 1D and 2D spectra, or just 1D spectrum if no 2D spectrum exists
if twodim:
return spec1d, spec2d #Return both 1D and 2D spectra objects
else:
return spec1d #Only return 1D spectra object
else: #If user does not want to use standard star
if twodim:
return sci1d_obj, sci2d_obj #Return both 1D and 2D spectra objects
else:
return sci1d_obj #Only return 1D spectra object
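#Hedged usage sketch for getspec (date and frame numbers are illustrative placeholders, not real observations):
# spec1d_obj, spec2d_obj = getspec(20141023, 170, 172, 180, usestd=True, twodim=True) #Telluric corrected, flux calibrated 1D and 2D H+K spectra
# spec1d_obj.combine_orders() #Stitch the orders into one continuous spectrum before plotting or saving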
#Wrapper for easily creating a 1D or 2D comprehensive spectrum object of any type, allowing user to import an entire spectrum object in one line
def makespec(date, band, waveno, frameno, std=False, twodim=False, mask_cosmics=False, interpolate_slit=False):
#spec_data = fits_file(date, frameno, band, std=std, twodim=twodim, s2n=s2n) #Read in data from spectrum
spec_data = fits_file(date, frameno, band, std=std, twodim=twodim)
try: #Try reading in new wavelength data from A0V
wave_data = fits_file(date, waveno, band, wave=True) #If 1D, read in data from wavelength solution
except: #If it does not exist, try reading in wavelength data the old way (from the calib directory)
try: #Try reading in fits file
wave_data = fits_file(date, waveno, band, wave_old=True) #If 1D, read in data from wavelength solution
except: #If no fits file is found, try reading in json file instead
			filename = calib_path+str(date)+'/SDC'+band+'_'+str(date)+'_'+'%.4d' % int(waveno) +'.wvlsol_v0.json' #Set json file name using the wavelength solution frame number
with open(filename) as data_file: #Read in Json file
data = json.load(data_file)
wave_data = data['wvl_sol'] #Splice out the wavelength solution
if twodim: #If spectrum is 2D but no variance data to be read in
var_data = fits_file(date, frameno, band, var2d=True) #Grab data for 2D variance cube
spec_obj = spec2d(wave_data, spec_data, fits_var=var_data, mask_cosmics=mask_cosmics, interpolate_slit=interpolate_slit) #Create 2D spectrum object, with variance data inputted to get S/N
else: #If spectrum is 1D
var_data = fits_file(date, frameno, band, var1d=True)
spec_obj = spec1d(wave_data, spec_data, var_data) #Create 1D spectrum object
return(spec_obj) #Return the fresh spectrum object!
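#Hedged usage sketch for makespec (arguments are illustrative): read a single band 2D science frame
# sci2d_obj = makespec(20141023, 'K', 170, 172, twodim=True) #K-band 2D spectrum with wavelength solution and variance attached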
#Class stores information about a fits file that has been reduced by the PLP
class fits_file:
def __init__(self, date, frameno, band, std=False, wave=False, twodim=False, s2n=False, var1d=False, var2d=False, wave_old=False):
self.date = '%.4d' % int(date) #Store date of observation
self.frameno = '%.4d' % int(frameno) #Store first frame number of observation
self.band = band #Store band name 'H' or 'K'
self.std = std #Store if file is a standard star
self.wave = wave #Store if file is a wavelength solution
self.wave_old = wave_old #Store if file is the old way wavelength solutions are done
self.s2n = s2n #Store if file is the S/N spectrum
self.twodim = twodim #Store if file is of a 2D spectrum instead of a 1D spectrum
self.var1d = var1d
self.var2d = var2d #Store if file is a 2D variance map (like twodim but with variance instead of signal)
self.path = self.filepath() #Determine path and filename for fits file
fits_container = fits.open(self.path) #Open fits file and put data into memory
self.data = fits_container[0].data.byteswap().newbyteorder() #Grab data from fits file
		self.n_orders = len(fits_container[0].data[:,0]) #Count number of orders in fits file
fits_container.close() #Close fits file data
#self.data = fits.open(self.path) #Open fits file and put data into memory
#print(self.path)
def filepath(self): #Given input variables, determine the path to the target fits file
prefix = 'SDC' + self.band + '_' + self.date + '_' + self.frameno #Set beginning (prefix) of filename
if self.std: #If file is for a standard star and you want the flattened spectrum
postfix = '.spec_flattened.fits'
master_path = data_path
elif self.wave: #If file is the 1D wavelength calibration
#prefix = 'SKY_' + prefix #Old version reads in arclamp or sky wavelength calibraiton
#postfix = '.wvlsol_v1.fits'
#master_path = calib_path
postfix = '.wave.fits' #New version reads in A0V telluric wavelength solution
master_path = data_path
		elif self.wave_old: #This is the old way wavelength solutions were read in, use it if the new way doesn't work
			prefix = 'SKY_' + prefix #Old version reads in arclamp or sky wavelength calibration
postfix = '.wvlsol_v1.fits'
master_path = calib_path
elif self.twodim: #If file is the 2D spectrum
postfix = '.spec2d.fits'
master_path = data_path
elif self.var1d: #If file is 1D variance
postfix = '.variance.fits'
master_path = data_path
elif self.var2d: #If file is 2D variance map
postfix = '.var2d.fits'
master_path = data_path
elif self.s2n: #if the file is the 1D S/N spectrum
postfix = '.sn.fits'
master_path = data_path
else: #If you just want to read in a normal 1D spectrum (including the unnormalized standard star)
postfix = '.spec.fits'
master_path = data_path
return master_path + self.date +'/' + prefix + postfix #Return full path for fits file
def get(self): #Get fits file data with an easy to call definition
		if self.wave: #If wavelength file, the wavelengths are stored in nanometers, so we must convert them to um
			return self.data / 1e3
		else: #Else just return what was stored in the FITS file without modifying the data
			return self.data
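	#Hedged sketch of how filepath() resolves (date and frame number are illustrative):
	# fits_file(20141023, 170, 'K').path -> data_path + '20141023/SDCK_20141023_0170.spec.fits'
	# fits_file(20141023, 170, 'K', var2d=True).path -> data_path + '20141023/SDCK_20141023_0170.var2d.fits'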
#Class to store and analyze a 1D spectrum
class spec1d:
def __init__(self, fits_wave, fits_spec, fits_var):
try: #First try to see if the wavelength data is from a fits file
wavedata = fits_wave.get() #Grab fits data for wavelength out of object, first try as if it were a fits object
except: #If it is not a fits object, say something read out of a json file..
wavedata = array(fits_wave) #Just copy over the data and get on with it
specdata = fits_spec.get() #Grab fits data for flux out of object
vardata = fits_var.get() #Grab fits data for variance
		orders = [] #Set up empty list for storing each order
n_orders = fits_spec.n_orders
#n_orders = len(specdata[0].data[:,0]) #Count number of orders in spectrum
#wavedata = wavedata[0].data.byteswap().newbyteorder() #Read out wavelength and flux data from fits files into simpler variables
#fluxdata = specdata[0].data.byteswap().newbyteorder() #Read out wavelength and flux data from fits files into simpler variables
#noisedata = sqrt( vardata[0].data.byteswap().newbyteorder() ) #Read out noise from fits file into a simpler variable by taking the square root of the variance
noisedata = vardata**0.5 #Read out noise from fits file into a simpler variable by taking the square root of the variance
		for i in range(n_orders): #Loop through to process each order separately
orders.append( spectrum(wavedata[i,:], specdata[i,:], noise=noisedata[i,:]) ) #Append order to order list
self.n_orders = n_orders
self.orders = orders
def to_muler_list(self): #Generate a muler list
echelle_list = []
for i in range(self.n_orders):
echelle_obj = EchelleSpectrum(flux=self.orders[i].flux*u.ct, spectral_axis=self.orders[i].wave*u.micron, uncertainty=StdDevUncertainty(self.orders[i].noise))
echelle_list.append(echelle_obj)
		echelle_list = EchelleSpectrumList(echelle_list) #Convert echelle_list from an ordinary python list to an EchelleSpectrumList object
return echelle_list
def subtract_continuum(self, show = False, size=0, sizes=[501], use_combospec=False): #Subtract continuum and background with an iterative running median
if size != 0:
sizes = [size]
if use_combospec: #If user specifies to use combined spectrum
orders = [self.combospec] #Use the combined spectrum
else: #But is usually better to use individual orders instead
orders = self.orders
		for order in orders: #Apply continuum subtraction to each order separately
			flux = copy.deepcopy(order.flux)
			whole_order_trace = nanmedian(flux)
			flux = flux - whole_order_trace #Do an initial removal of the continuum level
nx = len(flux)
for size in sizes:
if size%2 == 0: size = size + 1 #Get rid of even sizes and replace with an odd version
half_sizes = array([-(size-1)/2, ((size-1)/2)+1], dtype='int')
unmodified_flux = copy.deepcopy(flux)
for i in range(nx):
x_left, x_right = i + half_sizes
if x_left < 0:
x_left = 0
elif x_right > nx:
x_right = nx
trace = nanmedian(unmodified_flux[x_left:x_right])
					if isnan(trace): #Zero out nans, infinities, or other weird things
trace = 0.
flux[i] -= trace
order.flux = flux
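	#Minimal standalone sketch of the running median continuum estimate used above (fake data and window size are illustrative):
	# fake_flux = 0.001*arange(2048) #A sloping continuum with no lines
	# continuum = array([nanmedian(fake_flux[max(i-250,0):min(i+251,2048)]) for i in range(2048)]) #501 pixel running median
	# continuum_subtracted = fake_flux - continuum #Near zero everywhere but the edges; features much narrower than the window would survive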
def old_subtract_continuum(self, show = False, size = half_block, lines=[], vrange=[-10.0,10.0], use_poly=False): #Subtract continuum using robust running median
if show: #If you want to watch the continuum subtraction
clf() #Clear interactive plot
first_order = True #Keep track of where we are in the so we only create the legend on the first order
		for order in self.orders: #Apply continuum subtraction to each order separately
old_order = copy.deepcopy(order) #Make copy of flux array so the original is not modified
if lines != []: #If user supplies a line list
old_order = mask_lines(old_order, lines, vrange=vrange, ndim=1) #Mask out lines with nan with some velocity range, before applying continuum subtraction
			wave = order.wave #Read in wavelength array
if use_poly:
p_init = models.Polynomial1D(degree=4)
fit_p = fitting.SimplexLSQFitter()
nx = len(old_order.flux)
x = arange(nx)
p = fit_p(p_init, x, old_order.flux)
subtracted_flux = order.flux - p(x)
else:
median_result_1d = robust_median_filter(old_order.flux, size = size) #Take a robust running median along the trace, this is the found continuum
subtracted_flux = order.flux - median_result_1d #Apply continuum subtraction
if show: #If you want to watch the continuum subtraction
if first_order: #If on the first order, make the legend along with plotting the order
plot(wave, subtracted_flux, label='Science Target - Continuum Subtracted', color='black')
plot(wave, old_order.flux, label='Science Target - Continuum Not Subtracted', color='blue')
plot(wave, median_result_1d, label='Continuum Subtraction', color='green')
					first_order = False #Now that we are done, just plot the data for all the other orders without making a long legend
else: #Else just plot the order
plot(wave, subtracted_flux, color='black')
plot(wave, old_order.flux, color='blue')
plot(wave, median_result_1d, color='green')
order.flux = subtracted_flux #Replace this order's flux array with one that has been continuum subtracted
if show: #If you want to watch the continuum subtraction
legend() #Show the legend in the plot
def normalize_continuum(self, show = False, size = half_block, lines=[], vrange=[-10.0,10.0], use_poly=False): #Normalize spectrum to continuum using robust running median
for order in self.orders: #Apply continuum subtraction to each order seperately
old_order = copy.deepcopy(order) #Make copy of flux array so the original is not modified
if lines != []: #If user supplies a line list
old_order = mask_lines(old_order, lines, vrange=vrange, ndim=1) #Mask out lines with nan with some velocity range, before applying continuum subtraction
			wave = order.wave #Read in wavelength array
median_result_1d = robust_median_filter(old_order.flux, size = size) #Take a robust running median along the trace, this is the found continuum
normalized_flux = order.flux / median_result_1d #Normalize continuum
order.flux = normalized_flux #Replace this order's flux array with one that has been continuum normalized
	def combine_orders(self, wave_pivot = default_wave_pivot): #Stitch orders together into one long spectrum
combospec = copy.deepcopy(self.orders[0]) #Create a spectrum object to append wavelength and flux to
order_length = len(combospec.flux)
blank = zeros([order_length*self.n_orders])#Create blanks to store new giant spectrum
combospec.flux = copy.deepcopy(blank) #apply blanks to everything
combospec.wave = copy.deepcopy(blank)
combospec.noise = copy.deepcopy(blank)
#combospec.s2n = copy.deepcopy(blank)
for i in range(self.n_orders-1, -1, -1): #Loop through each order to stitch one and the following one together
if i == self.n_orders-1: #If first order, simply throw it in
xl = 0
xr = order_length
goodpix_next_order = self.orders[i].wave > 0.
else: #Else find the wave pivots
[low_wave_limit, high_wave_limit] = [flat_nanmin(self.orders[i].wave), combospec.wave[xr-1]] #Find the wavelength of the edges of the already stitched orders and the order currently being stitched to the rest
wave_cut = low_wave_limit + wave_pivot*(high_wave_limit-low_wave_limit) #Find wavelength between stitched orders and order to stitch to be the cut where they are combined, with pivot set by global var wave_pivot
goodpix_next_order = self.orders[i].wave > wave_cut #Find pixels to the right of the where the order will be cut and stitched to the rest
if combospec.wave[xr-1] > wave_cut:
xl = where(combospec.wave > wave_cut)[0][0]-1 #Set left pixel to previous right pixel
else:
xl = xr-1
xr = xl + len(self.orders[i].wave[goodpix_next_order])
combospec.wave[xl:xr] = self.orders[i].wave[goodpix_next_order] #Stitch wavelength arrays together
combospec.flux[xl:xr] = self.orders[i].flux[goodpix_next_order] #Stitch flux arrays together
combospec.noise[xl:xr] = self.orders[i].noise[goodpix_next_order] #Stitch noise arrays together
#combospec.s2n[xl:xr] = self.orders[i].s2n[goodpix_next_order] #Stitch S/N arrays together
combospec.wave = combospec.wave[0:xr] #Get rid of extra pixels at end of arrays
combospec.flux = combospec.flux[0:xr]
combospec.noise = combospec.noise[0:xr]
#combospec.s2n = combospec.s2n[0:xr]
self.combospec = combospec #save the orders all stitched together
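	#Hedged usage sketch: after combining, the stitched spectrum lives in self.combospec (pivot value is illustrative):
	# sci1d_obj.combine_orders(wave_pivot=0.5) #A pivot of 0.5 would cut each order overlap at its halfway point
	# plot(sci1d_obj.combospec.wave, sci1d_obj.combospec.flux) #One continuous wavelength and flux array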
	#Simple function for plotting the 1D spectrum's orders
def plot(self, combospec=False, **kwargs):
#clf()
if combospec: #If user specifies, plot the combined spectrum (stitched together orders)
plot(self.combospec.wave, self.combospec.flux, **kwargs)
		else: #or else just plot each order separately (each a different color)
for order in self.orders: #Plot each order
plot(order.wave, order.flux, **kwargs)
xlabel('Wavelength [$\mu$m]')
ylabel('Relative Flux')
#show()
#draw()
#Plot spectrum with lines from line list overplotted
def plotlines(self, linelist, threshold=0.0, model='', rows=5, ymax=0.0, fontsize=9.5, relative=False):
if not hasattr(self, 'combospec'): #Check if a combined spectrum exists
			print('No spectrum of combined orders found. Creating combined spectrum.')
self.combine_orders() #Combine spectrum before plotting, if not done already
#clf() #Clear plot
		min_wave = flat_nanmin(self.combospec.wave) #Find minimum wavelength
		max_wave = flat_nanmax(self.combospec.wave) #Find maximum wavelength
		if ymax == 0.0: #If user does not set maximum y, do it automatically
max_flux = nanmax(self.combospec.flux, axis=0)
else: #else set it to what the user wants
max_flux = ymax / 1.4
if relative: #If relative set = true, make scale relative to whatever ymax was set to
self.combospec.flux = self.combospec.flux / ymax
ymax = 1.0
max_flux = ymax / 1.4
total_wave_coverage = max_wave - min_wave #Calculate total wavelength coverage
if (model != '') and (model != 'none'): #Load model for comparison if needed
model_wave, model_flux = loadtxt(model, unpack=True) #Read in text file of model with format of two columns with wave <tab> flux
model_max_flux = nanmax(model_flux[logical_and(model_wave > min_wave, model_wave < max_wave)], axis=0) #find tallest line in model
normalize_model = max_flux / model_max_flux #normalize tallest line in model to the tallest line in IGRINS data
model_flux = normalize_model * model_flux #Apply normalization to model spectrum to match IGRINS spectrum
#fig = figure(figsize=(15,11)) #Set proportions
for j in range(rows): #Loop breaks spectrum figure into multiple rows
wave_range = [min_wave + total_wave_coverage*(float(j)/float(rows)), #Calculate wavelength range for a single row
min_wave + total_wave_coverage*(float(j+1)/float(rows))]
subplot(rows,1,j+1) #split into multiple plots
sci_in_range = logical_and(self.combospec.wave > wave_range[0], self.combospec.wave < wave_range[1]) #Find portion of spectrum in single row
sub_linelist = linelist.parse(wave_range[0], wave_range[1]) #Find lines in single row
#wave_to_interp = append(insert(self.combospec.wave, 1.0, 0.0), 3.0) #Interpolate IGRINS spectrum to allow line labels to be placed in correct position in figure
#flux_to_interp = append(insert(self.combospec.flux, 0, 0.0), 0.0)
wave_to_interp = hstack([1.4, self.combospec.wave, 2.5]) ##Interpolate IGRINS spectrum to allow line labels to be placed in correct position in figure
flux_to_interp = hstack([0.0, self.combospec.flux, 0.0])
sci_flux_interp = interp1d(wave_to_interp, flux_to_interp) #Get interpolation object of science spec.
sub_linelist.flux = sci_flux_interp(sub_linelist.wave) #Get height of spectrum for each individual line
			for i in range(len(sub_linelist.wave)):#Output label for each emission line
other_lines = abs(sub_linelist.wave - sub_linelist.wave[i]) < 0.00001 #Window (in microns) to check for regions of higher flux nearby so only the brightest lines (in this given range) are labeled.
#if sub_linelist.flux[i] > max_flux*threshold and nanmax(sub_linelist.flux[other_lines], axis=0) == sub_linelist.flux[i]: #if line is the highest of all surrounding lines within some window
#if sub_linelist.label[i] == '{OH}': #If OH lines appear in line list.....
#mask_these_pixels = abs(self.combospec.wave-sub_linelist.wave[i]) < 0.00006 #Create mask of OH lines...
#self.combospec.flux[mask_these_pixels] = nan #Turn all pixels with OH lines into numpy nans so the OH lines don't get plotted
#plot([linelist_wave[i], linelist_wave[i]], [linelist_flux[i]+max_flux*0.025, max_flux*0.92], ':', color='gray')
#text(linelist_wave[i], linelist_flux[i]+max_flux*0.02, '$\oplus$', rotation=90, fontsize=9, verticalalignment='bottom', horizontalalignment='center', color='black')
#else: #If no OH lines found, plot lines on figure
#plot([sub_linelist.wave[i], sub_linelist.wave[i]], [sub_linelist.flux[i], sub_linelist.flux[i] + max_flux*0.065], ':', color='black') #Plot location of line as a dotted line a little bit above the spectrum
#text(sub_linelist.wave[i], sub_linelist.flux[i] + max_flux*0.073, sub_linelist.label[i], rotation=90, fontsize=fontsize, verticalalignment='bottom', horizontalalignment='center', color='black') #Label line with text
plot([sub_linelist.wave[i], sub_linelist.wave[i]], [sub_linelist.flux[i], sub_linelist.flux[i] + max_flux*0.065], ':', color='black') #Plot location of line as a dotted line a little bit above the spectrum
text(sub_linelist.wave[i], sub_linelist.flux[i] + max_flux*0.073, sub_linelist.label[i], rotation=90, fontsize=fontsize, verticalalignment='bottom', horizontalalignment='center', color='black') #Label line with text
plot(self.combospec.wave[sci_in_range], self.combospec.flux[sci_in_range], color='black') #Plot actual spectrum
if (model != '') and (model != 'none'): #Load model for comparison if needed
model_in_range = logical_and(model_wave > wave_range[0], model_wave < wave_range[1]) #Find portion of model spectrum in a given row
plot(model_wave[model_in_range], model_flux[model_in_range], color='red') #Plot model spectrum
ylim([-0.05*max_flux, 1.4*max_flux]) #Set y axis range to show spectrum but also allow user to vew lines that are labeled
			xlim(wave_range) #Set x axis range
ylabel('Relative Flux') #Set y axis label
if j == rows-1: #only put x label on final plot
xlabel('Wavelength [$\mu$m]') #Set x-axis label
minorticks_on() #Show minor tick marks
gca().set_autoscale_on(False) #Turn off autoscaling
show() #Show spectrum
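	#Hedged usage sketch for plotlines (line list variable and ymax are illustrative assumptions):
	# sci1d_obj.plotlines(my_linelist, rows=5, ymax=2.0) #Split the spectrum into 5 rows and label lines above the flux threshold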
	def mask_OH(self, width=0.00006, input_linelist=OH_line_list): #Mask OH lines, use only after processing and combining spectrum to make a cleaner 1D spectrum
OH_lines = lines(input_linelist, delta_v=0.0) #Load OH line list
parsed_OH_lines = OH_lines.parse( flat_nanmin(self.combospec.wave), flat_nanmax(self.combospec.wave))
for i in range(len(parsed_OH_lines.wave)): #Loop through each line
mask_these_pixels = abs(self.combospec.wave-parsed_OH_lines.wave[i]) < width #Create mask of OH lines...
self.combospec.flux[mask_these_pixels] = nan #Turn all pixels with OH lines into numpy nans so the OH lines don't get plotted
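	#Hedged usage sketch: the default width of 0.00006 um corresponds to masking roughly +/-0.6 Angstroms around each OH line center
	# sci1d_obj.mask_OH(width=0.00006)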
	def savespec(self, name='1d_spectrum.dat'): #Save 1D spectrum, set 'name' to be the filename you want
		if not hasattr(self, 'combospec'): #Check if a combined spectrum exists
			print('No spectrum of combined orders found. Creating combined spectrum.')
			self.combine_orders() #Combine spectrum before saving, if not done already
		savetxt(save.path + name, transpose([self.combospec.wave, self.combospec.flux, self.combospec.noise])) #Save 1D spectrum as simple .dat file with wavelength, flux, and noise in separate columns
def fitgauss(self,line_list, v_range=[-30.0,30.0]): #Fit 1D gaussians to the 1D spectra and plot results
self.fwhm = zeros(len(line_list.lab_wave)) #Store FWHM of all found lines
fit_g = fitting.LevMarLSQFitter() #Initialize minimization algorithim for fitting gaussian
all_fwhm = array([])
all_wave = array([])
all_x_pixels = array([])
order_count = 0
interp_velocity_grid = arange(v_range[0], v_range[1], 0.1) #Velocity grid to interpolate line profiles onto
with PdfPages(save.path + 'check_line_widths.pdf') as pdf:
for order in self.orders:
parsed_line_list = line_list.parse(flat_nanmin(order.wave), flat_nanmax(order.wave))
finite_pixels = isfinite(order.flux) #store which pixels are finite
n_lines = len(parsed_line_list.label) #Number of spectral lines
fwhm = zeros(n_lines) #Create array to store FWHM of gaussian fits for each line
x_pixels = zeros(n_lines) #Create array to store which x pixel the line is centered on
for i in range(n_lines): #Loop through each individual line
line_wave = parsed_line_list.wave[i]
x_pixels[i] = where(order.wave >= line_wave)[0][0]
all_velocity = c * ( (order.wave - line_wave) / line_wave )
goodpix = finite_pixels & (all_velocity > v_range[0]) & (all_velocity < v_range[1])
flux = order.flux[goodpix]
velocity = all_velocity[goodpix]
if len(flux) > 2:
g_init = models.Gaussian1D(amplitude=max(flux), mean=0.0, stddev=8.0) #Initialize gaussian model for this specific line, centered at 0 km/s with a first guess at the dispersion to be the spectral resolution
g = fit_g(g_init, velocity, flux) #Fit gaussian to line
g_mean = g.mean.value #Grab mean of gaussian fit
g_stddev = g.stddev.value
g_fwhm = g_stddev * 2.355
self.fwhm[i] = g_fwhm
						g_flux = g(velocity) #Grab the Gaussian model flux evaluated at each velocity
g_residuals = flux - g_flux
#check_pixels = abs(velocity - g_mean) <= quality_check_window
#if g_fwhm > 0.0 and g_fwhm < 50.0 and sum(abs(g_residuals[check_pixels]))/sum(flux[check_pixels]) < 0.10: #Quality check, gausdsian fit should (mostly) get most of the residual line flux
						if abs(g_mean) < 3.0 and g_fwhm > 6.0 and g_fwhm < 10.0 and nansum(abs(g_residuals))/abs(nansum(flux)) < 0.3 and nansum(g_flux) > 0.: #Quality check, gaussian fit should (mostly) get most of the residual line flux
#self.plot_1d_velocity(i+1) #Plot 1D spectrum in velocity space (corrisponding to a PV Diagram), called when viewing a line
#fwhm.append(g_fwhm)
fwhm[i] = g_fwhm
clf() #Clear plot
title(parsed_line_list.label[i] + ', FWHM='+str(g_fwhm) + ', Wavelength=' + str(line_wave))
plot(velocity, flux, color = 'blue', label='Flux')
plot(velocity, g(velocity), color = 'red', label='Gaussian')
plot(velocity, g_residuals, color = 'green', label='Residuals')
interpolate_line_profile = interp1d(velocity, flux, kind='cubic', bounds_error=False) #Interpolate line profile
plot(interp_velocity_grid, interpolate_line_profile(interp_velocity_grid), color='blue', label='Interpolation')
legend()
pdf.savefig()
#print('mean = ', g_mean)
#print('stddev = ', g_stddev)
#print('FWHM = ', g_fwhm)
goodfit = fwhm > 0.
clf()
plot(parsed_line_list.wave[goodfit], fwhm[goodfit], 'o')
title('Order = '+str(order_count))
xlabel('Wavelength')
ylabel('FWHM')
xlim([flat_nanmin(order.wave), flat_nanmax(order.wave)])
pdf.savefig()
clf()
plot(x_pixels[goodfit], fwhm[goodfit], 'o')
xlim([0, len(order.wave)])
title('Order = '+str(order_count))
xlabel('x pixel')
ylabel('FWHM')
pdf.savefig()
all_x_pixels = concatenate([all_x_pixels, x_pixels[goodfit]])
all_wave = concatenate([all_wave, parsed_line_list.wave[goodfit]])
all_fwhm = concatenate([all_fwhm, fwhm[goodfit]])
order_count = order_count + 1
clf()
plot(all_wave, all_fwhm, 'o')
title('ALL ORDERS')
xlabel('Wavelength')
ylabel('FWHM')
pdf.savefig()
clf()
plot(all_x_pixels, all_fwhm, 'o')
title('ALL ORDERS')
xlabel('x pixel')
ylabel('FWHM')
pdf.savefig()
print('Number of lines with decent Gaussian fits = ', len(all_fwhm))
print('All Lines Median FWHM = ', median(all_fwhm))
print('All Lines Mean FWHM = ', mean(all_fwhm))
print('All Lines std-dev FWHM = ', std(all_fwhm))
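		#The factor 2.355 used above is the Gaussian sigma-to-FWHM conversion; a quick standalone check (illustrative):
		# print(2.0*sqrt(2.0*log(2.0))) #FWHM = 2*sqrt(2*ln(2))*sigma ~ 2.3548*sigma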
def c_deredden(self, c_value): #Deredden spectrum with a value of "c" measured for H-beta from the literature, while assuming the extinction law of Rieke & Lebofsky (1985)
#A_lambda = array([1.531, 1.324, 1.000, 0.748, 0.482, 0.282, 0.175, 0.112, 0.058]) #(A_lambda / A_V) extinction curve from Rieke & Lebofsky (1985) Table 3
#l = array([ 0.365, 0.445, 0.551, 0.658, 0.806, 1.22 , 1.63 , 2.19 , 3.45 ]) #Wavelengths for extinction curve from Rieke & Lebofsky (1985)
#extinction_curve = interp1d(l, A_lambda, kind='quadratic') #Create interpolation object for extinction curve from Rieke & Lebofsky (1985)
		A_V = 0.83446 * 2.5 * c_value #Calculate A_V from c(H-beta), using the linearly interpolated A_V/A_Hbeta from Rieke & Lebofsky (1985)
		A_K = 0.118 * A_V #Convert A_V to A_K from Fitzpatrick (1998)
#a = 2.14 #extinction curve in the form of a power law from Stead and Hoare (2009)
a = 1.8
A_lambda = A_K * self.combospec.wave**(-a) / 2.19**(-a) #Calculate an extinction correction
#h.F *= 10**(0.4*A_lambda) #Apply extinction correction
#dereddening = 10**(0.4*extinction_curve(self.combospec.wave)*A_V) #Calculate dereddening as a function of wavelength
#self.combospec.flux = self.combospec.flux * dereddening #Apply dereddening to flux and noise
#self.combospec.noise = self.combospec.noise * dereddening
self.combospec.flux = self.combospec.flux * 10**(0.4*A_lambda) #Apply dereddening to flux and noise
self.combospec.noise = self.combospec.noise * 10**(0.4*A_lambda)
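	#Hedged worked example of the math above for c(H-beta) = 1.0 (value is illustrative):
	# A_V = 0.83446 * 2.5 * 1.0 ~ 2.09 mag, so A_K = 0.118 * A_V ~ 0.25 mag
	# At 2.19 um the flux correction is 10**(0.4*0.25) ~ 1.25, growing toward shorter wavelengths as wave**(-1.8)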
#Class to store and analyze a 2D spectrum
class spec2d:
def __init__(self, fits_wave, fits_spec, fits_var=[], mask_cosmics=False, interpolate_slit=False):
try: #First try to see if the wavelength data is from a fits file
wavedata = fits_wave.get() #Grab fits data for wavelength out of object, first try as if it were a fits object
except: #If it is not a fits object, say something read out of a json file..
			wavedata = array(fits_wave) #Just copy over the data and get on with it
		spec2d = fits_spec.get() #Grab all fits data
var2d = fits_var.get() #Grab all variance data from fits file
n_orders = fits_spec.n_orders
#n_orders = len(spec2d[1].data[:,0]) #Calculate number of orders to use
#slit_pixel_length = len(spec2d[0].data[0,:,:]) #Height of slit in pixels for this target and band
# if interpolate_slit:
# slit_pixel_length = 500 #Height of slit in pixels if we reinterpolate the slit onto a common grid
# else:
# slit_pixel_length = slit_length #Height of slit in pixels for this target and band
slit_pixel_length = slit_length #Height of slit in pixels for this target and band
		orders = [] #Set up empty list for storing each order
#wavedata = wavedata[0].data.byteswap().newbyteorder()
for i in range(n_orders):
#wave1d = spec2d[1].data[i,:].byteswap().newbyteorder() #Grab wavelength calibration for current order
wave1d = wavedata[i,:] #Grab wavelength calibration for current order
#wave2d = tile(wave1d, [slit_pixel_length,1]) #Create a 2D array storing the wavelength solution, to be appended below the data
#nx, ny, nz = shape(spec2d[0].data.byteswap().newbyteorder())
#data2d = spec2d[0].data[i,ny-slit_pixel_length-1:ny-1,:].byteswap().newbyteorder() #Grab 2D Spectrum of current order
nx, ny, nz = shape(spec2d)
# zero_mask = zeros([ny,nz]) #Find any bad pixels/Cosmic-rays zeroed out by PLP
# zero_mask[spec2d[i,:,:]==0.] = 1.0
# zero_mask[roll(spec2d[i,:,:],1,axis=0)==0.] = 1.0 #...along with adjacent pixels...
# zero_mask[roll(spec2d[i,:,:],-1,axis=0)==0.] = 1.0
# zero_mask[roll(spec2d[i,:,:],1,axis=1)==0.] = 1.0
# zero_mask[roll(spec2d[i,:,:],-1,axis=1)==0.] = 1.0
# spec2d[i,:,:][zero_mask==1.0] = nan #...and turn them into nans, so I don't accidently subtract flux during continuum subtraction (and other possible glitches)
# var2d[i,:,:][zero_mask==1.0] = nan
			if interpolate_slit or ny!=slit_pixel_length: #If user specifies interpolating or the slit size does not match the slit size set by the user, interpolate the darn thing
				data2d = regrid_slit(spec2d[i,:,:], size=slit_pixel_length)
				noise2d = regrid_slit(var2d[i,:,:]**0.5, size=slit_pixel_length)
				print('Slit pixel length does not match, interpolating to fix it!')
else: #Or just read it in, super simple right?
data2d = spec2d[i,:,:]
noise2d = var2d[i,:,:]**0.5
# else:
# data2d = spec2d[i,ny-slit_pixel_length-1:ny-1,:]
# #data2d = spec2d[0].data[i,ny-slit_pixel_length-1:ny-1,:].byteswap().newbyteorder() #Grab 2D Spectrum of current order
# #data2d = spec2d[0].data[i,0:slit_pixel_length,:].byteswap().newbyteorder() #Grab 2D Spectrum of current order
# #noise2d = sqrt( var2d[0].data[i,0:slit_pixel_length,:].byteswap().newbyteorder() ) #Grab 2D variance of current order and convert to noise with sqrt(variance)
# #noise2d = sqrt( var2d[0].data[i,ny-slit_pixel_length-1:ny-1,:].byteswap().newbyteorder() ) #Grab 2D variance of current order and convert to noise with sqrt(variance)
# noise2d = sqrt(var2d[i,ny-slit_pixel_length-1:ny-1,:])
if mask_cosmics: #If user specifies to filter out cosmic rays
#data2d_vert_sub = data2d - nanmedian(data2d, 0) #subtract vertical spectrum to get rid of sky lines and other junk
#cosmics_found = (abs( (data2d/robust_median_filter(data2d,size=cosmic_horizontal_mask))-1.0) >cosmic_horizontal_limit) & (abs(data2d/noise2d) > cosmic_s2n_min) #Find cosmics where the signal is 100x what is expected from a 3x3 median filter
cosmics_found = (abs( (data2d/median_filter(data2d,size=cosmic_horizontal_mask))-1.0) >cosmic_horizontal_limit) & (abs(data2d/noise2d) > cosmic_s2n_min) #Find cosmics where the signal is 100x what is expected from a 3x3 median filter
data2d[cosmics_found] = nan #And blank the cosmics out
noise2d[cosmics_found] = nan
orders.append( spectrum(wave1d, data2d, noise = noise2d) )
self.orders = orders
self.n_orders = n_orders
self.slit_pixel_length = slit_pixel_length
#This function applies continuum and background subtraction to one order
def old_subtract_continuum(self, show = False, size = half_block, lines=[], vrange=[-10.0,10.0], linear_fit=False, mask_outliers=False, use_combospec=False, split_trace=False):
if linear_fit: #WARNING EXPERIMENTAL, If a linear fit, initialize the polynomial fitting routines
p_init = models.Polynomial1D(degree=2)
fit_p = fitting.SimplexLSQFitter()
			N = 16 #Divide the order into N segments and calculate the trace along each
set_size = 2048 / N #Size of each segment to find median of
median_set = zeros([N, self.slit_pixel_length]) #Initialize variable to hold the median set
x_for_median_set = arange(0, 2048, set_size) + (set_size/2) #Calculate x pixels for the linear fit for the continuum
x = arange(2048)
if use_combospec: #If user specifies to use combined spectrum
orders = [self.combospec] #Use the combined spectrum
		else: #Else use each order separately
			orders = self.orders
		for order in orders: #Apply continuum subtraction to each order separately
#if sum(isnan(order.flux)) / size(order.flux) < 0.8: #Only try to subtract the continuum if at least 80% of the pixels exist
#print('order = ', i, 'number of dimensions = ', num_dimensions)
old_order = copy.deepcopy(order)
flux_length = shape(old_order.flux)[1] #Get length (in wavelength space) of flux array
old_order.flux[nansum(isnan(old_order.flux), axis=1).astype('float')/float(flux_length) > 0.5, :] = 0. #If an entire row is majority nan, zero it out
if lines != []: #If user supplies a line list
old_order = mask_lines(old_order, lines, vrange=vrange, ndim=2) #Mask out lines with nan with some velocity range, before applying continuum subtraction
#stop()
			if use_combospec: #If user wants to use the whole combined spectrum, make separate traces for H & K bands
h_band = old_order.wave < 1.85
k_band = old_order.wave > 1.85
h_trace = nanmedian(old_order.flux[:,h_band], axis=1) #Get trace of continuum from median of h-band
k_trace = nanmedian(old_order.flux[:,k_band], axis=1) #Get trace of continuum from median of k-band
max_y = where(h_trace == flat_nanmax(h_trace))[0][0] #Find peak of trace
norm_h_trace = h_trace / median(h_trace[max_y-1:max_y+1]) #Normalize trace
max_y = where(k_trace == flat_nanmax(k_trace))[0][0] #Find peak of trace
norm_k_trace = k_trace / median(k_trace[max_y-1:max_y+1]) #Normalize trace
#norm_h_trace[isnan(norm_h_trace)] = 0. #Zero out nans in trace incase an entire row in the spectrum is nans
#norm_k_trace[isnan(norm_k_trace)] = 0. #Zero out nans in trace incase an entire row in the spectrum is nans
else:
				if split_trace: #If the trace varies significantly across orders (for whatever reason), you can try to split the trace and then average the results, this might give a better continuum subtraction
					half_way_point = shape(old_order.flux)[1]//2 #Integer division so the result can be used as a slice index
					trace = 0.5*(nanmedian(old_order.flux[:,0:half_way_point], axis=1) + nanmedian(old_order.flux[:,half_way_point:], axis=1))
else: #Or else just use the whole order to find the trace, this is the default
trace = nanmedian(old_order.flux, axis=1) #Get trace of continuum from median of whole order
trace[isnan(trace)] = 0.0 #Set nan values near edges to zero
max_y = where(trace == flat_nanmax(trace))[0][0] #Find peak of trace
norm_trace = trace / median(trace[max_y-1:max_y+1]) #Normalize trace
#norm_trace[isnan(norm_trace)] = 0. #Zero out nans in trace incase an entire row in the spectrum is nans
if mask_outliers: #Mask columns that deviate significantly from the median continuum trace (i.e. emission lines, cosmics, etc.), if the user so desires
normalize_order_by_column = old_order.flux / expand_dims(nanmedian(old_order.flux, axis=0), axis=0) #Normalize each column
if use_combospec: #If user is using the combined spectrum to calculate the H and K band traces separately
norm_trace = (norm_h_trace + norm_k_trace) / 2.0 #Simply average the two traces together and use that to find outliers
divide_normalized_order_by_trace = normalize_order_by_column / expand_dims(norm_trace, axis=1) #Divide the normalized columns by the normalized trace
deviant_pixels = abs(divide_normalized_order_by_trace) > 200.0 #Find pixels that significantly deviate from the trace, this would be anything from emission lines to high noise
find_deviant_columns = sum(deviant_pixels, axis=0) > 10 #Find columns with more than 10 deviant pixels
old_order.flux[:, find_deviant_columns] = nan #Mask out the deviant columns
#trace_order = ones([61,2048])*expand_dims(trace, axis = 1)
#flatten_order_by_trace = old_order.flux / trace_order
#set_each_column_to_be_unity = flatten_order_by_trace / nanmedian(flatten_order_by_trace, axis=0)
#old_order.flux[isnan(old_order.flux)] = 0. #Zero out nans when normalizing the flux to get rid of some annoying errors
if linear_fit: ##WARNING EXPERIMENTAL, If user wants to use a line fit of the trace along the x direction
for i in range(N): #Loop through each segment
median_set[i,:] = nanmedian(old_order.flux[:,set_size*i: set_size*(i+1)-1], axis=1) #Find trace of this single segment
normalized_median_set = nanmax(median_set, axis=1) #Collapse each segment's trace to its peak value along the slit
finite_pixels = isfinite(normalized_median_set)
p = fit_p(p_init, x_for_median_set[finite_pixels], normalized_median_set[finite_pixels])
result_2d = p(x) * expand_dims(trace, axis=1)
# p_init = models.Polynomial1D(degree=4)
# fit_p = fitting.SimplexLSQFitter()
# nx = len(old_order.flux[0,:])
# ny = len(old_order.flux[:,0])
# x = arange(nx)
# result_2d = zeros([ny,nx])
# for row in range(ny):
# if any(isfinite(old_order.flux[row,:])):
# #stop()
# p = fit_p(p_init, x, old_order.flux[row,:])
# result_2d[row,:] = p(x)
subtracted_flux = order.flux - result_2d
elif use_combospec: #If user wants to use the whole combined spectrum, make seperate traces for H & K bands
subtracted_flux = zeros(shape(old_order.flux))
#Do H-band
median_result_1d = robust_median_filter(old_order.flux[max_y-1:max_y+1, h_band], size = size) #Take a robust running median along the trace
median_result_2d = norm_h_trace * expand_dims(median_result_1d, axis = 1) #Expand trace into 2D by multiplying by the robust median
median_result_2d = median_result_2d.transpose() #Flip axes to match flux axes
subtracted_flux[:,h_band] = order.flux[:,h_band] - median_result_2d #Apply continuum subtraction
#Do K-band
median_result_1d = robust_median_filter(old_order.flux[max_y-1:max_y+1, k_band], size = size) #Take a robust running median along the trace
median_result_2d = norm_k_trace * expand_dims(median_result_1d, axis = 1) #Expand trace into 2D by multiplying by the robust median
median_result_2d = median_result_2d.transpose() #Flip axes to match flux axes
subtracted_flux[:,k_band] = order.flux[:,k_band] - median_result_2d #Apply continuum subtraction
else: #If user wants to use running median filter
median_result_1d = robust_median_filter(old_order.flux[max_y-1:max_y+1, :], size = size) #Take a robust running median along the trace
median_result_2d = norm_trace * expand_dims(median_result_1d, axis = 1) #Expand trace into 2D by multiplying by the robust median
median_result_2d = median_result_2d.transpose() #Flip axes to match flux axes
subtracted_flux = order.flux - median_result_2d #Apply continuum subtraction
order.flux = subtracted_flux
#if show: #Display subtraction in ds9 if user sets show = True
#if num_dimensions == 2:
#show_file = fits.PrimaryHDU(cont_sub.combospec.flux) #Set up fits file object
#show_file.writeto(scratch_path + 'test_contsub_median.fits', overwrite = True) #Save fits file
#show_file = fits.PrimaryHDU(old_sci.combospec.flux) #Set up fits file object
#show_file.writeto(scratch_path + 'test_contsub_before.fits', overwrite = True) #Save fits file
#show_file = fits.PrimaryHDU(sci.combospec.flux) #Set up fits file object
#show_file.writeto(scratch_path + 'test_contsub_after.fits', overwrite = True) #Save fits file
#ds9.open()
#ds9.show(scratch_path + 'test_contsub_before.fits')
#ds9.show(scratch_path + 'test_contsub_median.fits', new = True)
#ds9.show(scratch_path + 'test_contsub_after.fits', new = True)
#ds9.set('zoom to fit')
#ds9.set('scale log') #Set view to log scale
#ds9.set('scale ZScale') #Set scale limits to ZScale, looks okay
#ds9.set('lock scale')
#ds9.set('lock colorbar')
#ds9.set('frame lock image')
#wait()
##ds9.close()
#elif num_dimensions == 1:
#clf()
#plot(sci.combospec.wave, sci.combospec.flux, label='Science Target - Continuum Subtracted')
#plot(old_sci.combospec.wave, old_sci.combospec.flux, label='Science Target - Continuum Not Subtracted')
#plot(cont_sub.combospec.wave, cont_sub.combospec.flux, label='Continuum Subtraction')
#legend()
#else:
#print('ERROR: Unable to determine number of dimensions of data, something went wrong')
def subtract_continuum(self, lines=[], vrange=[-50,50], show = False, size=0, sizes=[501], use_combospec=False): #Subtract continuum and background with an iterative running median
if size != 0:
sizes = [size]
if use_combospec: #If user specifies to use combined spectrum
orders = [self.combospec] #Use the combined spectrum
else: #But it is usually better to use the individual orders instead
orders = self.orders
for order in orders: #Apply continuum subtraction to each order separately
if lines != []: #If user supplies a line list
order_copy = mask_lines(copy.deepcopy(order), lines, vrange=vrange, ndim=2) #Mask out lines with nan over some velocity range before applying continuum subtraction
flux = order_copy.flux
else:
flux = copy.deepcopy(order.flux)
whole_order_trace = nanmedian(flux, axis=1)
whole_order_trace[~isfinite(whole_order_trace)] = 0. #Zero out nans, infinities, or other weird things
flux = flux - whole_order_trace[:,newaxis] #Do an initial removal of the continuum trace
ny, nx = shape(flux)
trace = zeros(shape(flux)) + whole_order_trace[:,newaxis]
for size in sizes:
if size%2 == 0: size = size + 1 #Get rid of even sizes
half_sizes = array([-(size-1)/2, ((size-1)/2)+1], dtype='int')
unmodified_flux = copy.deepcopy(flux)
for i in range(nx):
x_left, x_right = i + half_sizes
if x_left < 0:
x_left = 0
elif x_right > nx:
x_right = nx
trace[:,i] += nanmedian(unmodified_flux[:,x_left:x_right], axis=1)
trace[~isfinite(trace)] = 0. #Zero out nans, infinities, or other weird things
order.flux -= trace
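#A minimal sketch (not part of the pipeline) of the iterative running-median idea used above, on a
#toy array; "my_flux" and the window sizes here are hypothetical. Commented out so this module still imports cleanly.
# import numpy as np
# my_flux = np.random.normal(size=(61, 2048)) #Toy order: 61 slit pixels x 2048 spectral pixels
# residual = my_flux - np.nanmedian(my_flux, axis=1)[:, np.newaxis] #First pass: remove the whole-order trace
# for window in [501, 251]: #Refine with running medians of decreasing window size
#     frozen = residual.copy() #Take medians from a frozen copy, as the method above does
#     for i in range(residual.shape[1]):
#         x1, x2 = max(i - window//2, 0), min(i + window//2 + 1, residual.shape[1])
#         residual[:, i] -= np.nanmedian(frozen[:, x1:x2], axis=1) #Subtract the locally estimated continuum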
def test_fast_subtract_continuum(self, show = False, size=0, sizes=[501], use_combospec=False): #Subtract continuum and background with an iterative running median
if size != 0:
sizes = [size]
if use_combospec: #If user specifies to use combined spectrum
orders = [self.combospec] #Use the combined spectrum
else: #But it is usually better to use the individual orders instead
orders = self.orders
for order in orders: #Apply continuum subtraction to each order separately
flux = copy.deepcopy(order.flux)
ny, nx = shape(flux)
whole_order_trace = nanmedian(flux, axis=1)
whole_order_trace[~isfinite(whole_order_trace)] = 0. #Zero out nans, infinities, or other weird things
flux = flux - whole_order_trace[:,newaxis] #Do an initial removal of the continuum trace
for size in sizes:
block_of_nans = empty([ny, size])
block_of_nans[:] = nan
flux = hstack([block_of_nans, flux, block_of_nans]) #Pad the edges with nans so the window never runs off the array
indices = (arange(size) - size//2)[:,newaxis] + (arange(nx) + size) #Create a giant 2D array of window indices (integer arithmetic so they can be used as indices)
flux -= nanmedian(hstack([block_of_nans, flux, block_of_nans])[newaxis, indices], axis=1)[size:nx,:] #WARNING EXPERIMENTAL: vectorized running median
#flux -= median_filter(flux, size=[1,size], mode='reflect')
order.flux = flux[:, size:size+nx] #Strip the nan padding off the edges
def fill_nans(self, size=5): #Fill nans with median of nearby pixels in same column
#ny = self.slit_pixel_length
ny, nx = shape(self.combospec.flux)
half_sizes = array([-(size-1)/2, ((size-1)/2)+1], dtype='int')
#is_nan = ~isfinite(self.combospec.flux)
for i in range(nx):
current_column_flux = copy.deepcopy(self.combospec.flux[:,i])
current_column_noise = copy.deepcopy(self.combospec.noise[:,i])
#use_pixels = ~is_nan[y1:y2,j]
for j in range(ny):
if ~isfinite(current_column_flux[j]):
y1, y2 = j + half_sizes #Get top and bottom indices
if y1 < 0: y1=0
if y2 > ny: y2 = ny
current_column_flux[j] = nanmedian(self.combospec.flux[y1:y2,i])
current_column_noise[j] = nanmedian(self.combospec.noise[y1:y2,i])
self.combospec.flux[:,i] = current_column_flux
self.combospec.noise[:,i] = current_column_noise
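#Hedged usage sketch (hypothetical object name "sci"): after combining orders, fill isolated nan
#pixels with the median of their column neighborhood. Commented out so the module imports cleanly.
# sci.combine_orders() #Build sci.combospec first
# sci.fill_nans(size=5) #Each nan is replaced by the nanmedian of the ~5 pixels above/below it in the same column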
# def fill_nans(self, size=5): #Fill nans and empty edge pixels with a nanmedian filter of a given size on a column by column basis, done with the combined spectrum combospec
# ny = self.slit_pixel_length
# half_sizes = array([-(size-1)/2, ((size-1)/2)+1], dtype='int')
# #half_size = (size-1)/2 #Get +/- number of pixels for the size
# for i in range(ny):
# #y1, y2 = i - half_size, i+half_size #Get top and bottom indicies
# y1, y2 = i + half_sizes #Get top and bottom indicies
# if y1 < 0: y1=0
# if y2 > ny: y2 = ny
# nanmedian_row_flux = nanmedian(self.combospec.flux[y1:y2, :], axis=0) #Grab nanmedian flux and variance values for current row
# nanmedian_row_var = nanmedian((self.combospec.noise[y1:y2, :])**2, axis=0)
# find_nans = ~isfinite(self.combospec.flux[i, :]) #Locate holes to be filled
# self.combospec.flux[i, :][find_nans] = nanmedian_row_flux[find_nans] #Fill the holes with the median filter values
# self.combospec.noise[i, :][find_nans] = nanmedian_row_var[find_nans]**0.5
def subtract_median_vertical(self, use_edges=0, use_range=0): #Try to subtract OH residuals and other sky junk by median collapsing along slit and subtracting result. WARNING: ONLY USE FOR POINT OR SMALL SOURCES!
for i in range(self.n_orders-1): #Loop through the orders (note: the last order is skipped)
if use_edges > 0: #If user specifies using edges, use this many pixels from the edge on each side for the median collapse
edges = concatenate([arange(use_edges), self.slit_pixel_length - arange(use_edges) - 1])
median_along_slit = nanmedian(self.orders[i].flux[edges,:], axis=0) #Collapse median along slit
elif use_range != 0: #If user specifies a range of pixels to use along the slit (low values start at bottom)
median_along_slit = nanmedian(self.orders[i].flux[use_range[0]:use_range[1], :], axis=0)
else: #Else just median collapse the whole slit
median_along_slit = nanmedian(self.orders[i].flux, axis=0) #Collapse median along slit
self.orders[i].flux -= tile(median_along_slit, [self.slit_pixel_length,1]) #Subtract the median
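#Usage sketch (hypothetical object name "sci"): for a point source centered on the slit, the slit
#edges sample only sky, so collapsing them gives an OH-residual estimate to subtract.
# sci.subtract_median_vertical(use_edges=5) #Use 5 pixels from each slit edge for the median
# sci.subtract_median_vertical(use_range=[0, 10]) #Or use an explicit pixel range along the slit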
def combine_orders(self, wave_pivot = default_wave_pivot): #Stitch orders together into one long spectrum
combospec = copy.deepcopy(self.orders[0]) #Create a spectrum object to append wavelength and flux to
[order_height, order_length] = shape(combospec.flux)
blank = zeros([order_height, order_length*self.n_orders])#Create blanks to store new giant spectrum
combospec.flux = copy.deepcopy(blank) #apply blanks to everything
combospec.wave = zeros(order_length*self.n_orders)
combospec.noise = copy.deepcopy(blank)
#combospec.s2n = copy.deepcopy(blank)
for i in range(self.n_orders-1, -1, -1): #Loop through each order to stitch one and the following one together
if i == self.n_orders-1: #If first order, simply throw it in
xl = 0
xr = order_length
#goodpix_next_order = self.orders[i].wave[0,:] > 0.
goodpix_next_order = self.orders[i].wave > 0.
else: #Else find the wave pivots
[low_wave_limit, high_wave_limit] = [flat_nanmin(self.orders[i].wave), combospec.wave[xr-1]] #Find the wavelength of the edges of the already stitched orders and the order currently being stitched to the rest
wave_cut = low_wave_limit + wave_pivot*(high_wave_limit-low_wave_limit) #Find wavelength between stitched orders and order to stitch to be the cut where they are combined, with pivot set by global var wave_pivot
#goodpix_combospec = combospec.wave >= wave_cut #Find pixels in already stitched orders to the left of where the next order will be cut and stitched to
goodpix_next_order = self.orders[i].wave > wave_cut #Find pixels to the right of the where the order will be cut and stitched to the rest
#nx = len(self.orders[i].wave[:goodpix_next_order]) #Count number of pixels to add to the blanks
if combospec.wave[xr-1] > wave_cut:
xl = where(combospec.wave > wave_cut)[0][0]-1 #Set left pixel to previous right pixel
else:
xl = xr-1
xr = xl + len(self.orders[i].wave[goodpix_next_order])
combospec.wave[xl:xr] = self.orders[i].wave[goodpix_next_order] #Stitch wavelength arrays together
combospec.flux[:, xl:xr] = self.orders[i].flux[:, goodpix_next_order] #Stitch flux arrays together
combospec.noise[:, xl:xr] = self.orders[i].noise[:, goodpix_next_order] #Stitch noise arrays together
#combospec.s2n[:, xl:xr] = self.orders[i].s2n[:, goodpix_next_order] #Stitch S/N arrays together
combospec.wave = combospec.wave[0:xr] #Get rid of extra pixels at end of arrays
combospec.flux = combospec.flux[:,0:xr]
combospec.noise = combospec.noise[:,0:xr]
#combospec.s2n = combospec.s2n[:,0:xr]
self.combospec = combospec #save the orders all stitched together
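#A worked example of the wave_pivot arithmetic above, with made-up numbers: if the stitched spectrum
#currently ends at 1.60 um and the next order starts at 1.58 um, then with wave_pivot = 0.75 the cut
#is at 1.58 + 0.75*(1.60 - 1.58) = 1.595 um; pixels of the new order redward of 1.595 um are appended,
#while the overlap blueward of the cut is kept from the already-stitched spectrum.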
def plot(self, spec_lines='', pause = False, close = False, s2n = False, label_OH = True, num_wave_labels = 50):
if not hasattr(self, 'combospec'): #Check if a combined spectrum exists
print('No spectrum of combined orders found. Creating combined spectrum.')
self.combine_orders() #If combined spectrum does not exist, combine the orders
wave_fits = fits.PrimaryHDU(tile(self.combospec.wave, [self.slit_pixel_length,1])) #Create fits file containers
if s2n: #If you want to view the s2n
spec_fits = fits.PrimaryHDU(self.combospec.s2n())
else: #You will view the flux
spec_fits = fits.PrimaryHDU(self.combospec.flux)
wave_fits.writeto(save.path + 'longslit_wave.fits', overwrite=True) #Save temporary fits files for later viewing in DS9
spec_fits.writeto(save.path + 'longslit_spec.fits', overwrite=True)
ds9.open() #Display spectrum in DS9
self.make_label2d(spec_lines, label_lines = True, label_wavelength = True, label_OH = label_OH, num_wave_labels = num_wave_labels) #Label 2D spectrum,
ds9.show(save.path + 'longslit_wave.fits', new=False)
self.show_labels() #Load labels
ds9.show(save.path + 'longslit_spec.fits', new=True)
self.show_labels() #Load labels
ds9.set('zoom to fit')
ds9.set('scale log') #Set view to log scale
ds9.set('scale ZScale') #Set scale limits to Zscale, looks okay
ds9.set('frame lock image')
#Pause for viewing if user specified
if pause:
wait()
#Close DS9 after viewing if user specified (pause should be true or else DS9 will open then close)
if close:
ds9.close()
#Function for labeling the 2D spectrum in DS9; creates a region file storing all the labels, which show_labels() then loads into DS9. Called by plot()
def make_label2d(self, spec_lines='', label_lines = True, label_wavelength = True, label_OH = True, num_wave_labels = 50):
regions = [] #Create list to store strings for creating a DS9 region file
wave_pixels = self.combospec.wave #Extract 1D wavelength for each pixel
x = arange(len(wave_pixels)) + 1.0 #Pixel x coordinates across the detector (1-indexed for DS9)
min_wave = flat_nanmin(wave_pixels) #Minimum wavelength
max_wave = flat_nanmax(wave_pixels) #Maximum wavelength
#wave_interp = interp1d(x, wave_pixels, kind = 'linear') #Interpolation for inputting pixel x and getting back wavelength
x_interp = interp1d(wave_pixels, x, kind = 'linear', bounds_error=False) #Interpolation for inputting wavelength and getting back pixel x
top_y = str(self.slit_pixel_length)
bottom_y = '0'
label_y = str(1.25*self.slit_pixel_length)
oh_label_y = str(-0.375*self.slit_pixel_length)
#x_correction = 2048*(n_orders-i-1) #Push label x position to correct place depending on order
if label_wavelength: #Label wavelengths
interval = (max_wave - min_wave) / num_wave_labels #Interval between each wavelength label
wave_labels = arange(min_wave, max_wave, interval) #Store the wavelengths where the wave labels are going to go
x_labels = x_interp(wave_labels) #Grab x positions of the wavelength labels
for j in range(num_wave_labels): #Loop through each wavelength label
x_label = str(x_labels[j])
regions.append('image; line(' + x_label +', '+ top_y + ', ' + x_label + ', ' + bottom_y + ' ) # color=blue ')
regions.append('image; text('+ x_label +', '+label_y+') # color=blue textangle=90 text={'+str("%12.5f" % wave_labels[j])+'}')
if label_OH: #Label OH lines
OH_lines = lines(OH_line_list, delta_v=0.0) #Load OH line list
show_lines = OH_lines.parse(min_wave, max_wave) #Only grab lines within the wavelength range
num_OH_lines = len(show_lines.wave)
x_labels = x_interp(show_lines.wave)
#x positions of the lines to display
for j in range(num_OH_lines): #Label the lines
x_label = str(x_labels[j])
regions.append('image; line(' + x_label +', '+ top_y + ', ' + x_label + ', ' + bottom_y + ' ) # color=green ')
regions.append('image; text('+ x_label +', '+oh_label_y+') # color=green textangle=90 text={OH}')
if label_lines and spec_lines != '': #Label lines from a line list
show_lines = spec_lines.parse(min_wave, max_wave) #Only grab lines within the wavelength range of the current order
num_lines = len(show_lines.wave)
x_labels = x_interp(show_lines.wave)
#number of lines to display
for j in range(num_lines): #Label the lines
x_label = str(x_labels[j])
regions.append('image; line(' + x_label +', '+ top_y + ', ' + x_label + ', ' + bottom_y + ' ) # color=red ')
regions.append('image; text('+ x_label +', '+label_y+') # color=red textangle=90 text={'+show_lines.label[j]+'}')
region_file_path = save.path + '2d_labels.reg'
savetxt(region_file_path, regions, fmt="%s") #Save region template file for reading into ds9
#ds9.set('regions ' + region_file_path)
def show_labels(self): #Called by plot() to put line labels in DS9
region_file_path = save.path + '2d_labels.reg'
ds9.set('regions ' + region_file_path)
# def s2n(self): #Estimate noise per pixel
# s2n_obj = copy.deepcopy(self)
# for i in range(self.n_orders): #Loop through each order
# median_flux = robust_median_filter(self.orders[i].flux, size=4) #median smooth by four pixels, about the specral & spatial resolution
# random_noise = abs(self.orders[i].flux - median_flux) #Subtract flux from smoothed flux, this should give back the noise
# total_noise = sqrt(random_noise**2 + abs(self.orders[i].flux)) #Calculate S/N from measured random noise and from poisson noise from signal
# s2n = self.orders[i].flux / total_noise
# s2n_obj.orders[i].flux = s2n
# return s2n_obj
def deredden(self, A_V): #Deredden spectrum with an assumed A_V, and the extinction curve from Rieke & Lebofsky (1985) Table 3 (see "redden" definition)
if not hasattr(self, 'combospec'): #Check if a combined spectrum exists
print('No spectrum of combined orders found. Creating combined spectrum.')
self.combine_orders() #If combined spectrum does not exist, combine the orders
R = 3.09 #Assume Milky Way-like dust
E_BV = (-A_V) / R #Calculate a reverse E(B-V) by taking the negative of the known A_V
V = 0. #For dereddening, we reverse redden everything, assuming V mag = 0
B = E_BV #and assuming the B mag is the difference between the B and V magnitudes, E(B-V)
self.combospec.flux = redden(B, V, self.combospec.wave, self.combospec.flux) #Artificially deredden flux
self.combospec.noise = redden(B, V, self.combospec.wave, self.combospec.noise) #Artificially scale noise to match S/N of dereddened flux
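#Worked example of the reverse-reddening trick above (made-up A_V): for A_V = 3.09 and R = 3.09,
#E(B-V) = -1.0, so calling redden() with B - V = -1.0 multiplies the flux by the inverse of the
#extinction it would normally apply, i.e. it dereddens.
# sci.deredden(A_V=3.09) #Hypothetical call; brightens the spectrum by the assumed extinction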
def c_deredden(self, c_value): #Deredden spectrum with a value of "c" measured for H-beta from the literature, while assuming the extinction law of Rieke & Lebofsky (1985)
#A_lambda = array([1.531, 1.324, 1.000, 0.748, 0.482, 0.282, 0.175, 0.112, 0.058]) #(A_lambda / A_V) extinction curve from Rieke & Lebofsky (1985) Table 3
#l = array([ 0.365, 0.445, 0.551, 0.658, 0.806, 1.22 , 1.63 , 2.19 , 3.45 ]) #Wavelengths for extinction curve from Rieke & Lebofsky (1985)
#extinction_curve = interp1d(l, A_lambda, kind='quadratic') #Create interpolation object for extinction curve from Rieke & Lebofsky (1985)
#A_V = 0.83446 * 2.5 * c_value #Calcualte A_V from c(h-beta), use linearly interolated A_V/A_hbeta from Rieke & Lebofsky (1985)
A_V = 2.387 * c_value #Calculate A_V from c(H-beta), using the value given on page 179 of Osterbrock & Ferland (2nd ed.) at the end of the first paragraph
A_K = 0.118 * A_V #Convert A_V to A_K from Fitspatrick (1998)
#a = 2.14 #extinction curve in the form of a power law from Stead and Hoare (2009)
a = 1.8 #Adopted extinction power-law index (used instead of the Stead & Hoare 2009 value above)
A_lambda = A_K * self.combospec.wave**(-a) / 2.19**(-a) #Calculate an extinction correction
#h.F *= 10**(0.4*A_lambda) #Apply extinction correction
#dereddening = 10**(0.4*extinction_curve(self.combospec.wave)*A_V) #Calculate dereddening as a function of wavelength
#self.combospec.flux = self.combospec.flux * dereddening #Apply dereddening to flux and noise
#self.combospec.noise = self.combospec.noise * dereddening
self.combospec.flux = self.combospec.flux * 10**(0.4*A_lambda) #Apply dereddening to flux and noise
self.combospec.noise = self.combospec.noise * 10**(0.4*A_lambda)
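#Worked numeric example (hypothetical c value): for c(H-beta) = 1.0, A_V = 2.387 mag and
#A_K = 0.118 * 2.387 = 0.282 mag; with the a = 1.8 power law, A_lambda at 2.166 um (Br-gamma) is
#0.282 * (2.166/2.19)**(-1.8) = 0.288 mag, so the flux there gets multiplied by 10**(0.4*0.288) = 1.30.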
#Generic class for storing a single spectrum, either an order or long spectrum, 1D or 2D
#This serves as the basis for all spectrum objects in this code
class spectrum:
def __init__(self, wave, flux, noise=[]): #Initialize spectrum from wavelength and flux arrays, with an optional noise array
self.wave = wave #Set up wavelength array
self.flux = flux #Set up flux array
#if "noise" in locals(): #If user specifies a noise
self.noise_stored = len(noise) >= 1 #Store a boolean recording whether a noise array was actually supplied
if self.noise_stored:
self.noise = noise #Save it like flux
#self.s2n = flux / noise
else:
self.noise = zeros(shape(flux))
#self.s2n = zeros(shape(flux))
def s2n(self):
if self.noise_stored: #If noise is stored
return self.flux/self.noise #Return proper S/N
else: #But if no noise is stored
return zeros(shape(self.flux)) #Return an array of zeros for S/N
def fill_holes(self, ylimit=10, xlimit=3): #Fill nan values with a small (2x2) median filter, with the edges avoided by setting ylimit or xlimit
xsize, ysize = shape(self.flux) #grab size of 2D flux array
sub_flux = self.flux[xlimit:xsize-xlimit, ylimit:ysize-ylimit] #grab subset of that array with the edges trimmed off
filtered_flux = median_filter(self.flux, size=[2,2])[xlimit:xsize-xlimit, ylimit:ysize-ylimit] #Create median-filtered data to plug the nan holes with
find_nans = isnan(sub_flux) #Find the holes
sub_flux[find_nans] = filtered_flux[find_nans] #Fill in the nan holes with the median filtered data, and our job is now done
def median_smooth(self, size=[3,3]): #Experimental feature to median smooth a spectrum to (presumably) get rid of annoying artifacts
self.flux = median_filter(self.flux, size=size) #and smooth it, that's it! No more to it really
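#Minimal usage sketch of the spectrum class (toy arrays; commented out so the module imports cleanly):
# import numpy as np
# w = np.linspace(1.45, 1.46, 100) #Toy wavelength grid in microns
# f = np.random.normal(loc=1.0, size=(10, 100)) #Toy 2D flux array (slit x wavelength)
# n = np.full((10, 100), 0.1) #Toy noise array
# spec = spectrum(w, f, noise=n)
# print(spec.s2n().mean()) #s2n() returns flux/noise when noise was supplied, zeros otherwise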
#~~~~~~~~~~~~~~~~~~~~~~~~~Code for dealing with lines and line lists~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Class for storing line list
class lines:
def __init__(self, files, delta_v=0.0, list_dir='line_lists/'): #Initialize line list by providing file(s) in the format of wavelength <tab> line label
if size(files) == 1: #If only one line list is inputted, put single list into array
files = [files]
delta_v = [delta_v]
lab_wave = array([], dtype='f') #Set up array for "lab frame" line wavelengths ie. delta-v = 0
wave = array([], dtype='f') #Set up array for line wavelengths
label = array([], dtype='a')
count = 0
for file in files: #Load multiple lists if needed
if file != 'none':
input_wave = loadtxt(list_dir+file, unpack=True, dtype='f', delimiter='\t', usecols=(0,)) #Read in line list wavelengths
input_label = loadtxt(list_dir+file, unpack=True, dtype='U', delimiter='\t', usecols=(1,)) #Read in line list labels
new_wave = input_wave + input_wave*(delta_v[count]/c) #Shift a line list by some delta_v given by the user
lab_wave = append(lab_wave, input_wave) #Save wavelengths in the lab frame as well, for later
wave = append(wave, new_wave) #Add lines from one list to the (new) wavelength array
label = append(label, input_label) #Add lines from one list to the label array
count = count + 1
sort_order = argsort(wave) #Sort lines by wavelength (avoid shadowing the built-in sorted)
self.lab_wave = lab_wave[sort_order]
self.wave = wave[sort_order]
self.label = label[sort_order]
def parse(self, min_wave, max_wave): #Simple function for grabbing only lines with a certain wavelength range
subset = copy.deepcopy(self) #Make copy of this object to parse
found_lines = (subset.wave > min_wave) & (subset.wave < max_wave) & (abs(subset.wave - 1.87) > 0.062) #Grab location of lines only in the wavelength range, while avoiding region between H & K bands
subset.lab_wave = subset.lab_wave[found_lines] #Filter out lines outside the wavelength range
subset.wave = subset.wave[found_lines]
subset.label = subset.label[found_lines]
return subset #Returns copy of object but with only found lines
def recalculate_wavelengths(self, delta_v): #Recalculate the observed wavelengths from the lab wavelengths given a new delta_v
self.wave = self.lab_wave * (1.0 + (delta_v/c))
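#Hedged usage sketch: a line-list file is two tab-separated columns (wavelength in microns, then a
#text label), read from list_dir. The file name and delta_v here are hypothetical.
# my_lines = lines('my_list.dat', delta_v=25.0) #Lines shifted by +25 km/s
# k_band_lines = my_lines.parse(1.95, 2.45) #Keep only lines between 1.95 and 2.45 um
# my_lines.recalculate_wavelengths(delta_v=0.0) #Shift back to rest wavelengths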
#~~~~~~~~~~~~~~~~~~~~~~~~Do a robust running median filter that ignores nan values and outliers, returns result in 1D~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#@jit #Compile Just In Time with numba
def robust_median_filter(input_flux, size = half_block):
if size%2 == 0: size = size+1 #Make even sizes odd
half_sizes = array([-(size-1)/2, ((size-1)/2)+1], dtype='int')
flux = copy.deepcopy(input_flux)
if ndim(flux) == 2: #For 2D spectrum
ny, nx = shape(flux) #Calculate npix in x and y
else: #Else for 1D spectrum
nx = len(flux) #Calculate npix
median_result = zeros(nx) #Create array that will store the smoothed median spectrum
if ndim(flux) == 2: #Run this loop for 2D
for i in range(nx): #This loop does the running of the median down the spectrum each pixel
x_left, x_right = i + half_sizes
if x_left < 0:
x_left = 0
elif x_right > nx:
x_right = nx
median_result[i] = nanmedian(flux[:,x_left:x_right]) #Calculate median between x_left and x_right for a given pixel
else: #Run this loop for 1D
for i in range(nx): #This loop does the running of the median down the spectrum each pixel
x_left, x_right = i + half_sizes
if x_left < 0:
x_left = 0
elif x_right > nx:
x_right = nx
median_result[i] = nanmedian(flux[x_left:x_right]) #Calculate median between x_left and x_right for a given pixel
return median_result
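#Usage sketch on a toy 1D spectrum (commented out so the module imports cleanly):
# import numpy as np
# toy = np.sin(np.linspace(0, 10, 2048)) + np.random.normal(scale=0.1, size=2048)
# toy[500] = np.nan #nans are ignored by the running nanmedian
# smooth = robust_median_filter(toy, size=101) #101-pixel running median, same length as the input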
#~~~~~~~~~~~~~Mask out lines based on some velocity range, used for not including wide lines in continuum subtraction~~~~~~~~~~~~~~~~~~~~~~~~
def mask_lines(spec, linelist, vrange =[-10.0,10.0], ndim=1):
sub_linelist = linelist.parse(flat_nanmin(spec.wave), flat_nanmax(spec.wave)) #Pick lines only in wavelength range
if len(sub_linelist.wave) > 0: #Only do this if there are lines to subtract, if not just pass through the flux array
for line_wave in sub_linelist.wave: #loop through each line
velocity = c * ( (spec.wave - line_wave) / line_wave )
mask = (velocity >= vrange[0]) & (velocity <= vrange[1])
#mask = abs(spec.wave - line_wave) < mask_size #Set up mask around an emission line
if ndim == 1: #If the number of dimensions is 1
spec.flux[mask] = nan #Mask emission line in 1D
else: #else if the number of dimensions is 2
spec.flux[:,mask] = nan
return spec #Return flux with lines masked
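#Usage sketch (hypothetical names): nan out +/-50 km/s around each catalog line in a 2D order before
#continuum fitting, so bright emission lines do not bias the running median.
# masked_order = mask_lines(copy.deepcopy(order), my_lines, vrange=[-50.0, 50.0], ndim=2)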
#~~~~~~~~~~~~~~~~~~~~~~~~~~~ Various commands ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#Pauses execution of code to wait for user to hit a key on the command line
def wait():
input('Press Enter to continue.')
#Simple linear interpolation over nan values
def fill_nans(x, y):
filled_y = copy.deepcopy(y) #Make copy of y array
goodpix = isfinite(y) #Find values of y array not filled with nan
badpix = ~goodpix #Find nans
interp_y = interp1d(x[goodpix], y[goodpix], bounds_error=False) #Make interpolation object using only finite y values
filled_y[badpix] = interp_y(x[badpix]) #Replace nan values with interpolated values
return filled_y #And send it back to where it came from
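#Tiny demonstration of the linear gap filling above (toy arrays, commented out):
# import numpy as np
# x = np.arange(5.0)
# y = np.array([0.0, 1.0, np.nan, 3.0, 4.0])
# print(fill_nans(x, y)) #-> [0. 1. 2. 3. 4.]; the nan at x=2 is linearly interpolated from its neighbors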
#~~~~~~~~~~~~~~~~~~~~~~~~~~~Currently unused commands ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
##Apply simple telluric correction by dividing the science spectrum by the flattened standard star spectrum
##This function corrects the orders only and then restitches the orders together into a new combospec
#def simple_telluric_correction(sci, std, quality_cut = True):
#num_dimensions = ndim(sci.orders[0].wave) #Store number of dimensions
#if num_dimensions == 2:
#slit_pixel_length = len(sci.orders[0].flux[:,0]) #Height of slit in pixels for this target and band
#for i in range(sci.n_orders): #Loop through each order
#if quality_cut: #Generally we throw out bad pixels, but the user can turn this feature off by setting quality_cut = False
###goodpix = logical_and(sci.orders[i].flux > -100.0, std.orders[i].flux > .1) #apply the mask
#goodpix = std.orders[i].flux > .1
#badpix = ~goodpix
#std.orders[i].flux[badpix] = nan
#if num_dimensions == 2: #For 2D spectra, expand standard star spectrum from 1D to 2D
#std.orders[i].flux = tile(std.orders[i].flux, [slit_pixel_length,1]) #Expand standard star spectrum into two dimensions
#sci.orders[i].flux = sci.orders[i].flux / std.orders[i].flux #Divide science spectrum by standard spectrum
##sci.orders = combine_orders(sci.orders) #Combine the newly corrected orders into one long spectrum
#return(sci) #Return the new telluric corrected science spectrum
##Test expanding and then contracting a 2D spectrum for continuum subtraction
#def test_expand_contract(flux):
#print('TESTING EXPANSION AND CONTRACTION')
##stop()
#flux[~isfinite(flux)] = 0.0 #Make the last few nans =0 so we can actually zoom, (most nans have already been filled)
#expand = zoom(flux, 4)
#plot_2d(expand, open = True, new = False, close = True)
#contract = zoom(expand, 0.25)
#plot_2d(flux, open = True, new = False, close = False)
#plot_2d(contract, open = False, new = True, close = True)
#Definition that is a wrapper for displaying a spectrum or image in DS9
#Pauses execution of the code; press Enter to close
def plot_2d(image, open = True, new = False, close = True):
if open: #Open DS9 typically
ds9.open() #Open DS9
show_file = fits.PrimaryHDU(image) #Set up fits file object
show_file.writeto(scratch_path + 'plot_2d.fits', overwrite = True) #Save fits file
ds9.show(scratch_path + 'plot_2d.fits', new = new) #Show image
ds9.set('zoom to fit')
ds9.set('scale log') #Set view to log scale
ds9.set('scale ZScale') #Set scale limits to ZScale, looks okay
if new:
ds9.set('lock scale')
ds9.set('lock colorbar')
ds9.set('frame lock image')
if close:
wait()
ds9.close()
#Finds lines across all orders and saves them as a line list object
class find_lines:
def __init__(self, sci, delta_v=0.0, v_range=[-20.0, 20.0], s2n_cut = 100):
line_waves = array([])
clf()
interp_velocity_grid = arange(v_range[0], v_range[1], 0.01) #Velocity grid to interpolate line profiles onto
master_profile_stack = zeros(size(interp_velocity_grid))
#for i in range(sci.n_orders): #Loop through each order
for order in sci.orders:
wave = order.wave
flux = order.flux
sig = order.noise
#flux_filled_nans = fill_nans(wave, flux)
line_waves_found_for_order = self.search_order(wave, flux)
line_waves = concatenate([line_waves, line_waves_found_for_order])
#interp_flux = interp1d(wave, flux_filled_nans, kind='cubic')
for line_wave in line_waves_found_for_order:
velocity = c * ( (wave - line_wave) / line_wave ) #Get velocity of each pixel
in_range = (velocity >= 1.1*v_range[0]) & (velocity <= 1.1*v_range[1]) & isfinite(flux) #Isolate pixels in velocity space, near the velocity range desired
summed_flux = nansum(flux[in_range]) #Sum the flux in the window, used to sanity check the line before computing a centroid
if summed_flux > 0.: #Only proceed if there is positive flux in the window
centroid_estimate = abs(nansum(flux[in_range]*velocity[in_range]) /summed_flux) #Find the flux-weighted centroid of the line
#velocity = c * ( (wave - line_wave) / line_wave ) - centroid_estimate #Apply a correction for the line centroid
#in_range = (velocity >= 1.1*v_range[0]) & (velocity <= 1.1*v_range[1]) & isfinite(flux) #Isolate pixels in velocity space, near the velocity range desired
s2n = nansum(flux[in_range]) / nansum(sig[in_range]**2)**0.5
if s2n > s2n_cut and centroid_estimate < 0.75:
interp_flux = interp1d(velocity[in_range], flux[in_range], kind='cubic', bounds_error=False) #Cubic interpolate over line profile
profile = interp_flux(interp_velocity_grid) #Get profile over desired velocity range
profile = profile / flat_nanmax(profile) #Normalize profile
plot(interp_velocity_grid, profile)
master_profile_stack = dstack([master_profile_stack, profile])
#stop()
#flux_continuum_subtracted = self.line_continuum_subtract(sci.orders[i].wave, sci.orders[i].flux, line_waves)
#line_waves = self.search_order(sci.orders[i].wave, flux_continuum_subtracted)
#stop()
median_profile = nanmedian(master_profile_stack, 2)[0] #Take median
wave = line_waves #Workaround: wave was coming out 0 for some reason, so use line_waves directly
self.label = line_waves.astype('|S8') #Automatically make simple wavelength labels for the found lines
self.wave = wave #Stores (possibly dopper shifted) waves
self.lab_wave = line_waves #Save unshifted waves
self.profile = median_profile
#wave = line_waves*(delta_v/c) #Shift a line list by some delta_v given by the user
self.velocity = interp_velocity_grid
with PdfPages(save.path + 'save_median_line_profile.pdf') as pdf:
#clf()
title('N lines used = ' + str(len(master_profile_stack[0,0,:])))
ylim([-0.2,1.2])
plot(interp_velocity_grid, median_profile, '--', color='Black', linewidth=3)
self.gauss = self.median_fit_gauss() #Fit gaussian, report fwhm
plot(interp_velocity_grid, self.gauss, ':', color='Red', linewidth=3)
pdf.savefig()
#Function finds lines using the second derivative test and saves them as a line list
def search_order(self, wave, flux, per_order=30):
#plot(wave, flux)
finite = isfinite(flux) #Use only finite pixels (ignore nans)
fit = UnivariateSpline(wave[finite], flux[finite], s=50.0, k=4) #Fit an interpolated spline
#for i in range(5):
#neo_flux = fit(wave)
#fit = UnivariateSpline(wave, neo_flux, s=50.0, k=4) #Fit an interpolated spline
extrema = fit.derivative().roots() #Grab the roots (where the first derivative = 0) of the fit; these are the extrema (maxima and minima)
second_deriv = fit.derivative(n=2) #Take the second derivative of the fit at the extrema for the second derivative test
extrema_sec_deriv = second_deriv(extrema) #Store the second derivatives
i_maxima = extrema_sec_deriv < 0. #Apply the concavity test to find the maxima
#i_minima = extrema_sec_deriv > 0. #Ditto for minima
wave_maxima = extrema[i_maxima]
flux_maxima = fit(wave_maxima) #Grab flux of the maxima
#wave_minima = extrema[i_minima]
#flux_minima = fit(wave_minima)
flux_smoothed = fit(wave) #Read in spline smoothed fit for plotting
#plot(wave, flux_smoothed) #Plot fit
#plot(wave_maxima, flux_maxima, 'o', color='red') #Plot maxima found that pass the cut
#plot(wave, spline_obj.derivitive(
#print('TEST SMOOTHING CONTINUUM')
#for i in range(len(extrema)): #Print results
#print(extrema[i], extrema_sec_deriv[i])
#Now cut out lines that are below a standard deviation cut
#####stddev_flux = std(flux) #Stddeviation of the pixel fluxes
#####maxima_stddev = flux_maxima / stddev_flux
#####good_lines = maxima_stddev > threshold
n_maxima = len(wave_maxima)
distance_to_nearest_minima = zeros(n_maxima)
elevation_check = zeros(n_maxima)
dist_for_elevation_check = 0.00007 #um
height_fraction = 0.1 #fraction of height
for i in range(n_maxima):
#distance_to_nearest_minima[i] = min(abs(wave_maxima[i] - wave_minima))
peak_height = flux_maxima[i]
left_height = fit(wave_maxima[i] - dist_for_elevation_check)
right_height = fit(wave_maxima[i] + dist_for_elevation_check)
#elevation_check[i] = (peak_height > left_height + height_for_elevation_check) and (peak_height > right_height + height_for_elevation_check)
elevation_check[i] = (peak_height > left_height / height_fraction) and (peak_height > right_height / height_fraction)
#good_lines = (distance_to_nearest_minima > 0.00001) & (elevation_check == True)
good_lines = elevation_check.astype(bool) #Keep only maxima that pass the elevation check
wave_maxima = wave_maxima[good_lines]
flux_maxima = flux_maxima[good_lines]
s = argsort(flux_maxima)[::-1][0:per_order] #Find the brightest per_order (default 30) lines per order
#plot(wave_maxima, flux_maxima, 'o', color='blue') #Plot maxima found that pass the cut
#line_object = found_lines(wave_maxima, flux_maxima)
#return [wave_maxima, flux_maxima]
#stop()
return wave_maxima[s]
#Function masks out existing lines, then tries to find lines again
def line_continuum_subtract(self, wave, flux, line_waves, line_cut=0.0005):
for line_wave in line_waves: #Mask out each emission line found
line_mask = abs(wave - line_wave) < line_cut #Set up mask around an emission line
flux[line_mask] = nan #Cut emission line out
finite = isfinite(flux) #Fit only the remaining finite pixels, since the masked lines are now nan
fit = UnivariateSpline(wave[finite], flux[finite], s=1e4, k=4) #Smooth remaining continuum
continuum = fit(wave) #Grab smoothed continuum
#stop()
return flux - continuum #Return flux with continuum subtracted
def parse(self, min_wave, max_wave): #Simple function for grabbing only lines with a certain wavelength range
subset = copy.deepcopy(self) #Make copy of this object to parse
found_lines = (subset.wave > min_wave) & (subset.wave < max_wave) & (abs(subset.wave - 1.87) > 0.062) #Grab location of lines only in the wavelength range, while avoiding region between H & K bands
subset.lab_wave = subset.lab_wave[found_lines] #Filter out lines outside the wavelength range
subset.wave = subset.wave[found_lines]
subset.label = subset.label[found_lines]
return subset #Returns copy of object but with only found lines
def median_fit_gauss(self): #Fit gaussian to median line profile and print results
fit_g = fitting.LevMarLSQFitter() #Initialize minimization algorithm for fitting a Gaussian
g_init = models.Gaussian1D(amplitude=max(self.profile), mean=0.0, stddev=8.0) #Initialize gaussian model for this specific line, centered at 0 km/s with a first guess at the dispersion to be the spectral resolution
g = fit_g(g_init, self.velocity, self.profile) #Fit gaussian to line
g_mean = g.mean.value #Grab mean of gaussian fit
g_stddev = g.stddev.value
g_fwhm = g_stddev * 2.355
g_flux = g(self.velocity)
g_residuals = self.profile - g_flux
print('Median line profile FWHM: ', g_fwhm)
return(g(self.velocity)) #Return gaussian fit
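#Note on the conversion above: for a Gaussian, FWHM = 2*sqrt(2*ln(2))*sigma ~ 2.355*sigma, so a
#fitted stddev of 8 km/s corresponds to a FWHM of ~18.8 km/s.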
#Generate a synthetic stellar spectrum for standard stars using Phoenix stellar atmosphere models, gollum, and muler
#Start with the parameters from Anders et al. (2022) https://ui.adsabs.harvard.edu/abs/2022yCat.1354....0A/abstract
#and adjust parameters to match V, J, H, K magnitudes and H I spectral lines
def process_standard_star_with_phoenix_model(sci, std, std_flattened, B, V, std_star_name, rv_shift, savechecks=True,
twomass_trans_curve_dir='../../../'):
#Set up the wavelength range, instrumental resolution, and dust extinction model
min_wavelength = 1000 #Minimum wavelength (in angstroms) to load from the model spectrum
max_wavelength = 30000 #Maximum wavelength (in angstroms) to load from the model spectrum
resolving_power = 45000.0 #IGRINS resolution
extinction_model = GCC09_MWAvg() #Dust extinction model: https://dust-extinction.readthedocs.io/en/latest/api/dust_extinction.averages.GCC09_MWAvg.html
if std_star_name == '18Lep':
#From RV template in Gaia DR3: Teff = 10500, logg=4.5, fe/h=0.25
fraction_a = 0.5
native_resolution_template_a = PHOENIXSpectrum(teff=10400, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(125.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
native_resolution_template_b = PHOENIXSpectrum(teff=10600, logg=4.5, Z=0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(125.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B-V)
#From best fit photometry in Cardiel et al. (2021), Teff=9056, logg=3.867, z=-0.5
# fraction_a = 0.75
# native_resolution_template_a = PHOENIXSpectrum(teff=9000, logg=4.0, Z=-0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
# native_resolution_template_a = native_resolution_template_a.rotationally_broaden(125.0)
# native_resolution_template_b = PHOENIXSpectrum(teff=9200, logg=3.5, Z=-0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
# native_resolution_template_b = native_resolution_template_b.rotationally_broaden(125.0)
elif std_star_name == 'HD34317':
#From Teff and metallicities for Tycho-2 stars (Ammons+, 2006): Teff = 9296 K
#From RV template in Gaia DR3: Teff = 10500, logg=4.5, fe/h=0.25
#From Anders et al. (2022): Teff = 9196, logg = 3.78, fe/h=0.208, Av=0.07 which if E(B-V) = Av/R where R=3.1 translates into E(B-V) = 0.0226
fraction_a = 0.5
native_resolution_template_a = PHOENIXSpectrum(teff=9400, logg=4.0, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(40.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=0.0226)
native_resolution_template_b = PHOENIXSpectrum(teff=9200, logg=3.5, Z=0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(40.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=0.0226)
# fraction_a = 0.5
# native_resolution_template_a = PHOENIXSpectrum(teff=9200, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
# native_resolution_template_a = native_resolution_template_a.rotationally_broaden(40.0) #* extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
# native_resolution_template_b = PHOENIXSpectrum(teff=9400, logg=4.5, Z=0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
# native_resolution_template_b = native_resolution_template_b.rotationally_broaden(40.0)#* extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B)
elif std_star_name == 'HR8422':
#From Gaia DR3 RV Template: Teff = 10000, logg=4.5, fe/h=0.25
#From Zorec et al. 2012: vsini=91 km/s, I found the rotational velocity to be lower when fitting the Br-gamma line.
#From Anders et al. (2022): Teff = 10217.59, logg = 3.798, fe/h = -0.395, Av= 0.0776 -> E(B-V)=0.025
fraction_a = 0.3
native_resolution_template_a = PHOENIXSpectrum(teff=10400, logg=3.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(50.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=0.025)
native_resolution_template_b = PHOENIXSpectrum(teff=10400, logg=4.0, Z=-0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(50.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=0.025)
# fraction_a = 0.5
# native_resolution_template_a = PHOENIXSpectrum(teff=10000, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
# native_resolution_template_a = native_resolution_template_a.rotationally_broaden(50.0) #* extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
# native_resolution_template_b = PHOENIXSpectrum(teff=10000, logg=4.5, Z=0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
# native_resolution_template_b = native_resolution_template_b.rotationally_broaden(50.0)#* extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B)
elif std_star_name == 'HR598':
#From Gaia DR3 gsphot: Teff = 10473.918, logg=4.2997, fe/h=-0.1595
#From Gaia DR3 RV Template: Teff = 9000, logg=4.5, fe/h=0.25
#From Anders et al. (2022): Teff = 9961, logg = 4.28, fe/h = -0.088, Av= 0.066949 -> E(B-V)=0.0216
fraction_a = 0.2
native_resolution_template_a = PHOENIXSpectrum(teff=10400, logg=4.0, Z=-0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(80.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=0.0216)
native_resolution_template_b = PHOENIXSpectrum(teff=10200, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(80.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=0.0216)
elif std_star_name == 'HD205314':
## FIT BASED ON GAIA DR3 RV fit
# Teff = 10000
# logg = 4.5
# [Fe/H] = 0.25
# fraction_a = 0.5
# native_resolution_template_a = PHOENIXSpectrum(teff=10000, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
# native_resolution_template_a = native_resolution_template_a.rotationally_broaden(100.0) #* extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
# native_resolution_template_b = PHOENIXSpectrum(teff=10000, logg=4.5, Z=0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
# native_resolution_template_b = native_resolution_template_b.rotationally_broaden(100.0) #* extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B-V)
## Fit based on Gaia DR3 photometry fit
# Teff = 10470
# logg = 3.7715
# [Fe/H] = -0.7811
# fraction_a = 0.5
# native_resolution_template_a = PHOENIXSpectrum(teff=10400, logg=3.5, Z=-1.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
# native_resolution_template_a = native_resolution_template_a.rotationally_broaden(100.0) #* extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
# native_resolution_template_b = PHOENIXSpectrum(teff=10600, logg=4.0, Z=-0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
# native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
# native_resolution_template_b = native_resolution_template_b.rotationally_broaden(100.0) #* extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B-V)
## BEST BY EYE FIT
fraction_a = 0.5
native_resolution_template_a = PHOENIXSpectrum(teff=9800, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(100.0) #* extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
native_resolution_template_b = PHOENIXSpectrum(teff=10000, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(100.0) #* extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B-V)
elif std_star_name == 'HR7098':
#Fit based on Monier et al (2019)
fraction_a = 0.5
native_resolution_template_a = PHOENIXSpectrum(teff=10200, logg=3.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
#native_resolution_template_a = native_resolution_template_a.rotationally_broaden(0.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
native_resolution_template_b = PHOENIXSpectrum(teff=10200, logg=3.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
#native_resolution_template_b = native_resolution_template_b.rotationally_broaden(0.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B-V)
elif std_star_name == 'HR6744':
#Values from Gaia DR3 RV fit
fraction_a = 0.5
native_resolution_template_a = PHOENIXSpectrum(teff=10800, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(150.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
native_resolution_template_b = PHOENIXSpectrum(teff=10800, logg=4.5, Z=0.5, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(150.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B-V)
elif std_star_name == 'HR7734':
#From Zorec 2012 https://vizier.cds.unistra.fr/viz-bin/VizieR-5?-ref=VIZ63892ff83017ac&-out.add=.&-source=J/A%2bA/537/A120/table1&recno=1729
# Teff = 9660 K
# vsini = 238 km/s
# ****These values from the literature don't fit the spectrum or magnitudes.....
# THIS STAR IS VERY STRANGE! Core of Br-gamma line seems stronger than any of the models can fit, could it be chemically peculiar?
# Absolute Vmag = -0.676 (based on the V mag from Simbad and the Gaia DR3 parallax distance), implying this star is actually spectral type A0III, a giant
# Lower surface gravity does indeed seem to provide a better fit; the core of Br-gamma is still not perfectly fit but we will probably have to live with it.
# Values used are best fit "chi by eye"
fraction_a = 0.5
native_resolution_template_a = PHOENIXSpectrum(teff=8800, logg=3.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift-0.0)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(35.0) #* extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=B-V)
native_resolution_template_b = PHOENIXSpectrum(teff=9000, logg=4.0, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift+0.0)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(35.0) #* extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=B-V)
elif std_star_name == 'HD184787':
#From Anders et al. (2022) Starhorse2: Teff = 9260, logg = 4.042, fe/h = -0.10, Av= 0.016538 -> E(B-V)=0.0053
fraction_a = 0.25
native_resolution_template_a = PHOENIXSpectrum(teff=9600, logg=4.0, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift-0.0)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(175.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=0.0053)
native_resolution_template_b = PHOENIXSpectrum(teff=9600, logg=4.0, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift+0.0)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(175.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=0.0053)
elif std_star_name == 'HIP80019':
fraction_a = 0.5
#From Iglesias et la. (2003) Table 3 (https://vizier.cds.unistra.fr/viz-bin/VizieR-5?-ref=VIZ647104af2c735&-out.add=.&-source=J/MNRAS/519/3958/table3&recno=137)
# Teff = 10500, logg=4.4, vsini=160, Av=1.08
native_resolution_template_a = PHOENIXSpectrum(teff=10800, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_a = native_resolution_template_a.rv_shift(rv_shift-0.0)
native_resolution_template_a = native_resolution_template_a.rotationally_broaden(80.0) * extinction_model.extinguish(native_resolution_template_a.spectral_axis, Ebv=1.57*(B-V))
native_resolution_template_b = PHOENIXSpectrum(teff=10600, logg=4.5, Z=0.0, path=path_to_pheonix_models, wl_lo=min_wavelength, wl_hi=max_wavelength)
native_resolution_template_b = native_resolution_template_b.rv_shift(rv_shift+0.0)
native_resolution_template_b = native_resolution_template_b.rotationally_broaden(80.0) * extinction_model.extinguish(native_resolution_template_b.spectral_axis, Ebv=1.57*(B-V))
else: #Catch-all for any unrecognized standard star name
raise Exception('The standard star name '+std_star_name+' does not match known standard stars.')
native_resolution_template_a = native_resolution_template_a.instrumental_broaden(resolving_power=resolving_power) #Degrade synthetic spec resolution to instrumental resolution
native_resolution_template_b = native_resolution_template_b.instrumental_broaden(resolving_power=resolving_power) #Degrade synthetic spec resolution to instrumental resolution
std_model_synthetic_spectrum = LinInterpResampler(native_resolution_template_a, native_resolution_template_a.spectral_axis)*(fraction_a)*1e-8 + LinInterpResampler(native_resolution_template_b, native_resolution_template_a.spectral_axis)*(1.0- fraction_a)*1e-8 #1e-8 scales flux from cm^-1 to angstrom^-1
#std_model_synthetic_spectrum = LinInterpResampler(std_model_synthetic_spectrum, input_spectrum.spectral_axis)
std_model_synthetic_spectrum.__class__ = EchelleSpectrum
interp_std_flux = interp1d(std_model_synthetic_spectrum.spectral_axis.micron, std_model_synthetic_spectrum.flux)
# #normalize synthetic spectrum to its magnitude in the V band
# tcurve_wave, tcurve_trans = loadtxt(path_to_pheonix_models + '/2MASS_transmission_curves/'+bands[i]+'.dat', unpack=True) #Read in 2MASS band filter transmission curve
# #tcurve_trans[tcurve_trans < 0] = 0.0 #Zero out negative values
# tcurve_interp = interp1d(tcurve_wave, tcurve_trans, kind='cubic', fill_value=0.0, bounds_error=False) #Create interp obj for the transmission curve
# tcurve_resampled = tcurve_interp(x)
# f_lambda = nansum(resampled_synthetic_spectrum * tcurve_resampled * x * delta_lambda) / nansum(tcurve_resampled * x * delta_lambda)
#magnitude_scale = 10**(0.4*(0.03 - V)) #Scale flux by difference in V magnitude between standard star and Vega (V for vega = 0.03 in Simbad)
magnitude_scale = 10**(0.4*(-V))
f = FilterGenerator()
#Test printing B,V,R magnitudes for the star
f0_lambda = 363.1e-11 * 1e4 #Source: Table A2 from Bessell (1998), with units converted from erg cm^-2 s^-1 ang^-1 to erg cm^-2 s^-1 um^-1 by multiplying by 1e4
filt = f.reconstruct('Generic/Johnson.V')
tcurve_interp = interp1d(filt.wavelength.to('um'), filt.transmittance, kind='cubic', fill_value=0.0, bounds_error=False) #Create interp obj for the transmission curve
x = arange(0.0, 3.0, 1e-7)
delta_lambda = abs(x[1]-x[0])
tcurve_resampled = tcurve_interp(x)
resampled_synthetic_spectrum = LinInterpResampler(std_model_synthetic_spectrum , x*u.um).flux.value
f_lambda = nansum(resampled_synthetic_spectrum * tcurve_resampled * x * delta_lambda) / nansum(tcurve_resampled * x * delta_lambda)
#magnitude = -2.5 * log10(f_lambda / f0_lambda)# - (0.03 - V)
#scale_std_flux = vega_V_flambdla_zero_point / interp_std_flux(V_band_effective_lambda)
scale_std_flux = vega_V_flambdla_zero_point / f_lambda
# print('vega_V_flambdla_zero_point = ', vega_V_flambdla_zero_point)
# print('interp_std_flux(V_band_effective_lambda) = ', interp_std_flux(V_band_effective_lambda))
# print('scale_std_flux = ', scale_std_flux)
# print('magnitude_scale = ', magnitude_scale)
# print('scale_std_flux/magnitude_scale = [should be about 1]', scale_std_flux/magnitude_scale)
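#Worked example of the two scalings above (illustrative, not part of the original code):
#scale_std_flux pins the synthetic spectrum to the Vega V-band zero point, and
#magnitude_scale then dims it to the standard star's catalog V magnitude. For V = 7.0,
#  magnitude_scale = 10**(0.4*(-7.0)) ~ 1.58e-3
#i.e. the standard star is ~630x fainter in V than a zeroth-magnitude (Vega-like) source.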
for i in range(std.n_orders):
synthetic_std_spec_for_order = LinInterpResampler(std_model_synthetic_spectrum, std.orders[i].wave * u.micron) * 1e4 #Convert angstrom^-1 -> um^-1
relative_flux_calibration = std.orders[i].flux / (synthetic_std_spec_for_order.flux.value * scale_std_flux * magnitude_scale * (1/(4*pi)) ) #Divide by 4 pi steradians so the final flux units are erg s^-1 cm^-2 um^-1 sr^-1
s2n = ((1.0/sci.orders[i].s2n()**2) + (1.0/std.orders[i].s2n()**2))**-0.5 #Error propagation after telluric correction, see https://wikis.utexas.edu/display/IGRINS/FAQ or http://chemwiki.ucdavis.edu/Analytical_Chemistry/Quantifying_Nature/Significant_Digits/Propagation_of_Error#Arithmetic_Error_Propagation
sci.orders[i].flux /= relative_flux_calibration #Apply telluric correction and flux calibration
sci.orders[i].noise = sci.orders[i].flux / s2n #It's easiest to just work back the noise from S/N after calculating S/N, plus it is now properly scaled to match the (relative) flux calibration
# plot(std.orders[i].wave, std.orders[i].flux, color='black')
# normalized_synthetic_std_spec_for_order_flux = synthetic_std_spec_for_order.flux.value / nanmedian(synthetic_std_spec_for_order.flux.value)
# plot(std.orders[i].wave, std.orders[i].flux / normalized_synthetic_std_spec_for_order_flux, color='red')
# plot(std.orders[i].wave, std.orders[i].flux / std_flattened.orders[i].flux / normalized_synthetic_std_spec_for_order_flux, color='blue')
# show()
#Print estimated J,H,K magnitudes as a sanity check to compare to 2MASS
bands = ['J', 'H', 'Ks']
f0_lambda = array([3.129e-13, 1.133e-13, 4.283e-14]) * 1e7 #Convert units from W cm^-2 um^-1 to erg s^-1 cm^-2 um^-1
x = arange(0.0, 3.0, 1e-6)
delta_lambda = abs(x[1]-x[0])
resampled_synthetic_spectrum = LinInterpResampler(std_model_synthetic_spectrum , x*u.um).flux.value * magnitude_scale * scale_std_flux #* 1e-4 * magnitude_scale * vega_R_over_D_squared
for i in range(len(bands)):
tcurve_wave, tcurve_trans = loadtxt(path_to_pheonix_models + '/2MASS_transmission_curves/'+bands[i]+'.dat', unpack=True) #Read in 2MASS band filter transmission curve
#tcurve_trans[tcurve_trans < 0] = 0.0 #Zero out negative values
tcurve_interp = interp1d(tcurve_wave, tcurve_trans, kind='cubic', fill_value=0.0, bounds_error=False) #Create interp obj for the transmission curve
tcurve_resampled = tcurve_interp(x)
f_lambda = nansum(resampled_synthetic_spectrum * tcurve_resampled * x * delta_lambda) / nansum(tcurve_resampled * x * delta_lambda)
magnitude = -2.5 * log10(f_lambda / f0_lambda[i])# - (0.03 - V)
print('For band '+bands[i]+' the estimated magnitude for '+std_star_name+': '+str(magnitude))
#Test comparison to Tynt (https://tynt.readthedocs.io/en/latest/index.html)
print('TESTING TYNT')
for i in range(len(bands)):
filt = f.reconstruct('2MASS/2MASS.'+bands[i])
tcurve_interp = interp1d(filt.wavelength.to('um'), filt.transmittance, kind='cubic', fill_value=0.0, bounds_error=False) #Create interp obj for the transmission curve
tcurve_resampled = tcurve_interp(x)
f_lambda = nansum(resampled_synthetic_spectrum * tcurve_resampled * x * delta_lambda) / nansum(tcurve_resampled * x * delta_lambda)
magnitude = -2.5 * log10(f_lambda / f0_lambda[i])# - (0.03 - V)
print('For band '+bands[i]+' the estimated magnitude is '+str(magnitude))
#Test printing U,B,V magnitudes for the star
f0_lambda = array([417.5e-11, 632e-11, 363.1e-11]) * 1e4 #Source: Table A2 from Bessell (1998), with units converted from erg cm^-2 s^-1 ang^-1 to erg cm^-2 s^-1 um^-1 by multiplying by 1e4
bands = ['U','B','V']
for i in range(len(bands)):
filt = f.reconstruct('Generic/Johnson.'+bands[i])
tcurve_interp = interp1d(filt.wavelength.to('um'), filt.transmittance, kind='cubic', fill_value=0.0, bounds_error=False) #Create interp obj for the transmission curve
tcurve_resampled = tcurve_interp(x)
f_lambda = nansum(resampled_synthetic_spectrum * tcurve_resampled * x * delta_lambda) / nansum(tcurve_resampled * x * delta_lambda)
magnitude = -2.5 * log10(f_lambda / f0_lambda[i])# - (0.03 - V)
print('For band '+bands[i]+' the estimated magnitude is '+str(magnitude))
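#Summary of the synthetic photometry used above (illustrative, not part of the original
#code): each band magnitude comes from the photon-weighted mean flux density
#  <F_lambda> = sum(F_lambda * T * lambda * dlambda) / sum(T * lambda * dlambda)
#  m = -2.5 * log10(<F_lambda> / f0_lambda)
#where T is the filter transmission curve resampled onto the wavelength grid x and
#f0_lambda is the band's Vega-system zero-point flux density hard-coded above.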
if savechecks: #If user specifies saving pdf check files
with PdfPages(save.path + 'check_flux_calib.pdf') as pdf: #Load pdf backend for saving multipage pdfs
#Plot easy preview check of how well the H I lines are being corrected
clf() #Clear page first
expected_continuum = copy.deepcopy(std_flattened) #Create object to store the "expected continuum" which will end up being the average of each order's adjacent blaze functions from what the PLP thinks the blaze is for the standard star
g = Gaussian1DKernel(stddev=5.0) #Do a little bit of smoothing of the blaze functions
for i in range(2,std.n_orders-2): #Loop through each order
adjacent_orders = array([convolve(std.orders[i-1].flux/std_flattened.orders[i-1].flux, g), #Combine the order before and after the current order, while applying a small amount of smoothing
convolve(std.orders[i+1].flux/std_flattened.orders[i+1].flux, g),])
mean_order = nanmean(adjacent_orders, axis=0) #Smooth the before and after order blazes together to estimate what we think the continuum/blaze should be
expected_continuum.orders[i].flux = mean_order #Save the expected continuum
expected_continuum.combine_orders()#Combine all the orders in the expected continuum
HI_line_waves = [2.166120, 1.7366850, 1.6811111, 1.5884880] #Wavelengths of the H I lines we will be previewing
HI_line_labels = ['Br-gamma','Br-10','Br-11', 'Br-14'] #Names of the H I lines we will be previewing
delta_wave = 0.012 # +/- wavelength range to plot on the xaxis of each line preview
n_HI_lines = len(HI_line_waves) #Count up how many H I lines we will be plotting
subplots(nrows=2, ncols=2) #Set up subplots
figtext(0.02,0.5,r"Flux", fontsize=20,rotation=90) #Set shared y-axis label
figtext(0.4,0.02,r"Wavelength [$\mu$m]", fontsize=20,rotation=0) #Set shared x-axis label
#figtext(0.05,0.95,r"Check AOV H I line fits (y-scale: "+str(y_scale)+", y-power: "+str(y_power)+", y_sharpen: "+str(y_sharpen)+" wave_smooth: "+str(wave_smooth)+", std_shift: "+str(std_shift)+")", fontsize=12,rotation=0) #Shared title
std.combine_orders()
waves = std.combospec.wave #Wavelength array to interpolate to
normalized_HI_lines = LinInterpResampler(std_model_synthetic_spectrum, std.combospec.wave*u.um)
normalized_HI_lines = normalized_HI_lines / nansum(normalized_HI_lines.flux.value)
#normalized_HI_lines = a0v_synth_cont(waves)/a0v_synth_spec(waves) #Get normalized lines to the wavelength array
for i in range(n_HI_lines): #Loop through each H I line we want to preview
j = (std.combospec.wave > HI_line_waves[i]-delta_wave) & (std.combospec.wave < HI_line_waves[i]+delta_wave) #Find only pixels in window of x-axis range for automatically determining y axis range
m = (normalized_HI_lines.flux.value[j][0] - normalized_HI_lines.flux.value[j][-1]) / ((normalized_HI_lines.wavelength[j][0] / u.um) - (normalized_HI_lines.wavelength[j][-1] / u.um))
b = normalized_HI_lines.flux.value[j][-1] - m*(normalized_HI_lines.wavelength[j][-1]/u.um)
normalized_HI_lines_corrected = normalized_HI_lines.flux.value / (m*(normalized_HI_lines.wavelength / u.um) + b)
subplot(2,2,i+1) #Set up current line's subplot
#tight_layout(pad=5) #Use tightlayout so things don't overlap
fig = gcf()#Adjust aspect ratio
fig.set_size_inches([15,10]) #Adjust aspect ratio
plot(std.combospec.wave, std.combospec.flux, label='H I Uncorrected', color='gray') #Plot raw A0V spectrum, no H I correction applied
#plot(std.combospec.wave, std.combospec.flux*normalized_HI_lines, label='H I Corrected',color='black') #Plot raw A0V spectrum with H I correction applied
plot(std.combospec.wave, std.combospec.flux / normalized_HI_lines_corrected, label='H I Corrected',color='black') #Plot raw A0V spectrum with H I correction applied
plot(expected_continuum.combospec.wave, expected_continuum.combospec.flux, label='Expected Continuum', color='blue') #Plot the expected continuum, which is the average of each order's adjacent A0V continua
xlim(HI_line_waves[i]-delta_wave, HI_line_waves[i]+delta_wave) #Set x axis range
max_flux = nanmax(std.combospec.flux[j] / normalized_HI_lines_corrected[j]) #Max of y axis range
min_flux = nanmin(std.combospec.flux[j] / normalized_HI_lines_corrected[j]) #Min of y axis range
ylim([0.9*min_flux,1.02*max_flux]) #Set y axis range
title(HI_line_labels[i]) #Set title
if i==n_HI_lines-1: #If last line is being plotted
legend(loc='lower right') #plot the legend
tight_layout(pad=4)
pdf.savefig() #Save plots showing how well the H I correction (scaling H I lines from Vega) fits
return(sci) #Return the spectrum object (1D or 2D) that is now flux calibrated and telluric corrected
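#A minimal self-contained sketch (added for illustration; not part of the original pipeline)
#of the quadrature signal-to-noise combination used above when dividing the science spectrum
#by the standard star: fractional uncertainties add in quadrature.
def combined_s2n(s2n_sci, s2n_std):
    """Return the S/N of sci/std given the S/N of each input spectrum (scalars or arrays)."""
    return ((1.0 / s2n_sci**2) + (1.0 / s2n_std**2))**-0.5
#For example, combined_s2n(100.0, 100.0) ~ 70.7: dividing two spectra of equal S/N costs a
#factor of sqrt(2).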
| 227,893
| 69.446368
| 314
|
py
|
plotspec
|
plotspec-master/ds9.py
|
#Python library for accessing DS9 and XPA.
#Written by Kyle Kaplan March 2014.
import pyds9
#import subprocess
#from subprocess import call, PIPE #Allow python to access command line
#from subprocess import check_output #Allow python to access command line and return result to a variable
import time #To put in delays
global d
d = None #Set d to None initially
#Open DS9
def open():
#call('xpans &', shell=True) #Load XPA immediately, so XPA commands can be sent to ds9, commented out for now, apparently doesn't work
#call('ds9 &', shell=True) #Load DS9
#if check_output('xpaaccess ds9 &', shell=True) == 'no\n':
# call(['/bin/bash', '-i', '-c', 'ds9 &']) #Load DS9 with bash, change bash to whatever shell you use if you are having issues
#while check_output('xpaaccess ds9 &', shell=True) == 'no\n': #While loop that checks every second or so if DS9 is open before continuing
# time.sleep(1) #Wait one second than check if DS9 is open again
global d
if d is None: #Is DS9 not open yet...
d = pyds9.DS9() #Open a DS9 object with pyds9
else: #If DS9 is already open...
print('WARNING: DS9 is already open.')
#Quit DS9
def close():
#call('xpaset -p ds9 exit')
global d
if d is not None: #If DS9 is open
d.set('exit') #Quit DS9
d = None #Blank out holder for DS9 object
else:
print('WARNING: DS9 is already closed.')
#Get xpaget statements from ds9
def get(command):
#result = check_output('xpaget ds9 ' + command, shell=True) #Get information from ds9 using XPA get
global d
if d is not None: #If DS9 is open
result = d.get(command) #Get information from DS9 and store in result
#wait(1.0)
return(str(result).strip()) #return information grabbed from ds9
else: #If DS9 is not open
print('Warning: DS9 is not open.')
return(None)
#Send xpaset commands to ds9
def set(command):
#call('xpaset -p ds9 ' + command, shell=True)
#time.sleep(0.5) #Set delay after each command to give computer time to respond before the next command
global d
if d is not None: #If DS9 is open
d.set(command) #Send command to DS9
#wait(1.0)
else: #If DS9 is not open
print('Warning: DS9 is not open.')
#Allow user to set delays in DS9 scripts
def wait(delay):
time.sleep(delay)
#Special command for drawing regions onto ds9
def draw(command):
#print 'echo "' + command + '" | xpaset ds9 regions'
#call('echo "' + command + '" | xpaset ds9 regions', shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
global d
if d is not None: #If DS9 is open
d.set('regions', command)
#wait(1.0)
else: #If DS9 is not open
print('Warning: DS9 is not open.')
def rot(angle):
set('rotate '+str(angle))
def rotto(angle):
set('rotate to '+str(angle))
def north():
set('rotate to 0')
def show(fits_file, new = False):
if new: #Check if there are any frames, default no, but user can set new = True
set('frame new') #If not create a new frame
set('fits '+ fits_file) #open fits file
set('scale log') #Set view to log scale
set('scale Zscale') #Set scale limits using the zscale algorithm, looks okay
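#Example usage (an illustrative sketch, assuming pyds9 is installed and a ds9 binary is on
#the PATH; 'image.fits' is a placeholder filename):
# import ds9
# ds9.open() #Launch (or attach to) DS9
# ds9.show('image.fits', new=True) #Display a fits file in a new frame
# ds9.rotto(45.0) #Rotate the display to a position angle of 45 degrees
# ds9.draw('image; circle(512, 512, 20)') #Overplot a circular region
# ds9.close() #Quit DS9 when finished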
| 3,086
| 33.685393
| 139
|
py
|
plotspec
|
plotspec-master/make_datacube.py
|
#Library for making and processing the IGRINS datacube
from pylab import *
from scipy.ndimage import zoom, binary_closing #For resizing images
from astropy.io import fits, ascii #Use astropy for processing fits files, ASCII text files
from astropy.convolution import interpolate_replace_nans, Gaussian2DKernel #Gaussian2DKernel is needed by fill_gaps below
from plotspec import * #Import plotspec library
import h2 #Import H2 library
from numpy import round
import gc #Load in python's garbage collector
from bottleneck import *
#~~~~~~~~~~~~~~~MODIFY THESE PARAMETERS FOR IGRINS DATA~~~~~~~~~~~~~~~~
save.name('Datacube Name') #Name to save results as
#~~~~~~~~~~~~~~~~~~~~~READ INPUT FILE~~~~~~~~~~~~~~~~~~~~~~~~~~~
input_file = 'demo_datacube_input.dat'
input = ascii.read(input_file)
n_pointings = input['Date'].size
#~~~~~~~~~~~~~~~MODIFY THESE PARAMETERS FOR ASTROMETRY~~~~~~~~~~~~~~~~
velocity_range = 100.0 #Set range of velocity axis in datacube
velocity_res = 1.0 #Set size of pixel in velocity space
ra = 83.83205 #RA of center of reference slit pointing in decimal degrees
dec = -5.42417 #Dec of center of reference slit pointing in decimal degrees
reference_pointing_date = 20141125 #Night of pointing where the center of the slit is used to determine the RA and Dec. for the astrometry
reference_pointing_frameno = 120 #Frame number of pointing where center of the slit is used to determine the RA and Dec for the astrometry
flux_calibrate = True #Turn on or off relative flux calibration
subtract_continuum = False #Turn on or off continuum subtraction
use_blocks = False #Use blocks for flux calibration (set in input file)? If not using blocks, turn it off here to speed up datacube building
fill_nans = True #Turn on or off filling nans
save_checks = True #Save pdfs of the output of each pointing (used for checking flux calibration, ect.)
flux_calibration_line = '1-0 S(1)' #Name of emission line to use for flux calibration, should be something bright!
flux_calibration_velocity_range = 10.0 #Range of velocities to collapse for flux calibration in +/- km/s
flux_calibration_s2n_thresh = 3.0 #Threshold for pixel S/N to be used for flux calibration
length_of_slit_on_sky = 15.0 #Length of the slit on the sky, in arcseconds, depends on the telescope IGRINS was on, here it is set for McDonald
#~~~~~~~~~~~~~ONLY MODIFY THESE PARAMETERS IF YOU KNOW WHAT YOU ARE DOING~~~~~~~~~~~~~~~~
plate_scale = length_of_slit_on_sky / float(slit_length) #Calculate the plate scale by dividing the slit length on the sky in arcseconds by the slit length in pixels
x_expand = int((length_of_slit_on_sky/15.0)/plate_scale) #Number of times to expand along the x axis perpendicular to the slit; this makes the x and y plate scales match
#Shape of final 4D array in the form of (# of spectral lines, pixels along slit,velocity, pixels prependicular to slit, ), x_expand expands along the x-axis (pointings) to make place scale same in x & y
#~~~~~~~~~~~~~~~~~~~~Main Library~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def save(line_cube, fname): #Save datacube for an extracted cube for a spectral line as a fits file, for 3D datacubes add header info about velocity information
n_dimensions = ndim(line_cube) #Calculate number of dimensions
cube_fits = fits.PrimaryHDU(line_cube) #Open astropy fits object
cube_fits.header['RADECSYS'] = 'FK5' #Define coord system
cube_fits.header['EQUINOX'] = 2000 #Define equinox, here set to J2000
cube_fits.header['CTYPE1'] = 'RA---TAN' #Set axis 1 coordinate type to RA with a gnomonic (TAN) projection
cube_fits.header['CTYPE2'] = 'DEC--TAN' #Set axis 2 coordinate type to Dec with a gnomonic (TAN) projection
if n_dimensions == 3: #If velocity information exists.... else fits file saved will be a 2D image
cube_fits.header['CTYPE3'] = 'km/s' #Set unit to "Optical velocity" (I know it's really NIR but whatever...)
cube_fits.header['CRVAL1'] = ra #RA of reference pixel
cube_fits.header['CRVAL2'] = dec #Dec of reference pixel
if n_dimensions == 3: #If velocity information exists.... else fits file saved will be a 2D image
cube_fits.header['CRVAL3'] = 0.0 #Velocity of reference pixel
global pa_reference_pixel, x_reference_pixel, y_reference_pixel #Since the reference pixels are set as global variables, access them globally here
cube_fits.header['CRPIX1'] = x_reference_pixel #Set reference pixel along the x-axis
cube_fits.header['CRPIX2'] = y_reference_pixel #Set reference pixel along the y-axis
if n_dimensions == 3: #If velocity information exists.... else fits file saved will be a 2D image
cube_fits.header['CRPIX3'] = (velocity_range / velocity_res) + 1 #Set zero point to where v=0 km/s (middle of stamp)
cube_fits.header['CDELT3'] = velocity_res #Set the velocity step per pixel along the third axis
fits_plate_scale = plate_scale*(1./3600.) #Convert the plate scale from arcseconds per pixel to degrees per pixel
angle = pa_reference_pixel * pi/180.0 #Set angle with the PA of the reference pixel
cube_fits.header['CD1_1'] = fits_plate_scale*cos(angle) #cd[0,0] #Save rotation and plate scale transformation matrix into fits file header
cube_fits.header['CD1_2'] = fits_plate_scale*sin(angle) #cd[1,0]
cube_fits.header['CD2_1'] = -fits_plate_scale*sin(angle) #cd[0,1]
cube_fits.header['CD2_2'] = fits_plate_scale*cos(angle) #cd[1,1]
cube_fits.writeto(fname, overwrite = True) #Save fits file
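#Illustrative note on the CD matrix written above (not part of the original code): CD encodes
#both the plate scale and the rotation by the reference position angle. For PA = 0 it reduces
#to CD = [[s, 0], [0, s]] with s = plate_scale/3600 degrees per pixel, and for PA = 90
#degrees to CD = [[0, s], [-s, 0]], i.e. a pure 90-degree rotation of the pixel grid.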
class cube_geometry(): #Store all geometry based on reading in all the rotations and shifts for each slit
def __init__(self, input_PAs, input_x_shifts, input_y_shifts):
global pa_reference_pixel, x_reference_pixel, y_reference_pixel #Since the reference pixels are set as global variables, access them globally here
n = len(input_PAs) #Number of slits to store
PA_difference = (input_PAs - pa_reference_pixel) % 360.0 #Store difference in all PAs from the first frame
x_start = zeros(n) #Store starting pixels coords in x for a given position
x_end = zeros(n) #Store ending pixels coords in x for a given position
y_start = zeros(n) #Store starting pixels coords in y for a given position
y_end = zeros(n) #Store ending pixels coords in y for a given position
if x_expand % 2 == 1: #If x_expand is odd
negative_width_shift = -(x_expand-1) / 2 #Calculate position of the ends of the slit width
positive_width_shift = ((x_expand-1)/2) +1
else: #If xexpand is even
negative_width_shift = -x_expand / 2 #Calculate position of the ends of the slit width
positive_width_shift = x_expand / 2
negative_length_shift = -slit_length / 2
if slit_length % 2 == 1: #If slit length is odd
positive_length_shift = (slit_length / 2) #Half the slit length in pixels (the odd case is handled the same way here)
else: #If slit length is even
positive_length_shift = (slit_length / 2) #Just divide an even numbered slit length by two, simple eh?
for i in range(n): #Loop through each slit
if PA_difference[i] == 0.0 or PA_difference[i] == 180.0: #or PA_difference[i] == 180.0: #If PA of this pointing is same PA as the first pointing
x_start[i] = input_x_shifts[i] + negative_width_shift #Set up coordinates for this slit to be painted into the datacube
x_end[i] = input_x_shifts[i] + positive_width_shift
y_start[i] = input_y_shifts[i] + negative_length_shift
y_end[i] = input_y_shifts[i] + positive_length_shift
elif PA_difference[i] == 90.0 or PA_difference[i] == 270.0: #or PA_difference[i] == 270.0: #If PA rotated 90 degrees from first pointing
x_start[i] = input_x_shifts[i] + negative_length_shift #Set up coordinates for this slit to be painted into the datacube
x_end[i] = input_x_shifts[i] + positive_length_shift
y_start[i] = input_y_shifts[i] + negative_width_shift
y_end[i] = input_y_shifts[i] + positive_width_shift
# elif PA_difference[i] == 180.0: #If PA rotated 180 degrees from first pointing
# x_start[i] = input_x_shifts[i] + negative_width_shift #Set up coordinates for this slit to be painted into the datacube
# x_end[i] = input_x_shifts[i] + positive_width_shift
# y_start[i] = input_y_shifts[i] + negative_length_shift
# y_end[i] = input_y_shifts[i] + positive_length_shift
# elif PA_difference[i] == 270.0: #If PA rotated 270 degrees from first pointing
# x_start[i] = input_x_shifts[i] + negative_length_shift#Set up coordinates for this slit to be painted into the datacube
# x_end[i] = input_x_shifts[i] + positive_length_shift
# y_start[i] = input_y_shifts[i] + negative_width_shift
# y_end[i] = input_y_shifts[i] + positive_width_shift
else: #catch error of improper PA inputs
raise ValueError('PA must be in 90 degree increments from the first pointing.')
all_x_start_and_end_points = concatenate([x_start, x_end]) #Cram all x and y start and ending variables into single arrays
all_y_start_and_end_points = concatenate([y_start, y_end])
x_size = max(all_x_start_and_end_points) - min(all_x_start_and_end_points) #Calculate x,y dimensions of datacube
y_size = max(all_y_start_and_end_points) - min(all_y_start_and_end_points)
cube_x_start = min(x_start) #Find the starting corner of the cube
cube_y_start = min(y_start)
x_start, x_end = [x_start - cube_x_start, x_end - cube_x_start] #Shift pixels in x direction to make left-most pixel 0
y_start, y_end = [y_start - cube_y_start, y_end - cube_y_start] #Shift pixels in y direction to make left-most pixel 0
min_x_start = min(x_start) #Find the minimum x and y starting pixels to search for negative numbers
min_y_start = min(y_start)
min_x_end = min(x_end)
min_y_end = min(y_end)
if min_x_start < 0: #If any negative numbers are found, shift pixel starting positions to make all numbers positive
x_start = x_start - min_x_start
x_end = x_end - min_x_start
if min_y_start < 0:
y_start = y_start - min_y_start
y_end = y_end - min_y_start
if min_x_end < 0:
x_start = x_start - min_x_end
x_end = x_end - min_x_end
if min_y_end < 0:
y_start = y_start - min_y_end
y_end = y_end - min_y_end
self.x_start = x_start #Store pixels shifts
self.y_start = y_start
self.x_end = x_end
self.y_end = y_end
self.PA_difference = PA_difference #Store PA differences
self.x_size = x_size #Store cube sizes
self.y_size = y_size
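#Worked example of the slit-width centering above (illustrative, not part of the original
#code): for an odd x_expand = 3 the width shifts are (-1, +2), so x_start:x_end spans 3
#pixels centered on the pointing; for an even x_expand = 4 they are (-2, +2), spanning 4
#pixels with the pointing offset half a pixel from the geometric center.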
#Class that stores orion bar cube bar data
class data():
def __init__(self): #Initialize class and create datacube in memory
global x_reference_pixel, y_reference_pixel, pa_reference_pixel #Store reference pixels as global variables so the "save" definition can easily access them when building fits headers; everything else is a static variable set at the beginning of the code, so this is the only place we need globals
ref_pixel_index = (input["Date"] == reference_pointing_date) & (input["FrameNo"] == reference_pointing_frameno) #Grab index of the reference pixel
pa_reference_pixel = float(input["PA"][ref_pixel_index]) #Grab PA of the reference pixel
xshift_pix = -round(input["X_Shift"]/plate_scale).astype(int) #Find starting x position in pixels for each pointing
yshift_pix = round(input["Y_Shift"]/plate_scale).astype(int) #Find starting y position in pixels for each pointing
geo = cube_geometry(input['PA'], xshift_pix, yshift_pix) #create object storing PA and coordinates for all pointings, along with dynamically sizing the cube
for i in range(n_pointings): #Loop through each pointing to paint into datacube
spectral_lines = lines(input['Spec_Line_File'][i], delta_v =input['Delta_V'][i]) #Spectral lines to use
v_shift_file = input['V_Shift_File'][i] #File that defines any velocity shifts between lines, which can be used to correct for species moving at different velocities
spec1d, spec2d = getspec(input["Date"][i], input["WaveNo"][i], input["FrameNo"][i], input["StdNo"][i], B=input['A0V_B'][i], V=input['A0V_V'][i], #Create 1D and 2D spectra objects for all orders combining both H and K bands (easy eh?)
y_scale=input["A0V_HI_Scale"][i], wave_smooth=input["A0V_Smooth"][i], savechecks=save_checks)
if subtract_continuum: #If user specifies to subtract the continuum
spec2d.subtract_continuum() #Subtract continuum from 2D spectrum
spec1d.combine_orders() #Combine all orders in 1D spectrum into one very long spectrum
spec2d.combine_orders() #Combine all orders in 2D spectrum into one very long spectrum
if fill_nans: #If user specifies to fill nans
spec2d.fill_nans(size=5) #Fill nans with a median filter on a column by column basis
if i==0: #If this is the first slit, trim the line list and initialize arrays to store the flux and variance datacubes
parsed_spectral_lines = spectral_lines.parse(nanmin(spec2d.combospec.wave), nanmax(spec2d.combospec.wave)) #Only grab lines within the wavelength range of the current order
use_line_for_flux_calib = parsed_spectral_lines.label == flux_calibration_line
#Shape of final 4D array in the form of (# of spectral lines, pixels along slit,velocity, pixels prependicular to slit, ), x_expand expands along the x-axis (pointings) to make place scale same in x & y
cube_size = array([len(parsed_spectral_lines.label), geo.y_size, 2*velocity_range/velocity_res, geo.x_size]).astype(int)
master_cube = zeros(cube_size) #Initialize master array to store flux in a datacube
master_cube_var = zeros(cube_size) #Initialize master array to store variance
master_cube_overlap = zeros(cube_size) #Initialize master array to store how many pointings overlap with a given pixel
if use_blocks: #if user specifies to use blocks for relative flux calibrating overlapping pointings
block_cube = zeros(cube_size) #Initialize temporary block array to store flux in a datacube
block_cube_var = zeros(cube_size) #Initialize temporary block array to store variance
block_cube_overlap = zeros(cube_size) #Initialize temporary block array to store how many pointings overlap with a given pixel
else: #If this is not the first slit....
if use_blocks and input["Block"][i] != input["Block"][i-1]: #If we are in a new block...
block_cube[:] = 0. #re-blank the block arrays for the next block
block_cube_var[:] = 0.
block_cube_overlap[:] = 0
parsed_spectral_lines.recalculate_wavelengths(input['Delta_V'][i]) #Recalculate the wavelengths in the line list based on the velocity of this observation
try: #Try to see if v_shift_file works
if v_shift_file == '-': #If user specifies no v_shift_file
v_shift_file = '' #Tell position_velocity object not to use it
pv = position_velocity(spec1d.combospec, spec2d.combospec, parsed_spectral_lines, shift_lines=v_shift_file) #Extract and create a datacube in position-velocity space of all lines in line list(s) found in spectrum
except Exception: #If not, just catch the error and ignore it for now
pv = position_velocity(spec1d.combospec, spec2d.combospec, parsed_spectral_lines) #Extract and create a datacube in position-velocity space of all lines in line list(s) found in spectrum
velocity = pv.velocity
velocity_cut = (velocity > -flux_calibration_velocity_range) & (velocity < flux_calibration_velocity_range) #Make a cut in velocity space, if the user so specifies, otherwise use all +/- 100 km/s in the cube
nans = isnan(pv.pv) #Zero out nans before stacking
pv.pv[nans] = 0.
pv.var2d[nans] = 0.
if input['Exp'][i] != 1.0: #If exposure times vary
pv.pv /= input['Exp'][i] #Scale flux
pv.var2d /= input['Exp'][i]**2 #Scale variance (to propagate uncertainty)
print('PA diff = ', geo.PA_difference[i])
if geo.PA_difference[i] == 0.0 or geo.PA_difference[i] == 180.0:#If PA of this pointing is same PA or 180 degrees as the first pointing
flux = tile(pv.pv[:,:,:, newaxis], x_expand) #Expand out flux and variance arrays to match this angle
var = tile(pv.var2d[:,:,:, newaxis], x_expand)
if geo.PA_difference[i] == 0.0: #If PAs between reference pointing and this are the same
print('Flipping order of flux array for i=', i, 'Shape= ', shape(flux))
flux = flux[:,::-1,:,:] #Invert the flux and var arrays along the x axis
var = var[:,::-1,:,:]
elif geo.PA_difference[i] == 90.0 or geo.PA_difference[i] == 270.0: #If PA rotated 90 degrees one way or the other from first pointing
flux = swapaxes( tile(pv.pv[:,newaxis,:,:], (1,x_expand,1,1) ) , 2,3) #Expand out flux and variance arrays to match this angle
var = swapaxes( tile(pv.var2d[:,newaxis,:,:], (1,x_expand,1,1) ) , 2,3)
if geo.PA_difference[i] == 90.0: #If the PA is rotated 90 degrees from the reference pointing
print('Flipping order of flux array for i=', i, 'Shape= ', shape(flux))
flux = flux[:,:,:,::-1] #Invert the flux and var arrays along the y axis
var = var[:,:,:,::-1]
x1 = geo.x_start[i].astype(int) #Grab x and y pixel ranges to use for constructing the datacube
x2 = geo.x_end[i].astype(int)
y1 = geo.y_start[i].astype(int)
y2 = geo.y_end[i].astype(int)
find_nans = isnan(flux) #Find nans and set them to zero
flux[find_nans] = 0.
var[find_nans] = 0.
flux_holder = flux #Place flux and variance into temporary holders
var_holder = var
#Flux calibrate if whole width of any part of the slit overlaps part of the datacube that is already built
ratio = 1.0
if flux_calibrate: #If user turns flux calibration on, flux calibrate the current slit inside the current block
if use_blocks: #If user want to flux calibrate in blocks
cube_slice = block_cube[use_line_for_flux_calib,y1:y2,velocity_cut,x1:x2] #grab slice of flux and variance in block cube overlapping current pointing
cube_var_slice = block_cube_var[use_line_for_flux_calib,y1:y2,velocity_cut,x1:x2]
else: #If user does not want to flux calibrate in blocks
cube_slice = master_cube[use_line_for_flux_calib,y1:y2,velocity_cut,x1:x2] #grab slice of flux and variance in main datacube overlapping current pointing
cube_var_slice = master_cube_var[use_line_for_flux_calib,y1:y2,velocity_cut,x1:x2]
#if abs(x1-x2) == slit_length: #If the slit is placed in the x-direction
# useable_pixels = (cube_slice/sqrt(cube_var_slice) > flux_calibration_s2n_thresh) & isfinite(sum(cube_slice, axis=1, keepdims=True))#Find pixels along the slit length that can be used for flux calibration, if a nan exists along the slit width, it gets naned out in the sum above, thereby we only use pixels along the slit width if the existing cube fully covers them
#else: #If the slit is placed in the y directio
# useable_pixels = (cube_slice/sqrt(cube_var_slice) > flux_calibration_s2n_thresh) & isfinite(sum(cube_slice, axis=1, keepdims=True))#Find pixels along the slit length that can be used for flux calibration, if a nan exists along the slit width, it gets naned out in the sum above, thereby we only use pixels along the slit width if the existing cube fully covers them
useable_pixels = cube_slice/cube_var_slice**0.5 > flux_calibration_s2n_thresh #sigma clip out low S/N pixels so that we only use pixels of sufficient S/N for the flux calibration of this current pointing
if any(useable_pixels): #If any pixels are useable, do the flux calibration by summing everything up and taking the ratio
flux_slice = flux_holder[use_line_for_flux_calib,:,velocity_cut,:]
ratio = nansum(cube_slice[useable_pixels])/nansum(flux_slice[useable_pixels])
print('i=',i,' Ratio =', ratio, ' # of usable pixels=', sum(useable_pixels), 'shape of cube slice=', shape(cube_slice))
if use_blocks: #If user wants to flux calibrate in blocks, run through the current block and flux calibrate it; when the end of the block is reached, add it to the master datacube
block_cube_overlap[:,y1:y2,:,x1:x2] += 1.0 #Increment the counter for how many pointings these pixels used
inverse_overlap_scale = 1.0 / block_cube_overlap[:,y1:y2,:,x1:x2]
#block_cube[:,y1:y2,:,x1:x2] = (block_cube[:,y1:y2,:,x1:x2] * (1.0-inverse_overlap_scale)) + (flux_holder * ratio * inverse_overlap_scale) #Load slit into master data cube, normalize flux by expnasion done along x-axis
block_cube[:,y1:y2,:,x1:x2] *= 1.0-inverse_overlap_scale
block_cube[:,y1:y2,:,x1:x2] += flux_holder * ratio * inverse_overlap_scale
#block_cube_var[:,y1:y2,:,x1:x2] = block_cube_var[:,y1:y2,:,x1:x2] * (1.0-inverse_overlap_scale) + (var_holder * ratio**2 * inverse_overlap_scale) #Load slit into master variance cube
block_cube_var[:,y1:y2,:,x1:x2] *= 1.0-inverse_overlap_scale
block_cube_var[:,y1:y2,:,x1:x2] += var_holder * ratio**2 * inverse_overlap_scale
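#Illustrative note on the in-place update above (not part of the original code): with n
#pointings having touched a pixel, the update
#  cube = cube*(1 - 1/n) + new*(1/n)
#is an incremental arithmetic mean. E.g. after painting values 2.0 and then 4.0 the pixel
#holds 2.0*(1/2) + 4.0*(1/2) = 3.0, identical to averaging both pointings at once but
#without having to store them all in memory.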
end_of_block = False #First set the end of block boolean to = False
if i > 0 or n_pointings == 1: #But now search if the end of the block is true
if (i == n_pointings-1): #If this is the last pointing, it is definitely the end of the last block
end_of_block = True
elif (input["Block"][i] != input["Block"][i+1]): #But if it is not the last pointing, check if the next pointing is in a different block, if it is this pointing is the end of the current block
end_of_block = True
if end_of_block: #If we are in a new block or at the end of the list, flux calibrate the last pionting in this block and then flux calibrate block to the master datacube
if flux_calibrate and input["Block"][i] > 1: #And flux calibration is set to on
block_cube_slice = block_cube[use_line_for_flux_calib,:,velocity_cut,:] #Make a cut of the block for the flux calibration velocity range and line to use
#block_var_slice = block_cube_var[use_line_for_flux_calib,:,velocity_cut,:] #Make a cut of the block variance for the flux calibration velocity range and line to use
master_cube_slice = master_cube[use_line_for_flux_calib,:,velocity_cut,:] #Make a cut of the master cube for the flux calibration velocity range and line to use
#master_cube_var_slice = master_cube_var[use_line_for_flux_calib,:,velocity_cut,:] #Make a cut of the master cube variance for the flux calibration velocity range and line to use
useable_pixels = (block_cube_slice/block_cube_var[use_line_for_flux_calib,:,velocity_cut,:]**0.5 > flux_calibration_s2n_thresh) & (master_cube_slice/master_cube_var[use_line_for_flux_calib,:,velocity_cut,:]**0.5 > flux_calibration_s2n_thresh) #Find pixels that overlap in block and
ratio = nansum(master_cube_slice[useable_pixels]) / nansum(block_cube_slice[useable_pixels]) #Calculate ratio to apply relative flux calibration of master cube to the current block
else: #If there is no flux calibration, do not flux calibrate and just set ratio = 1
useable_pixels = False
ratio = 1.0
print('Block = ', input["Block"][i],' Ratio =', ratio, ' # of usable pixels=', sum(useable_pixels))
block_pixels = block_cube_overlap > 0.0 #Find all pixels that block occupies
block_fraction = block_cube_overlap[block_pixels] / (master_cube_overlap[block_pixels] + block_cube_overlap[block_pixels]) #Find fraction of overlapping slits the block occupies in the master cube
#master_cube[block_pixels] = (master_cube[block_pixels]*(1.0-block_fraction)) + (block_cube[block_pixels]*block_fraction*ratio) #Load slit into master data cube, normalize flux by expnasion done along x-axis
master_cube[block_pixels] *= 1.0-block_fraction
master_cube[block_pixels] += block_cube[block_pixels]*block_fraction*ratio
#master_cube_var[block_pixels] = (master_cube_var[block_pixels]*(1.0-block_fraction)) + (block_cube_var[block_pixels]*block_fraction*ratio**2) #Load slit into master variance cube
master_cube_var[block_pixels] *= 1.0-block_fraction
master_cube_var[block_pixels] += block_cube_var[block_pixels]*block_fraction*ratio**2
master_cube_overlap[block_pixels] += block_cube_overlap[block_pixels] #Increment the counter for how many pointings these pixels used
else: #If user does not want to use blocks for flux calibration, flux calibrate this pointing directly into the master datacube
master_cube_overlap[:,y1:y2,:,x1:x2] += 1.0 #Increment the counter for how many pointings these pixels used
inverse_overlap_scale = 1.0 / master_cube_overlap[:,y1:y2,:,x1:x2]
master_cube[:,y1:y2,:,x1:x2] *= 1.0-inverse_overlap_scale
master_cube[:,y1:y2,:,x1:x2] += flux_holder * ratio * inverse_overlap_scale
master_cube_var[:,y1:y2,:,x1:x2] *= 1.0-inverse_overlap_scale
master_cube_var[:,y1:y2,:,x1:x2] += var_holder * ratio**2 * inverse_overlap_scale
if input["Date"][i] == reference_pointing_date and input["FrameNo"][i] == reference_pointing_frameno:
x_reference_pixel = (float(x1)+float(x2))/2.0 #Set reference pixels to be center of first slit pointing
y_reference_pixel = (float(y1)+float(y2))/2.0 #Set reference pixels to be center of first slit pointing
#pa_reference_pixel = float(input["PA"][i]) #Set PA of reference pixel
gc.collect() #Do garbage collection to free up memory lying around
#master_cube /= master_cube_overlap.astype(float) * float(x_expand)#Scale overlapping pixels down by the number of pointings overlapping a given pixel , #store full datacube in bar object, and normalize flux to number of pointings (divide by factor we expand x axis by)
#master_cube_var /= master_cube_overlap.astype(float)**2 * float(x_expand)**2 #Also for the variance: scale overlapping pixels down by the number of pointings overlapping a given pixel #Store full variance datacube in bar object, and normalize uncertainity to number of pointings
master_cube /= float(x_expand) #Normalize the flux by the factor the x axis was expanded by (overlap normalization was already applied incrementally above)
master_cube_var /= float(x_expand)**2 #Same normalization for the variance, which scales as the square
empty_pixels = master_cube == 0.0 #Blank out empty pixels with nans
master_cube[empty_pixels] = nan
master_cube_var[empty_pixels] = nan
self.cube = master_cube #Store the full flux datacube in the object
self.var = master_cube_var #Store the full variance datacube in the object
self.label = parsed_spectral_lines.label #Save labels of spectral lines so that lines can be later retrieved by matching their label strings to what the user wants
self.velocity = velocity #Save velocity per pixel along velocity axis for later being able to cut and collapse parts of the datacubes
def getline(self, input_label, variance=False): #Grab a datacube for a chosen spectral line
chosen_line = where(self.label == input_label)[0][0] #Select which spectral line to extract from string inputted by user to match the label of the line
if variance: #If user specifies they want the variance...
grab_line_cube = swapaxes(self.var[chosen_line,:,:,:],0,1) #Extract datacube for that line
else: #Else grab the flux cube (default)
grab_line_cube = swapaxes(self.cube[chosen_line,:,:,:],0,1) #Extract datacube for that line
return(grab_line_cube) #Return the extracted cube
def getimage(self, input_label, vrange=[-velocity_range, velocity_range], variance=False): #Get a line and collapse into 2D; a velocity range can be specified to reduce noise
line_cube = self.getline(input_label, variance=variance) #Grab line cube
velocity_cut = (self.velocity > vrange[0]) & (self.velocity < vrange[1]) #Make a cut in velocity space, if the user so specifies, otherwise use all +/- 100 km/s in the cube
collapsed_cube = nansum(line_cube[velocity_cut,:,:], 0) #Collapse cube along the velocity axis into an image
collapsed_cube[collapsed_cube == 0.] = nan #Blank out unused pixels with nan
return collapsed_cube
def saveimage(self, input_label, fname, variance=False, vrange=[-velocity_range, velocity_range]): #Save a 2D collapsed image
img = self.getimage(input_label, vrange=vrange, variance=variance)
save(img, fname) #save image
def savecube(self, input_label, fname, variance=False, collapse=False): #Extract and save a spectral line cube as a fits file
line_cube = self.getline(input_label, variance=variance) #Grab line cube
save(line_cube, fname) #Save linecube as a fits file
def saveratio(self, input_label_1, input_label_2, vrange=[-velocity_range, velocity_range], s2n_cut=0.0, fname=''): #Save image of a ratio of two lines
flux1 = self.getimage(input_label_1, vrange=vrange) #Grab collapsed images of both lines
flux2 = self.getimage(input_label_2, vrange=vrange)
sig1 = self.getimage(input_label_1, vrange=vrange, variance=True)**0.5 #Grab 1-sigma uncertainty for each image
sig2 = self.getimage(input_label_2, vrange=vrange, variance=True)**0.5
ratio = flux1 / flux2 #Take ratio of line 1 / line 2
sigma = ratio * ((sig1/flux1)**2 + (sig2/flux2)**2)**0.5 #Uncertainty in the ratio
s2n = ratio/sigma #Calculate S/N on ratio
if s2n_cut != 0.0: #If user specifies a S/N cut, mask regions with lower S/N
low_s2n_regions = s2n < s2n_cut #Find regions below the user-specified S/N cut
ratio[low_s2n_regions] = 0. #Set regions with low S/N to zero and ignore them
if fname =='': #If file name is not specified by user
fname = 'ratio_'+input_label_1+'_over_'+input_label_2+'.fits' #automatically create own filename
save(ratio, fname) #Save ratio map
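#Worked example of the ratio uncertainty above (illustrative, not part of the original
#code): for flux1 = 10 +/- 1 and flux2 = 5 +/- 0.5 the ratio is 2.0 with
#  sigma = 2.0*((1/10)**2 + (0.5/5)**2)**0.5 ~ 0.28
#since fractional uncertainties add in quadrature.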
def extract_half(self, vrange=[-velocity_range, velocity_range], dim=False, use_line='1-0 S(1)', median_multiplier=1.0): #Extract flux from regions above (or below) some fraction of the median flux of the specified line
h = h2.make_line_list() #Set up object for storing H2 transitions
img = self.getimage(use_line, variance=False, vrange=vrange)
median_flux = nanmedian(img) #Use nanmedian since unused pixels are filled with nans
if dim == False: #Use bright regions or...
use_region = img >= median_flux * median_multiplier
else: #Use dim regions
use_region = img <= median_flux * median_multiplier
for label in self.label: #Loop through each line and save the various results into an h2_transitions object
img = self.getimage(label, vrange=vrange)
var = self.getimage(label, vrange=vrange, variance=True)
total_flux = nansum(img[use_region])
total_var = nansum(var[use_region])
total_sigma = total_var**0.5
find_line = h.label == label
h.F[find_line] = total_flux
h.sigma[find_line] = total_sigma
h.s2n[find_line] = total_flux / total_sigma
return(h)
def extract_full(self, vrange=[-velocity_range, velocity_range]): #Extract full datacube to get H2 line fluxes
h = self.extract_half(vrange=vrange, dim=False, median_multiplier=0.0) #Pretend we are extracting a bright or dim region, but extract everything by setting the threshold to 0
return(h) #Return the result
#Function fills any gaps
# def fill_gaps(self, size=1, axis='x'):
# cube = copy.deepcopy(self.cube)
# var = copy.deepcopy(self.var)
# n_lines, ny, n_velocity, nx = shape(cube) #Calculate pixel sizes
# filled_slice = zeros([ny,nx]) #Create array that will store the smoothed median spectrum
# filled_slice_var = zeros([ny,nx])
# if axis == 'x':
# x_left = arange(nx) - size #Create array to store left side of running median
# x_right = arange(nx) + size + 1 #Create array to store right side of running median
# x_left[x_left < 0] = 0 #Set pixels beyond edge of order to be nonexistant
# x_right[x_right > nx] = nx - 1 #Set pixels beyond right edge of order to be nonexistant
# x_size = x_right - x_left #Calculate number of pixels in the x (wavelength) direction
# #g = Gaussian2DKernel(stddev=5.0, x_size=3, y_size=3) #Set up convolution kernel
# for i in xrange(n_lines): #Loop through every spectral line
# for j in xrange(n_velocity): #Loop through every velocity slice
# for k in xrange(nx): #This loop does the running of the median down the spectrum each pixel
# filled_slice[:,k] = nanmedian(cube[i,:,j,x_left[k]:x_right[k]], axis=1) #Calculate median between x_left and x_right for a given pixel
# filled_slice_var[:,k] = nanmedian(var[i,:,j,x_left[k]:x_right[k]], axis=1) #Calculate median between x_left and x_right for a given pixel
# nans_found = ~isfinite(cube[i,:,j,:]) #Find nans
# cube[i,:,j,:][nans_found] = filled_slice[nans_found] #Fill in the gaps
# var[i,:,j,:][nans_found] = filled_slice_var[nans_found] #Fill in the gaps
# elif axis == 'y':
# y_left = arange(ny) - size #Create array to store left side of running median
# y_right = arange(ny) + size + 1 #Create array to store right side of running median
# y_left[y_left < 0] = 0 #Set pixels beyond edge of order to be nonexistant
# y_right[y_right > ny] = ny - 1 #Set pixels beyond right edge of order to be nonexistant
# y_size = y_right - y_left #Calculate number of pixels in the y (wavelength) direction
# #g = Gaussian2DKernel(stddev=5.0, x_size=3, y_size=3) #Set up convolution kernel
# for i in xrange(n_lines): #Loop through every spectral line
# for j in xrange(n_velocity): #Loop through every velocity slice
# for k in xrange(ny): #This loop does the running of the median down the spectrum each pixel
# filled_slice[k,:] = nanmedian(cube[i,y_left[k]:y_right[k],j,:], axis=0) #Calculate median between x_left and x_right for a given pixel
# filled_slice_var[k,:] = nanmedian(var[i,y_left[k]:y_right[k],j,:], axis=0) #Calculate median between x_left and x_right for a given pixel
# nans_found = ~isfinite(cube[i,:,j,:]) #Find nans
# cube[i,:,j,:][nans_found] = filled_slice[nans_found] #Fill in the gaps
# var[i,:,j,:][nans_found] = filled_slice_var[nans_found] #Fill in the gaps
# self.cube = cube #Update datacube
# self.var = var #Update datacube variance
def fill_gaps(self, size=[3,3]): #Function fills any gaps; experimental version at the moment, comment out this one and uncomment the one above to go back to the old version
n_lines, ny, n_velocity, nx = shape(self.cube) #Calculate pixel sizes
mask = zeros([ny+2, nx+2], dtype=int) #Create a padded array to store the finite-pixel mask
kernel = Gaussian2DKernel(0.25, x_size=size[0], y_size=size[1]) #Small smoothing kernel used to interpolate over nans (positional stddev works across astropy versions)
structure = ones(size, dtype=int)
for i in range(n_lines):
for j in range(n_velocity):
cube_slice = self.cube[i,:,j,:]
var_slice = self.var[i,:,j,:]
smoothed_cube_slice = interpolate_replace_nans(cube_slice, kernel=kernel)
smoothed_var_slice = interpolate_replace_nans(var_slice, kernel=kernel)
mask[1:-1,1:-1][isfinite(cube_slice)] = 1
pixels_to_replace = (mask - binary_closing(mask, structure=structure))[1:-1,1:-1] < 0
mask[:] = 0
cube_slice[pixels_to_replace] = smoothed_cube_slice[pixels_to_replace]
var_slice[pixels_to_replace] = smoothed_var_slice[pixels_to_replace] #Fill the interior gaps in the variance slice as well
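#A minimal self-contained sketch (added for illustration; not part of the original code) of
#the gap-filling idea used in fill_gaps above: convolution-based interpolation fills every
#nan, while binary closing of the finite-pixel mask restricts the fill to small interior
#holes so the empty borders of the mosaic stay untouched.
def example_fill_interior_nans(img, size=[3,3]):
    """Fill only small interior nan holes of a 2D array, leaving empty borders alone."""
    kernel = Gaussian2DKernel(0.25, x_size=size[0], y_size=size[1]) #Small smoothing kernel
    filled = interpolate_replace_nans(img, kernel=kernel) #Interpolate over every nan
    mask = isfinite(img).astype(int) #1 where data exists, 0 at gaps
    interior_holes = (mask - binary_closing(mask, structure=ones(size, dtype=int))) < 0 #Holes removed by the closing are interior gaps
    img[interior_holes] = filled[interior_holes] #Replace only the interior gaps
    return img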
| 34,400
| 80.907143
| 372
|
py
|
plotspec
|
plotspec-master/line_lists/make_OH_line_list.py
|
#simple script for making a plotspec.py compatible OH line list based on the OH line list from http://www.gemini.edu/sciops/instruments/nir/wavecal/index.html
from pylab import *
line_data = loadtxt('OH_raw_rousselot_2000.dat') #Read in data from original line list
bright_lines = line_data[:,1] > 1e-1 #Find only bright lines we will probably see
line_waves = line_data[bright_lines, 0] / 10000.0 #Convert line wavelengths from angstroms to microns
output = open('OH_Rousselot_2000.dat', 'w') #Open output line list
for line in line_waves: #Loop through each line
output.write(str(line) + '\t{OH}\n') #Write line to line list
output.close() #Close line list, you are now done!
| 680
| 67.1
| 158
|
py
|
SkeletonGCL
|
SkeletonGCL-main/main.py
|
#!/usr/bin/env python
from __future__ import print_function
import argparse
import inspect
import os
import pickle
import random
import shutil
import sys
import time
from collections import OrderedDict
import traceback
from sklearn.metrics import confusion_matrix
import csv
import numpy as np
import glob
# torch
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
import yaml
from tensorboardX import SummaryWriter
from tqdm import tqdm
from model.loss import InfoNCEGraph
from torchlight import DictAction
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2048, rlimit[1]))
def init_seed(seed):
torch.cuda.manual_seed_all(seed)
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def import_class(import_str):
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info())))
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def get_parser():
# parameter priority: command line > config > default
parser = argparse.ArgumentParser(
description='Spatial Temporal Graph Convolution Network')
parser.add_argument(
'--work-dir',
default='./work_dir/temp',
help='the work folder for storing results')
parser.add_argument('-model_saved_name', default='')
parser.add_argument(
'--config',
default='./config/nturgbd-cross-view/test_bone.yaml',
help='path to the configuration file')
# processor
parser.add_argument(
'--phase', default='train', help='must be train or test')
parser.add_argument(
'--save-score',
type=str2bool,
default=False,
help='if true, the classification score will be stored')
# visualize and debug
parser.add_argument(
'--seed', type=int, default=1, help='random seed for pytorch')
parser.add_argument(
'--log-interval',
type=int,
default=100,
help='the interval for printing messages (#iteration)')
parser.add_argument(
'--save-interval',
type=int,
default=1,
help='the interval for storing models (#iteration)')
parser.add_argument(
'--save-epoch',
type=int,
default=30,
help='the start epoch to save model (#iteration)')
parser.add_argument(
'--eval-interval',
type=int,
default=5,
help='the interval for evaluating models (#iteration)')
parser.add_argument(
'--print-log',
type=str2bool,
default=True,
help='print logging or not')
parser.add_argument(
'--show-topk',
type=int,
default=[1, 5],
nargs='+',
help='which Top K accuracy will be shown')
# feeder
parser.add_argument(
'--feeder', default='feeder.feeder', help='data loader will be used')
parser.add_argument(
'--num-worker',
type=int,
default=32,
help='the number of worker for data loader')
parser.add_argument(
'--train-feeder-args',
action=DictAction,
default=dict(),
help='the arguments of data loader for training')
parser.add_argument(
'--test-feeder-args',
action=DictAction,
default=dict(),
help='the arguments of data loader for test')
# model
parser.add_argument('--model', default=None, help='the model will be used')
parser.add_argument(
'--model-args',
action=DictAction,
default=dict(),
help='the arguments of model')
parser.add_argument(
'--weights',
default=None,
help='the weights for network initialization')
parser.add_argument(
'--ignore-weights',
type=str,
default=[],
nargs='+',
help='the name of weights which will be ignored in the initialization')
# optim
parser.add_argument(
'--base-lr', type=float, default=0.01, help='initial learning rate')
parser.add_argument(
'--step',
type=int,
default=[20, 40, 60],
nargs='+',
help='the epoch where optimizer reduce the learning rate')
parser.add_argument(
'--device',
type=int,
default=0,
nargs='+',
help='the indexes of GPUs for training or testing')
parser.add_argument('--optimizer', default='SGD', help='type of optimizer')
parser.add_argument(
'--nesterov', type=str2bool, default=False, help='use nesterov or not')
parser.add_argument(
'--batch-size', type=int, default=256, help='training batch size')
parser.add_argument(
'--test-batch-size', type=int, default=256, help='test batch size')
parser.add_argument(
'--start-epoch',
type=int,
default=0,
help='start training from which epoch')
parser.add_argument(
'--num-epoch',
type=int,
default=80,
help='stop training in which epoch')
parser.add_argument(
'--weight-decay',
type=float,
default=0.0005,
help='weight decay for optimizer')
parser.add_argument(
'--temperature',
type=float,
default=0.8,
help='temperature for cross entropy loss in GCL')
parser.add_argument(
'--lr-decay-rate',
type=float,
default=0.1,
help='decay rate for learning rate')
parser.add_argument('--warm_up_epoch', type=int, default=0)
return parser
class Processor():
"""
Processor for Skeleton-based Action Recognition
"""
def __init__(self, arg):
self.arg = arg
self.save_arg()
if arg.phase == 'train':
if not arg.train_feeder_args['debug']:
arg.model_saved_name = os.path.join(arg.work_dir, 'runs')
if os.path.isdir(arg.model_saved_name):
print('log_dir: ', arg.model_saved_name, 'already exist')
answer = input('delete it? y/n:')
if answer == 'y':
shutil.rmtree(arg.model_saved_name)
print('Dir removed: ', arg.model_saved_name)
input('Refresh the tensorboard page, then press Enter to continue')
else:
print('Dir not removed: ', arg.model_saved_name)
self.train_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'train'), 'train')
self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'val'), 'val')
else:
self.train_writer = self.val_writer = SummaryWriter(os.path.join(arg.model_saved_name, 'test'), 'test')
self.global_step = 0
# pdb.set_trace()
self.load_data()
self.load_model()
if self.arg.phase == 'model_size':
pass
else:
self.load_optimizer()
self.lr = self.arg.base_lr
self.best_acc = 0
self.best_acc_epoch = 0
self.model = self.model.cuda(self.output_device)
if type(self.arg.device) is list:
if len(self.arg.device) > 1:
self.model = nn.DataParallel(
self.model,
device_ids=self.arg.device,
output_device=self.output_device)
def load_data(self):
Feeder = import_class(self.arg.feeder)
self.data_loader = dict()
if self.arg.phase == 'train':
self.data_loader['train'] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.train_feeder_args),
batch_size=self.arg.batch_size,
shuffle=True,
num_workers=self.arg.num_worker,
drop_last=True,
worker_init_fn=init_seed)
self.data_loader['test'] = torch.utils.data.DataLoader(
dataset=Feeder(**self.arg.test_feeder_args),
batch_size=self.arg.test_batch_size,
shuffle=False,
num_workers=self.arg.num_worker,
drop_last=False,
worker_init_fn=init_seed)
def load_model(self):
output_device = self.arg.device[0] if type(self.arg.device) is list else self.arg.device
self.output_device = output_device
Model = import_class(self.arg.model)
shutil.copy2(inspect.getfile(Model), self.arg.work_dir)
print(Model)
self.model = Model(**self.arg.model_args)
print(self.model)
self.loss = nn.CrossEntropyLoss().cuda(output_device)
mem_size = self.data_loader['train'].dataset.__len__() if self.arg.phase == 'train' else 0
label_all = self.data_loader['train'].dataset.label if self.arg.phase == 'train' else []
self.graphContrast = InfoNCEGraph(in_channels=3*25*25, out_channels=256, class_num=self.arg.model_args["num_class"], \
mem_size=mem_size, label_all=label_all, T=self.arg.temperature).cuda(output_device)
if self.arg.weights:
self.global_step = int(self.arg.weights[:-3].split('-')[-1])
self.print_log('Load weights from {}.'.format(self.arg.weights))
if '.pkl' in self.arg.weights:
with open(self.arg.weights, 'r') as f:
weights = pickle.load(f)
else:
weights = torch.load(self.arg.weights)
weights = OrderedDict([[k.split('module.')[-1], v.cuda(output_device)] for k, v in weights.items()])
keys = list(weights.keys())
for w in self.arg.ignore_weights:
for key in keys:
if w in key:
if weights.pop(key, None) is not None:
self.print_log('Successfully Removed Weights: {}.'.format(key))
else:
self.print_log('Can Not Remove Weights: {}.'.format(key))
try:
self.model.load_state_dict(weights)
except Exception:
state = self.model.state_dict()
diff = list(set(state.keys()).difference(set(weights.keys())))
print('Can not find these weights:')
for d in diff:
print(' ' + d)
state.update(weights)
self.model.load_state_dict(state)
def load_optimizer(self):
if self.arg.optimizer == 'SGD':
self.optimizer = optim.SGD(
self.model.parameters(),
lr=self.arg.base_lr,
momentum=0.9,
nesterov=self.arg.nesterov,
weight_decay=self.arg.weight_decay)
elif self.arg.optimizer == 'Adam':
self.optimizer = optim.Adam(
self.model.parameters(),
lr=self.arg.base_lr,
weight_decay=self.arg.weight_decay)
else:
raise ValueError()
self.print_log('using warm up, epoch: {}'.format(self.arg.warm_up_epoch))
def save_arg(self):
# save arg
arg_dict = vars(self.arg)
if not os.path.exists(self.arg.work_dir):
os.makedirs(self.arg.work_dir)
with open('{}/config.yaml'.format(self.arg.work_dir), 'w') as f:
f.write(f"# command line: {' '.join(sys.argv)}\n\n")
yaml.dump(arg_dict, f)
def adjust_learning_rate(self, epoch):
if self.arg.optimizer == 'SGD' or self.arg.optimizer == 'Adam':
if epoch < self.arg.warm_up_epoch:
lr = self.arg.base_lr * (epoch + 1) / self.arg.warm_up_epoch
else:
lr = self.arg.base_lr * (
self.arg.lr_decay_rate ** np.sum(epoch >= np.array(self.arg.step)))
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
return lr
else:
raise ValueError()
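# Worked example of the schedule above (illustrative, not part of the original code): with
# base_lr = 0.1, warm_up_epoch = 5, step = [20, 40] and lr_decay_rate = 0.1, the lr ramps
# linearly as 0.02, 0.04, ..., 0.1 over epochs 0-4, holds at 0.1 through epoch 19, drops to
# 0.01 at epoch 20 and to 0.001 at epoch 40.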
def print_time(self):
localtime = time.asctime(time.localtime(time.time()))
self.print_log("Local current time : " + localtime)
def print_log(self, str, print_time=True):
if print_time:
localtime = time.asctime(time.localtime(time.time()))
str = "[ " + localtime + ' ] ' + str
print(str)
if self.arg.print_log:
with open('{}/log.txt'.format(self.arg.work_dir), 'a') as f:
print(str, file=f)
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
def train(self, epoch, save_model=False):
self.model.train()
self.print_log('Training epoch: {}'.format(epoch + 1))
loader = self.data_loader['train']
self.adjust_learning_rate(epoch)
loss_value = []
contrast_loss_value = []
acc_value = []
self.train_writer.add_scalar('epoch', epoch, self.global_step)
self.record_time()
timer = dict(dataloader=0.001, model=0.001, statistics=0.001)
process = tqdm(loader, ncols=40)
for batch_idx, (data, label, index) in enumerate(process):
self.global_step += 1
with torch.no_grad():
data = data.float().cuda(self.output_device)
label = label.long().cuda(self.output_device)
timer['dataloader'] += self.split_time()
# forward
output, graph = self.model(data)
loss = self.loss(output, label)
if graph is not None:
contrast_loss = self.graphContrast(graph, label, index)
else:
contrast_loss = torch.zeros(1, device=output.device)
if contrast_loss > 0:
loss = loss + contrast_loss
# backward
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
loss_value.append(loss.data.item())
contrast_loss_value.append(contrast_loss.data.item())
timer['model'] += self.split_time()
value, predict_label = torch.max(output.data, 1)
acc = torch.mean((predict_label == label.data).float())
acc_value.append(acc.data.item())
self.train_writer.add_scalar('acc', acc, self.global_step)
self.train_writer.add_scalar('loss', loss.data.item(), self.global_step)
self.train_writer.add_scalar('contrast loss', contrast_loss.data.item(), self.global_step)
# statistics
self.lr = self.optimizer.param_groups[0]['lr']
self.train_writer.add_scalar('lr', self.lr, self.global_step)
timer['statistics'] += self.split_time()
# statistics of time consumption and loss
proportion = {
k: '{:02d}%'.format(int(round(v * 100 / sum(timer.values()))))
for k, v in timer.items()
}
self.print_log(
'\tMean training loss: {:.4f}. Mean graph loss: {:.4f}. Mean training acc: {:.2f}%.'.format(np.mean(loss_value), np.mean(contrast_loss_value), np.mean(acc_value)*100))
self.print_log('\tTime consumption: [Data]{dataloader}, [Network]{model}'.format(**proportion))
if save_model:
state_dict = self.model.state_dict()
weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in state_dict.items()])
torch.save(weights, self.arg.model_saved_name + '-' + str(epoch+1) + '-' + str(int(self.global_step)) + '.pt')
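    # Checkpoint naming sketch: weights are written as
    # '<model_saved_name>-<epoch>-<global_step>.pt'. Note that start() later
    # globs 'runs-<best_epoch>*' inside the work dir, which assumes
    # model_saved_name points at a 'runs' prefix there.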
def eval(self, epoch, save_score=False, loader_name=['test'], wrong_file=None, result_file=None):
if wrong_file is not None:
f_w = open(wrong_file, 'w')
if result_file is not None:
f_r = open(result_file, 'w')
self.model.eval()
self.print_log('Eval epoch: {}'.format(epoch + 1))
for ln in loader_name:
loss_value = []
score_frag = []
label_list = []
pred_list = []
step = 0
process = tqdm(self.data_loader[ln], ncols=40)
for batch_idx, (data, label, index) in enumerate(process):
label_list.append(label.numpy())
with torch.no_grad():
data = data.float().cuda(self.output_device)
label = label.long().cuda(self.output_device)
output, _ = self.model(data)
loss = self.loss(output, label)
score_frag.append(output.data.cpu().numpy())
loss_value.append(loss.data.item())
_, predict_label = torch.max(output.data, 1)
pred_list.append(predict_label.data.cpu().numpy())
step += 1
# if step == 10:
# break
if wrong_file is not None or result_file is not None:
predict = list(predict_label.cpu().numpy())
true = list(label.data.cpu().numpy())
for i, x in enumerate(predict):
if result_file is not None:
f_r.write(str(x) + ',' + str(true[i]) + '\n')
if x != true[i] and wrong_file is not None:
f_w.write(str(index[i]) + ',' + str(x) + ',' + str(true[i]) + '\n')
score = np.concatenate(score_frag)
loss = np.mean(loss_value)
if 'ucla' in self.arg.feeder:
self.data_loader[ln].dataset.sample_name = np.arange(len(score))
accuracy = self.data_loader[ln].dataset.top_k(score, 1)
if accuracy > self.best_acc:
self.best_acc = accuracy
self.best_acc_epoch = epoch + 1
print('Accuracy: ', accuracy, ' model: ', self.arg.model_saved_name)
if self.arg.phase == 'train':
self.val_writer.add_scalar('loss', loss, self.global_step)
self.val_writer.add_scalar('acc', accuracy, self.global_step)
score_dict = dict(
zip(self.data_loader[ln].dataset.sample_name, score))
self.print_log('\tMean {} loss of {} batches: {}.'.format(
ln, len(self.data_loader[ln]), np.mean(loss_value)))
for k in self.arg.show_topk:
self.print_log('\tTop{}: {:.2f}%'.format(
k, 100 * self.data_loader[ln].dataset.top_k(score, k)))
if save_score:
with open('{}/epoch{}_{}_score.pkl'.format(
self.arg.work_dir, epoch + 1, ln), 'wb') as f:
pickle.dump(score_dict, f)
# acc for each class:
label_list = np.concatenate(label_list)
pred_list = np.concatenate(pred_list)
confusion = confusion_matrix(label_list, pred_list)
list_diag = np.diag(confusion)
list_raw_sum = np.sum(confusion, axis=1)
each_acc = list_diag / list_raw_sum
with open('{}/epoch{}_{}_each_class_acc.csv'.format(self.arg.work_dir, epoch + 1, ln), 'w') as f:
writer = csv.writer(f)
writer.writerow(each_acc)
writer.writerows(confusion)
def start(self):
if self.arg.phase == 'train':
self.print_log('Parameters:\n{}\n'.format(str(vars(self.arg))))
self.global_step = self.arg.start_epoch * len(self.data_loader['train']) / self.arg.batch_size
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
self.print_log(f'# Parameters: {count_parameters(self.model)}')
for epoch in range(self.arg.start_epoch, self.arg.num_epoch):
save_model = (((epoch + 1) % self.arg.save_interval == 0) or (
epoch + 1 == self.arg.num_epoch)) and (epoch+1) > self.arg.save_epoch
self.train(epoch, save_model=save_model)
self.eval(epoch, save_score=self.arg.save_score, loader_name=['test'])
# test the best model
weights_path = glob.glob(os.path.join(self.arg.work_dir, 'runs-'+str(self.best_acc_epoch)+'*'))[0]
weights = torch.load(weights_path)
if type(self.arg.device) is list:
if len(self.arg.device) > 1:
weights = OrderedDict([['module.'+k, v.cuda(self.output_device)] for k, v in weights.items()])
self.model.load_state_dict(weights)
wf = weights_path.replace('.pt', '_wrong.txt')
rf = weights_path.replace('.pt', '_right.txt')
self.arg.print_log = False
self.eval(epoch=0, save_score=True, loader_name=['test'], wrong_file=wf, result_file=rf)
self.arg.print_log = True
num_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
self.print_log(f'Best accuracy: {self.best_acc}')
self.print_log(f'Epoch number: {self.best_acc_epoch}')
self.print_log(f'Model name: {self.arg.work_dir}')
self.print_log(f'Model total number of params: {num_params}')
self.print_log(f'Weight decay: {self.arg.weight_decay}')
self.print_log(f'Base LR: {self.arg.base_lr}')
self.print_log(f'Batch Size: {self.arg.batch_size}')
self.print_log(f'Test Batch Size: {self.arg.test_batch_size}')
self.print_log(f'seed: {self.arg.seed}')
    elif self.arg.phase == 'test':
        if self.arg.weights is None:
            raise ValueError('Please specify --weights.')
        wf = self.arg.weights.replace('.pt', '_wrong.txt')
        rf = self.arg.weights.replace('.pt', '_right.txt')
self.arg.print_log = False
self.print_log('Model: {}.'.format(self.arg.model))
self.print_log('Weights: {}.'.format(self.arg.weights))
self.eval(epoch=0, save_score=self.arg.save_score, loader_name=['test'], wrong_file=wf, result_file=rf)
self.print_log('Done.\n')
if __name__ == '__main__':
parser = get_parser()
# load arg form config file
p = parser.parse_args()
if p.config is not None:
with open(p.config, 'r') as f:
default_arg = yaml.load(f, Loader=yaml.FullLoader)
key = vars(p).keys()
for k in default_arg.keys():
if k not in key:
print('WRONG ARG: {}'.format(k))
assert (k in key)
parser.set_defaults(**default_arg)
arg = parser.parse_args()
init_seed(arg.seed)
processor = Processor(arg)
processor.start()
| 23,323
| 38.2
| 180
|
py
|
SkeletonGCL
|
SkeletonGCL-main/torchlight/setup.py
|
from setuptools import find_packages, setup
setup(
name='torchlight',
version='1.0',
description='A mini framework for pytorch',
packages=find_packages(),
install_requires=[])
| 197
| 21
| 47
|
py
|
SkeletonGCL
|
SkeletonGCL-main/torchlight/torchlight/util.py
|
#!/usr/bin/env python
import argparse
import os
import sys
import traceback
import time
import pickle
from collections import OrderedDict
import yaml
import h5py
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
# from torchpack.runner.hooks import PaviLogger
class IO():
def __init__(self, work_dir, save_log=True, print_log=True):
self.work_dir = work_dir
self.save_log = save_log
self.print_to_screen = print_log
self.cur_time = time.time()
self.split_timer = {}
self.pavi_logger = None
self.session_file = None
self.model_text = ''
def log(self, *args, **kwargs):
try:
if self.pavi_logger is None:
url = 'http://pavi.parrotsdnn.org/log'
with open(self.session_file, 'r') as f:
info = dict(session_file=self.session_file, session_text=f.read(), model_text=self.model_text)
self.pavi_logger = PaviLogger(url)
self.pavi_logger.connect(self.work_dir, info=info)
self.pavi_logger.log(*args, **kwargs)
        except Exception:  # pylint: disable=W0702
            # PaviLogger (torchpack) is an optional dependency; swallow
            # remote-logging failures rather than aborting training.
            pass
def load_model(self, model, **model_args):
Model = import_class(model)
model = Model(**model_args)
self.model_text += '\n\n' + str(model)
return model
def load_weights(self, model, weights_path, ignore_weights=None, fix_weights=False):
if ignore_weights is None:
ignore_weights = []
if isinstance(ignore_weights, str):
ignore_weights = [ignore_weights]
self.print_log(f'Load weights from {weights_path}.')
weights = torch.load(weights_path)
weights = OrderedDict([[k.split('module.')[-1], v.cpu()] for k, v in weights.items()])
# filter weights
for i in ignore_weights:
ignore_name = list()
for w in weights:
if w.find(i) == 0:
ignore_name.append(w)
for n in ignore_name:
weights.pop(n)
self.print_log(f'Filter [{i}] remove weights [{n}].')
for w in weights:
self.print_log(f'Load weights [{w}].')
try:
model.load_state_dict(weights)
except (KeyError, RuntimeError):
state = model.state_dict()
diff = list(set(state.keys()).difference(set(weights.keys())))
for d in diff:
self.print_log(f'Can not find weights [{d}].')
state.update(weights)
model.load_state_dict(state)
if fix_weights:
for name, param in model.named_parameters():
if name in weights.keys():
param.requires_grad = False
self.print_log(f'Fix weights [{name}].')
return model
def save_pkl(self, result, filename):
with open(f'{self.work_dir}/{filename}', 'wb') as f:
pickle.dump(result, f)
def save_h5(self, result, filename, append=False):
with h5py.File(f'{self.work_dir}/{filename}', 'a' if append else 'w') as f:
for k in result.keys():
f[k] = result[k]
def save_model(self, model, name):
model_path = f'{self.work_dir}/{name}'
# symlink = f'{self.work_dir}/latest_model.pt'
state_dict = model.state_dict()
weights = OrderedDict([[''.join(k.split('module.')), v.cpu()] for k, v in state_dict.items()])
torch.save(weights, model_path)
# os.symlink(model_path, symlink)
self.print_log(f'The model has been saved as {model_path}.')
def save_arg(self, arg):
self.session_file = f'{self.work_dir}/config.yaml'
# save arg
arg_dict = vars(arg)
if not os.path.exists(self.work_dir):
os.makedirs(self.work_dir)
with open(self.session_file, 'w') as f:
f.write(f"# command line: {' '.join(sys.argv)}\n\n")
yaml.dump(arg_dict, f, default_flow_style=False, indent=4)
    def print_log(self, msg, print_time=True):
        if print_time:
            msg = time.strftime("[%m.%d.%y|%X] ", time.localtime()) + msg
        if self.print_to_screen:
            print(msg)
        if self.save_log:
            with open(f'{self.work_dir}/log.txt', 'a') as f:
                print(msg, file=f)
def init_timer(self, *name):
self.record_time()
self.split_timer = {k: 0.0000001 for k in name}
def check_time(self, name):
self.split_timer[name] += self.split_time()
def record_time(self):
self.cur_time = time.time()
return self.cur_time
def split_time(self):
split_time = time.time() - self.cur_time
self.record_time()
return split_time
def print_timer(self):
proportion = {
k: f'{int(round(v * 100 / sum(self.split_timer.values()))):02d}%'
for k, v in self.split_timer.items()
}
        self.print_log('Time consumption:')
for k in proportion:
self.print_log(f'\t[{k}][{proportion[k]}]: {self.split_timer[k]:.4f}')
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def str2dict(v):
return eval(f'dict({v})') #pylint: disable=W0123
def _import_class_0(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def import_class(import_str):
mod_str, _sep, class_str = import_str.rpartition('.')
__import__(mod_str)
try:
return getattr(sys.modules[mod_str], class_str)
except AttributeError:
raise ImportError('Class %s cannot be found (%s)' % (class_str, traceback.format_exception(*sys.exc_info())))
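# Usage example: import_class('model.ctrgcn.Model') imports the module
# 'model.ctrgcn' and returns its Model class object without instantiating it.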
class DictAction(argparse.Action):
def __init__(self, option_strings, dest, nargs=None, **kwargs):
if nargs is not None:
raise ValueError("nargs not allowed")
super(DictAction, self).__init__(option_strings, dest, **kwargs)
def __call__(self, parser, namespace, values, option_string=None):
input_dict = eval(f'dict({values})') #pylint: disable=W0123
output_dict = getattr(namespace, self.dest)
for k in input_dict:
output_dict[k] = input_dict[k]
setattr(namespace, self.dest, output_dict)
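# Usage sketch: an argument registered with action=DictAction and a dict
# default merges command-line input such as "num_class=60,num_point=25"
# (evaluated through dict(...)) into the existing default dictionary.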
| 6,649
| 32.756345
| 117
|
py
|
SkeletonGCL
|
SkeletonGCL-main/torchlight/torchlight/__init__.py
|
from .util import IO
from .util import str2bool
from .util import str2dict
from .util import DictAction
from .util import import_class
from .gpu import visible_gpu
from .gpu import occupy_gpu
from .gpu import ngpu
| 214
| 22.888889
| 30
|
py
|
SkeletonGCL
|
SkeletonGCL-main/torchlight/torchlight/gpu.py
|
import os
import torch
def visible_gpu(gpus):
"""
set visible gpu.
can be a single id, or a list
return a list of new gpus ids
"""
gpus = [gpus] if isinstance(gpus, int) else list(gpus)
os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(list(map(str, gpus)))
return list(range(len(gpus)))
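# Example: visible_gpu([2, 3]) sets CUDA_VISIBLE_DEVICES='2,3' and returns
# [0, 1], the ids by which this process now addresses those two GPUs.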
def ngpu(gpus):
"""
count how many gpus used.
"""
gpus = [gpus] if isinstance(gpus, int) else list(gpus)
return len(gpus)
def occupy_gpu(gpus=None):
"""
make program appear on nvidia-smi.
"""
if gpus is None:
torch.zeros(1).cuda()
else:
gpus = [gpus] if isinstance(gpus, int) else list(gpus)
for g in gpus:
torch.zeros(1).cuda(g)
| 750
| 19.861111
| 71
|
py
|
SkeletonGCL
|
SkeletonGCL-main/graph/ntu_rgb_d.py
|
import sys
import numpy as np
sys.path.extend(['../'])
from graph import tools
num_node = 25
self_link = [(i, i) for i in range(num_node)]
inward_ori_index = [(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5), (7, 6),
(8, 7), (9, 21), (10, 9), (11, 10), (12, 11), (13, 1),
(14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18),
(20, 19), (22, 23), (23, 8), (24, 25), (25, 12)]
inward = [(i - 1, j - 1) for (i, j) in inward_ori_index]
outward = [(j, i) for (i, j) in inward]
neighbor = inward + outward
class Graph:
def __init__(self, labeling_mode='spatial'):
self.num_node = num_node
self.self_link = self_link
self.inward = inward
self.outward = outward
self.neighbor = neighbor
self.A = self.get_adjacency_matrix(labeling_mode)
def get_adjacency_matrix(self, labeling_mode=None):
if labeling_mode is None:
return self.A
if labeling_mode == 'spatial':
A = tools.get_spatial_graph(num_node, self_link, inward, outward)
else:
raise ValueError()
return A
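# Shape sketch: Graph(labeling_mode='spatial').A is a (3, 25, 25) array:
# identity self-links plus normalized inward/outward partitions, with inward
# edges oriented from each joint toward the skeleton center (joint 21 in the
# 1-based NTU numbering).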
| 1,146
| 32.735294
| 78
|
py
|
SkeletonGCL
|
SkeletonGCL-main/graph/ucla.py
|
import sys
import numpy as np
sys.path.extend(['../'])
from graph import tools
num_node = 20
self_link = [(i, i) for i in range(num_node)]
inward_ori_index = [(1, 2), (2, 3), (4, 3), (5, 3), (6, 5), (7, 6),
(8, 7), (9, 3), (10, 9), (11, 10), (12, 11), (13, 1),
(14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18),
(20, 19)]
inward = [(i - 1, j - 1) for (i, j) in inward_ori_index]
outward = [(j, i) for (i, j) in inward]
neighbor = inward + outward
class Graph:
def __init__(self, labeling_mode='spatial', scale=1):
self.num_node = num_node
self.self_link = self_link
self.inward = inward
self.outward = outward
self.neighbor = neighbor
self.A = self.get_adjacency_matrix(labeling_mode)
def get_adjacency_matrix(self, labeling_mode=None):
if labeling_mode is None:
return self.A
if labeling_mode == 'spatial':
A = tools.get_spatial_graph(num_node, self_link, inward, outward)
else:
raise ValueError()
return A
| 1,105
| 30.6
| 78
|
py
|
SkeletonGCL
|
SkeletonGCL-main/graph/tools.py
|
import numpy as np
def get_sgp_mat(num_in, num_out, link):
A = np.zeros((num_in, num_out))
for i, j in link:
A[i, j] = 1
A_norm = A / np.sum(A, axis=0, keepdims=True)
return A_norm
def edge2mat(link, num_node):
A = np.zeros((num_node, num_node))
for i, j in link:
A[j, i] = 1
return A
def get_k_scale_graph(scale, A):
if scale == 1:
return A
An = np.zeros_like(A)
A_power = np.eye(A.shape[0])
for k in range(scale):
A_power = A_power @ A
An += A_power
An[An > 0] = 1
return An
def normalize_digraph(A):
Dl = np.sum(A, 0)
h, w = A.shape
Dn = np.zeros((w, w))
for i in range(w):
if Dl[i] > 0:
Dn[i, i] = Dl[i] ** (-1)
AD = np.dot(A, Dn)
return AD
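# Note: normalize_digraph right-multiplies A by the inverse in-degree matrix
# (A @ D^-1), so every nonzero column of the result sums to 1.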
def get_spatial_graph(num_node, self_link, inward, outward):
I = edge2mat(self_link, num_node)
In = normalize_digraph(edge2mat(inward, num_node))
Out = normalize_digraph(edge2mat(outward, num_node))
A = np.stack((I, In, Out))
return A
def normalize_adjacency_matrix(A):
node_degrees = A.sum(-1)
degs_inv_sqrt = np.power(node_degrees, -0.5)
norm_degs_matrix = np.eye(len(node_degrees)) * degs_inv_sqrt
return (norm_degs_matrix @ A @ norm_degs_matrix).astype(np.float32)
def k_adjacency(A, k, with_self=False, self_factor=1):
assert isinstance(A, np.ndarray)
I = np.eye(len(A), dtype=A.dtype)
if k == 0:
return I
Ak = np.minimum(np.linalg.matrix_power(A + I, k), 1) \
- np.minimum(np.linalg.matrix_power(A + I, k - 1), 1)
if with_self:
Ak += (self_factor * I)
return Ak
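# Sketch: k_adjacency extracts the exactly-k-hop adjacency by binarizing
# (A + I)^k and subtracting the binarized (A + I)^(k-1); with_self optionally
# adds self_factor * I back. It feeds get_multiscale_spatial_graph below.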
def get_multiscale_spatial_graph(num_node, self_link, inward, outward):
I = edge2mat(self_link, num_node)
A1 = edge2mat(inward, num_node)
A2 = edge2mat(outward, num_node)
A3 = k_adjacency(A1, 2)
A4 = k_adjacency(A2, 2)
A1 = normalize_digraph(A1)
A2 = normalize_digraph(A2)
A3 = normalize_digraph(A3)
A4 = normalize_digraph(A4)
A = np.stack((I, A1, A2, A3, A4))
return A
def get_uniform_graph(num_node, self_link, neighbor):
A = normalize_digraph(edge2mat(neighbor + self_link, num_node))
return A
| 2,193
| 26.425
| 71
|
py
|
SkeletonGCL
|
SkeletonGCL-main/graph/__init__.py
|
from . import tools
from . import ntu_rgb_d
from . import ucla
| 64
| 12
| 23
|
py
|
SkeletonGCL
|
SkeletonGCL-main/model/agcn.py
|
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def conv_branch_init(conv, branches):
weight = conv.weight
n = weight.size(0)
k1 = weight.size(1)
k2 = weight.size(2)
nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
nn.init.constant_(conv.bias, 0)
def conv_init(conv):
nn.init.kaiming_normal_(conv.weight, mode='fan_out')
nn.init.constant_(conv.bias, 0)
def bn_init(bn, scale):
nn.init.constant_(bn.weight, scale)
nn.init.constant_(bn.bias, 0)
class unit_tcn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
super(unit_tcn, self).__init__()
pad = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0),
stride=(stride, 1))
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU()
conv_init(self.conv)
bn_init(self.bn, 1)
def forward(self, x):
x = self.bn(self.conv(x))
return x
class unit_gcn(nn.Module):
def __init__(self, in_channels, out_channels, A, coff_embedding=4, num_subset=3):
super(unit_gcn, self).__init__()
inter_channels = out_channels // coff_embedding
self.inter_c = inter_channels
self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
nn.init.constant_(self.PA, 1e-6)
self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
self.num_subset = num_subset
self.conv_a = nn.ModuleList()
self.conv_b = nn.ModuleList()
self.conv_d = nn.ModuleList()
for i in range(self.num_subset):
self.conv_a.append(nn.Conv2d(in_channels, inter_channels, 1))
self.conv_b.append(nn.Conv2d(in_channels, inter_channels, 1))
self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
if in_channels != out_channels:
self.down = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
else:
self.down = lambda x: x
self.bn = nn.BatchNorm2d(out_channels)
self.soft = nn.Softmax(-2)
self.relu = nn.ReLU()
for m in self.modules():
if isinstance(m, nn.Conv2d):
conv_init(m)
elif isinstance(m, nn.BatchNorm2d):
bn_init(m, 1)
bn_init(self.bn, 1e-6)
for i in range(self.num_subset):
conv_branch_init(self.conv_d[i], self.num_subset)
def forward(self, x):
N, C, T, V = x.size()
A = self.A.cuda(x.get_device())
A = A + self.PA
y = None
graph_list = []
for i in range(self.num_subset):
A1 = self.conv_a[i](x).permute(0, 3, 1, 2).contiguous().view(N, V, self.inter_c * T)
A2 = self.conv_b[i](x).view(N, self.inter_c * T, V)
graph = torch.matmul(A1, A2)
graph_list.append(graph)
A1 = self.soft(graph / A1.size(-1)) # N V V
A1 = A1 + A[i]
A2 = x.view(N, C * T, V)
z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
y = z + y if y is not None else z
y = self.bn(y)
y += self.down(x)
return self.relu(y), torch.stack(graph_list, 1)
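# Attention sketch: per subset, conv_a/conv_b embed the input so that their
# product yields a data-dependent (N, V, V) graph; a softmax of that graph,
# scaled by the embedding length, is added to the static A plus the learned
# PA before aggregation, and the raw graphs are stacked and returned so the
# Model can expose them to the contrastive loss.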
class TCN_GCN_unit(nn.Module):
def __init__(self, in_channels, out_channels, A, stride=1, residual=True):
super(TCN_GCN_unit, self).__init__()
self.gcn1 = unit_gcn(in_channels, out_channels, A)
self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
self.relu = nn.ReLU()
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)
def forward(self, x):
y, graph = self.gcn1(x)
x = self.tcn1(y) + self.residual(x)
return self.relu(x), graph
class Model(nn.Module):
def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3, drop_out=0, adaptive=True):
super(Model, self).__init__()
if graph is None:
raise ValueError()
else:
Graph = import_class(graph)
self.graph = Graph(**graph_args)
A = self.graph.A
self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
self.l1 = TCN_GCN_unit(3, 64, A, residual=False)
self.l2 = TCN_GCN_unit(64, 64, A)
self.l3 = TCN_GCN_unit(64, 64, A)
self.l4 = TCN_GCN_unit(64, 64, A)
self.l5 = TCN_GCN_unit(64, 128, A, stride=2)
self.l6 = TCN_GCN_unit(128, 128, A)
self.l7 = TCN_GCN_unit(128, 128, A)
self.l8 = TCN_GCN_unit(128, 256, A, stride=2)
self.l9 = TCN_GCN_unit(256, 256, A)
self.l10 = TCN_GCN_unit(256, 256, A)
self.fc = nn.Linear(256, num_class)
nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
bn_init(self.data_bn, 1)
def forward(self, x):
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
x, _ = self.l1(x)
x, _ = self.l2(x)
x, _ = self.l3(x)
x, _ = self.l4(x)
x, _ = self.l5(x)
x, _ = self.l6(x)
x, _ = self.l7(x)
x, _ = self.l8(x)
x, _ = self.l9(x)
x, graph = self.l10(x)
# N*M,C,T,V
c_new = x.size(1)
x = x.view(N, M, c_new, -1)
x = x.mean(3).mean(1)
graph = graph.view(N, M, -1, V, V).mean(1).view(N, -1)
return self.fc(x), graph
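# I/O sketch: x arrives as (N, C=3, T, V=25, M=2); forward returns logits of
# shape (N, num_class) together with the last layer's stacked graphs, averaged
# over the M persons and flattened to (N, 3*V*V), i.e. (N, 3*25*25), matching
# the in_channels of the InfoNCEGraph head constructed in main.py.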
| 6,153
| 32.086022
| 138
|
py
|
SkeletonGCL
|
SkeletonGCL-main/model/loss.py
|
import math
import random
import numpy as np
import torch
import torch.nn as nn
from torch import einsum
class InfoNCEGraph(nn.Module):
def __init__(self, in_channels=128, out_channels=256, mem_size=512, positive_num=128, negative_num=512, T=0.8, class_num=60, label_all=[]):
super(InfoNCEGraph, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.mem_size = mem_size
self.positive_num = positive_num
self.negative_num = negative_num
self.T = T
self.trans = nn.Linear(in_channels, out_channels)
self.Bank = nn.Parameter(
torch.zeros((mem_size, out_channels)), requires_grad=False
)
        self.label_all = torch.from_numpy(np.asarray(label_all))
nn.init.normal_(self.trans.weight, 0, math.sqrt(2. / class_num))
nn.init.zeros_(self.trans.bias)
self.bank_flag = nn.Parameter(
torch.zeros(len(self.label_all)), requires_grad=False
)
self.cross_entropy = nn.CrossEntropyLoss()
def forward(self, f, label, input_index):
# f: n c label: n
n, _ = f.size()
f = self.trans(f)
f_norm = f.norm(dim=-1, p=2, keepdim=True)
f_normed = f / f_norm
self.Bank[input_index] = f_normed.detach()
self.bank_flag[input_index] = 1
all_pairs = einsum('n c, m c -> n m', f_normed, self.Bank)
bank_label = self.label_all.to(label.device) # mem_size
positive_mask = (label.view(n, 1) == bank_label.view(1, -1)).view(n, self.mem_size) # n mem_size
negative_mask = (1-positive_mask.float())
positive_mask = positive_mask * self.bank_flag
negative_mask = negative_mask * self.bank_flag
combined_pairs_list = []
for i in range(n):
if (positive_mask[i].sum(dim=-1) < self.positive_num) or (negative_mask[i].sum(dim=-1) < self.negative_num):
continue
positive_pairs = torch.masked_select(all_pairs[i], mask=positive_mask[i].bool()).view(-1)
positive_pairs_hard = positive_pairs.sort(dim=-1, descending=False)[0][:self.positive_num].view(1, self.positive_num, 1)
negative_pairs = torch.masked_select(all_pairs[i], mask=negative_mask[i].bool()).view(-1)
negative_pairs_hard = negative_pairs.sort(dim=-1, descending=True)[0][:self.negative_num].view(1, 1, self.negative_num)\
.expand(-1, self.positive_num, -1)
idx = random.sample(list(range(len(negative_pairs))), k=self.negative_num)
negative_pairs_random = negative_pairs[idx].view(1, 1, self.negative_num).expand(-1, self.positive_num, -1)
combined_pairs_hard2hard = torch.cat([positive_pairs_hard, negative_pairs_hard], -1).view(self.positive_num, -1)
combined_pairs_hard2random = torch.cat([positive_pairs_hard, negative_pairs_random], -1).view(self.positive_num, -1)
combined_pairs = torch.cat([combined_pairs_hard2hard, combined_pairs_hard2random], 0)
combined_pairs_list.append((combined_pairs))
if len(combined_pairs_list) == 0:
return torch.zeros(1, device=f.device)
combined_pairs = torch.cat(combined_pairs_list, 0)
combined_label = torch.zeros(combined_pairs.size(0), device=f.device).long()
loss = self.cross_entropy(combined_pairs/self.T, combined_label)
return loss
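# Usage sketch (arguments mirror how main.py builds it): the head is created
# with in_channels=3*25*25, out_channels=256, mem_size=len(train_set),
# class_num/label_all from the dataset and T=arg.temperature. Each step,
# forward(graph, label, index) refreshes the bank rows of the current batch
# and returns an InfoNCE loss contrasting hard positives against hard and
# random negatives, or a zero tensor while too few samples are banked.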
| 3,455
| 45.702703
| 143
|
py
|
SkeletonGCL
|
SkeletonGCL-main/model/__init__.py
| 0
| 0
| 0
|
py
|
|
SkeletonGCL
|
SkeletonGCL-main/model/baseline.py
|
import math
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def conv_branch_init(conv, branches):
weight = conv.weight
n = weight.size(0)
k1 = weight.size(1)
k2 = weight.size(2)
nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
if conv.bias is not None:
nn.init.constant_(conv.bias, 0)
def conv_init(conv):
if conv.weight is not None:
nn.init.kaiming_normal_(conv.weight, mode='fan_out')
if conv.bias is not None:
nn.init.constant_(conv.bias, 0)
def bn_init(bn, scale):
nn.init.constant_(bn.weight, scale)
nn.init.constant_(bn.bias, 0)
class unit_tcn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=5, stride=1):
super(unit_tcn, self).__init__()
pad = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0),
stride=(stride, 1))
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
conv_init(self.conv)
bn_init(self.bn, 1)
def forward(self, x):
x = self.bn(self.conv(x))
return x
class unit_gcn(nn.Module):
def __init__(self, in_channels, out_channels, A, adaptive=True):
super(unit_gcn, self).__init__()
self.out_c = out_channels
self.in_c = in_channels
self.num_subset = A.shape[0]
self.adaptive = adaptive
if adaptive:
self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)), requires_grad=True)
else:
self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
self.conv_d = nn.ModuleList()
for i in range(self.num_subset):
self.conv_d.append(nn.Conv2d(in_channels, out_channels, 1))
if in_channels != out_channels:
self.down = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
else:
self.down = lambda x: x
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
conv_init(m)
elif isinstance(m, nn.BatchNorm2d):
bn_init(m, 1)
bn_init(self.bn, 1e-6)
for i in range(self.num_subset):
conv_branch_init(self.conv_d[i], self.num_subset)
def L2_norm(self, A):
# A:N,V,V
A_norm = torch.norm(A, 2, dim=1, keepdim=True) + 1e-4 # N,1,V
A = A / A_norm
return A
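    # Note: torch.norm over dim=1 gives per-column norms of each V x V subset
    # graph, so the learned adjacency PA is column-normalized (plus 1e-4 for
    # numerical stability) before aggregation.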
def forward(self, x):
N, C, T, V = x.size()
y = None
if self.adaptive:
A = self.PA
A = self.L2_norm(A)
else:
A = self.A.cuda(x.get_device())
for i in range(self.num_subset):
A1 = A[i]
A2 = x.view(N, C * T, V)
z = self.conv_d[i](torch.matmul(A2, A1).view(N, C, T, V))
y = z + y if y is not None else z
y = self.bn(y)
y += self.down(x)
y = self.relu(y)
return y
class TCN_GCN_unit(nn.Module):
def __init__(self, in_channels, out_channels, A, stride=1, residual=True, adaptive=True):
super(TCN_GCN_unit, self).__init__()
self.gcn1 = unit_gcn(in_channels, out_channels, A, adaptive=adaptive)
self.tcn1 = unit_tcn(out_channels, out_channels, stride=stride)
self.relu = nn.ReLU(inplace=True)
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)
def forward(self, x):
y = self.relu(self.tcn1(self.gcn1(x)) + self.residual(x))
return y
class Model(nn.Module):
def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3,
drop_out=0, adaptive=True, num_set=3):
super(Model, self).__init__()
if graph is None:
raise ValueError()
else:
Graph = import_class(graph)
self.graph = Graph(**graph_args)
A = np.stack([np.eye(num_point)] * num_set, axis=0)
self.num_class = num_class
self.num_point = num_point
self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
self.l1 = TCN_GCN_unit(3, 64, A, residual=False, adaptive=adaptive)
self.l2 = TCN_GCN_unit(64, 64, A, adaptive=adaptive)
self.l3 = TCN_GCN_unit(64, 64, A, adaptive=adaptive)
self.l4 = TCN_GCN_unit(64, 64, A, adaptive=adaptive)
self.l5 = TCN_GCN_unit(64, 128, A, stride=2, adaptive=adaptive)
self.l6 = TCN_GCN_unit(128, 128, A, adaptive=adaptive)
self.l7 = TCN_GCN_unit(128, 128, A, adaptive=adaptive)
self.l8 = TCN_GCN_unit(128, 256, A, stride=2, adaptive=adaptive)
self.l9 = TCN_GCN_unit(256, 256, A, adaptive=adaptive)
self.l10 = TCN_GCN_unit(256, 256, A, adaptive=adaptive)
self.fc = nn.Linear(256, num_class)
nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
bn_init(self.data_bn, 1)
if drop_out:
self.drop_out = nn.Dropout(drop_out)
else:
self.drop_out = lambda x: x
def forward(self, x):
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
x = self.l1(x)
x = self.l2(x)
x = self.l3(x)
x = self.l4(x)
x = self.l5(x)
x = self.l6(x)
x = self.l7(x)
x = self.l8(x)
x = self.l9(x)
x = self.l10(x)
# N*M,C,T,V
c_new = x.size(1)
x = x.view(N, M, c_new, -1)
x = x.mean(3).mean(1)
x = self.drop_out(x)
return self.fc(x)
| 6,316
| 31.06599
| 110
|
py
|
SkeletonGCL
|
SkeletonGCL-main/model/ctrgcn.py
|
import math
import pdb
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
def import_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def conv_branch_init(conv, branches):
weight = conv.weight
n = weight.size(0)
k1 = weight.size(1)
k2 = weight.size(2)
nn.init.normal_(weight, 0, math.sqrt(2. / (n * k1 * k2 * branches)))
nn.init.constant_(conv.bias, 0)
def conv_init(conv):
if conv.weight is not None:
nn.init.kaiming_normal_(conv.weight, mode='fan_out')
if conv.bias is not None:
nn.init.constant_(conv.bias, 0)
def bn_init(bn, scale):
nn.init.constant_(bn.weight, scale)
nn.init.constant_(bn.bias, 0)
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
if hasattr(m, 'weight'):
nn.init.kaiming_normal_(m.weight, mode='fan_out')
if hasattr(m, 'bias') and m.bias is not None and isinstance(m.bias, torch.Tensor):
nn.init.constant_(m.bias, 0)
elif classname.find('BatchNorm') != -1:
if hasattr(m, 'weight') and m.weight is not None:
m.weight.data.normal_(1.0, 0.02)
if hasattr(m, 'bias') and m.bias is not None:
m.bias.data.fill_(0)
class TemporalConv(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=5, stride=1, dilation=1):
super(TemporalConv, self).__init__()
pad = (kernel_size + (kernel_size-1) * (dilation-1) - 1) // 2
self.conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=(kernel_size, 1),
padding=(pad, 0),
stride=(stride, 1),
dilation=(dilation, 1))
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class MultiScale_TemporalConv(nn.Module):
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
stride=1,
dilations=[1,2,3,4],
residual=True,
residual_kernel_size=1):
super().__init__()
assert out_channels % (len(dilations) + 2) == 0, '# out channels should be multiples of # branches'
# Multiple branches of temporal convolution
self.num_branches = len(dilations) + 2
branch_channels = out_channels // self.num_branches
if type(kernel_size) == list:
assert len(kernel_size) == len(dilations)
else:
kernel_size = [kernel_size]*len(dilations)
# Temporal Convolution branches
self.branches = nn.ModuleList([
nn.Sequential(
nn.Conv2d(
in_channels,
branch_channels,
kernel_size=1,
padding=0),
nn.BatchNorm2d(branch_channels),
nn.ReLU(inplace=True),
TemporalConv(
branch_channels,
branch_channels,
kernel_size=ks,
stride=stride,
dilation=dilation),
)
for ks, dilation in zip(kernel_size, dilations)
])
# Additional Max & 1x1 branch
self.branches.append(nn.Sequential(
nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0),
nn.BatchNorm2d(branch_channels),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=(3,1), stride=(stride,1), padding=(1,0)),
            nn.BatchNorm2d(branch_channels)  # why add another BN here?
))
self.branches.append(nn.Sequential(
nn.Conv2d(in_channels, branch_channels, kernel_size=1, padding=0, stride=(stride,1)),
nn.BatchNorm2d(branch_channels)
))
# Residual connection
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = TemporalConv(in_channels, out_channels, kernel_size=residual_kernel_size, stride=stride)
# initialize
self.apply(weights_init)
def forward(self, x):
# Input dim: (N,C,T,V)
res = self.residual(x)
branch_outs = []
for tempconv in self.branches:
out = tempconv(x)
branch_outs.append(out)
out = torch.cat(branch_outs, dim=1)
out += res
return out
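# Shape sketch: with the defaults TCN_GCN_unit passes below (kernel_size=5,
# dilations=[1, 2]) there are four branches (two dilated temporal convs, a
# max-pool branch and a plain 1x1 branch), each producing out_channels // 4
# channels that are concatenated back to out_channels.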
class CTRGC(nn.Module):
def __init__(self, in_channels, out_channels, rel_reduction=8, mid_reduction=1):
super(CTRGC, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if in_channels == 3 or in_channels == 9:
self.rel_channels = 8
self.mid_channels = 16
else:
self.rel_channels = in_channels // rel_reduction
self.mid_channels = in_channels // mid_reduction
self.conv1 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
self.conv2 = nn.Conv2d(self.in_channels, self.rel_channels, kernel_size=1)
self.conv3 = nn.Conv2d(self.in_channels, self.out_channels, kernel_size=1)
self.conv4 = nn.Conv2d(self.rel_channels, self.out_channels, kernel_size=1)
self.tanh = nn.Tanh()
for m in self.modules():
if isinstance(m, nn.Conv2d):
conv_init(m)
elif isinstance(m, nn.BatchNorm2d):
bn_init(m, 1)
def forward(self, x, A=None, alpha=1):
x1, x2, x3 = self.conv1(x), self.conv2(x), self.conv3(x)
graph = self.tanh(x1.mean(-2).unsqueeze(-1) - x2.mean(-2).unsqueeze(-2))
graph = self.conv4(graph)
graph_c = graph * alpha + (A.unsqueeze(0).unsqueeze(0) if A is not None else 0) # N,C,V,V
y = torch.einsum('ncuv,nctv->nctu', graph_c, x3)
return y, graph
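# Refinement sketch: conv1/conv2 embed the joints and are mean-pooled over
# time; tanh of their pairwise differences, lifted by conv4, gives a
# channel-wise (N, C_out, V, V) graph. graph * alpha is added to the shared
# adjacency A for the einsum aggregation, while the raw graph is also
# returned for contrastive learning.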
class unit_tcn(nn.Module):
def __init__(self, in_channels, out_channels, kernel_size=9, stride=1):
super(unit_tcn, self).__init__()
pad = int((kernel_size - 1) / 2)
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=(kernel_size, 1), padding=(pad, 0),
stride=(stride, 1))
self.bn = nn.BatchNorm2d(out_channels)
self.relu = nn.ReLU(inplace=True)
conv_init(self.conv)
bn_init(self.bn, 1)
def forward(self, x):
x = self.bn(self.conv(x))
return x
class unit_gcn(nn.Module):
def __init__(self, in_channels, out_channels, A, coff_embedding=4, adaptive=True, residual=True):
super(unit_gcn, self).__init__()
inter_channels = out_channels // coff_embedding
self.inter_c = inter_channels
self.out_c = out_channels
self.in_c = in_channels
self.adaptive = adaptive
self.num_subset = A.shape[0]
self.convs = nn.ModuleList()
for i in range(self.num_subset):
self.convs.append(CTRGC(in_channels, out_channels))
if residual:
if in_channels != out_channels:
self.down = nn.Sequential(
nn.Conv2d(in_channels, out_channels, 1),
nn.BatchNorm2d(out_channels)
)
else:
self.down = lambda x: x
else:
self.down = lambda x: 0
if self.adaptive:
self.PA = nn.Parameter(torch.from_numpy(A.astype(np.float32)))
else:
self.A = Variable(torch.from_numpy(A.astype(np.float32)), requires_grad=False)
self.alpha = nn.Parameter(torch.zeros(1))
self.bn = nn.BatchNorm2d(out_channels)
self.soft = nn.Softmax(-2)
self.relu = nn.ReLU(inplace=True)
for m in self.modules():
if isinstance(m, nn.Conv2d):
conv_init(m)
elif isinstance(m, nn.BatchNorm2d):
bn_init(m, 1)
bn_init(self.bn, 1e-6)
def forward(self, x):
y = None
graph_list = []
if self.adaptive:
A = self.PA
else:
A = self.A.cuda(x.get_device())
for i in range(self.num_subset):
z, graph = self.convs[i](x, A[i], self.alpha)
graph_list.append(graph)
y = z + y if y is not None else z
y = self.bn(y)
y += self.down(x)
y = self.relu(y)
return y, torch.stack(graph_list, 1)
class TCN_GCN_unit(nn.Module):
def __init__(self, in_channels, out_channels, A, stride=1, residual=True, adaptive=True, kernel_size=5, dilations=[1,2]):
super(TCN_GCN_unit, self).__init__()
self.gcn1 = unit_gcn(in_channels, out_channels, A, adaptive=adaptive)
# self.tcn1 = TemporalConv(out_channels, out_channels, stride=stride)
self.tcn1 = MultiScale_TemporalConv(out_channels, out_channels, kernel_size=kernel_size, stride=stride, dilations=dilations,
residual=False)
self.relu = nn.ReLU(inplace=True)
if not residual:
self.residual = lambda x: 0
elif (in_channels == out_channels) and (stride == 1):
self.residual = lambda x: x
else:
self.residual = unit_tcn(in_channels, out_channels, kernel_size=1, stride=stride)
def forward(self, x):
z, graph = self.gcn1(x)
y = self.relu(self.tcn1(z) + self.residual(x))
return y, graph
class Model(nn.Module):
def __init__(self, num_class=60, num_point=25, num_person=2, graph=None, graph_args=dict(), in_channels=3,
drop_out=0, adaptive=True):
super(Model, self).__init__()
if graph is None:
raise ValueError()
else:
Graph = import_class(graph)
self.graph = Graph(**graph_args)
A = self.graph.A # 3,25,25
self.num_class = num_class
self.num_point = num_point
self.data_bn = nn.BatchNorm1d(num_person * in_channels * num_point)
base_channel = 64
self.l1 = TCN_GCN_unit(in_channels, base_channel, A, residual=False, adaptive=adaptive)
self.l2 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
self.l3 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
self.l4 = TCN_GCN_unit(base_channel, base_channel, A, adaptive=adaptive)
self.l5 = TCN_GCN_unit(base_channel, base_channel*2, A, stride=2, adaptive=adaptive)
self.l6 = TCN_GCN_unit(base_channel*2, base_channel*2, A, adaptive=adaptive)
self.l7 = TCN_GCN_unit(base_channel*2, base_channel*2, A, adaptive=adaptive)
self.l8 = TCN_GCN_unit(base_channel*2, base_channel*4, A, stride=2, adaptive=adaptive)
self.l9 = TCN_GCN_unit(base_channel*4, base_channel*4, A, adaptive=adaptive)
self.l10 = TCN_GCN_unit(base_channel*4, base_channel*4, A, adaptive=adaptive)
self.fc = nn.Linear(base_channel*4, num_class)
nn.init.normal_(self.fc.weight, 0, math.sqrt(2. / num_class))
bn_init(self.data_bn, 1)
if drop_out:
self.drop_out = nn.Dropout(drop_out)
else:
self.drop_out = lambda x: x
    def partDivision(self, graph):
        _, k, u, v = graph.size()  # n k u v
        head = [2, 3]
        left_arm = [4, 5, 6, 7, 21, 22]
        right_arm = [8, 9, 10, 11, 23, 24]
        torso = [0, 1, 20]
        left_leg = [12, 13, 14, 15]
        right_leg = [16, 17, 18, 19]
        graph_list = []
        part_list = [head, torso, right_arm, left_arm, right_leg, left_leg]
        for part in part_list:
            part_graph = graph[:, :, part, :].mean(dim=2, keepdim=True)
            graph_list.append(part_graph)
        graph = torch.cat(graph_list, 2)
        graph_list = []
        for part in part_list:
            part_graph = graph[:, :, :, part].mean(dim=-1, keepdim=True)
            graph_list.append(part_graph)
        return torch.cat(graph_list, -1)
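    # Part-pooling sketch: the 25 NTU joints are grouped into six body parts
    # and the V x V graph is average-pooled along both joint axes down to
    # 6 x 6. This helper is not called by forward below; it appears to be
    # kept for part-level graph variants.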
def forward(self, x):
if len(x.shape) == 3:
N, T, VC = x.shape
x = x.view(N, T, self.num_point, -1).permute(0, 3, 1, 2).contiguous().unsqueeze(-1)
N, C, T, V, M = x.size()
x = x.permute(0, 4, 3, 1, 2).contiguous().view(N, M * V * C, T)
x = self.data_bn(x)
x = x.view(N, M, V, C, T).permute(0, 1, 3, 4, 2).contiguous().view(N * M, C, T, V)
x, _ = self.l1(x)
x, _ = self.l2(x)
x, _ = self.l3(x)
x, _ = self.l4(x)
x, _ = self.l5(x)
x, _ = self.l6(x)
x, _ = self.l7(x)
x, _ = self.l8(x)
x, _ = self.l9(x)
x, graph = self.l10(x)
# N*M,C,T,V
c_new = x.size(1)
x = x.view(N, M, c_new, -1)
x = x.mean(3).mean(1)
x = self.drop_out(x)
graph2 = graph.view(N, M, -1, c_new, V, V)
# graph4 = torch.einsum('n m k c u v, n m k c v l -> n m k c u l', graph2, graph2)
graph2 = graph2.view(N, M, -1, c_new, V, V).mean(1).mean(2).view(N, -1)
# graph4 = graph4.view(N, M, -1, c_new, V, V).mean(1).mean(2).view(N, -1)
# graph = torch.cat([graph2, graph4], -1)
return self.fc(x), graph2
| 13,345
| 35.664835
| 132
|
py
|
SkeletonGCL
|
SkeletonGCL-main/feeders/feeder_ucla.py
|
import numpy as np
import pickle
import json
import random
import math
from torch.utils.data import Dataset
class Feeder(Dataset):
def __init__(self, data_path, label_path, repeat=1, random_choose=False, random_shift=False, random_move=False,
window_size=-1, normalization=False, debug=False, use_mmap=True):
if 'val' in label_path:
self.train_val = 'val'
self.data_dict = [{"file_name": "a05_s04_e02_v03", "length": 21, "label": 5}, {"file_name": "a12_s09_e04_v03", "length": 26, "label": 10}, {"file_name": "a03_s03_e04_v03", "length": 35, "label": 3}, {"file_name": "a08_s02_e01_v03", "length": 101, "label": 7}, {"file_name": "a03_s05_e03_v03", "length": 26, "label": 3}, {"file_name": "a12_s10_e01_v03", "length": 21, "label": 10}, {"file_name": "a01_s07_e03_v03", "length": 31, "label": 1}, {"file_name": "a03_s08_e02_v03", "length": 21, "label": 3}, {"file_name": "a11_s10_e03_v03", "length": 51, "label": 9}, {"file_name": "a11_s03_e00_v03", "length": 46, "label": 9}, {"file_name": "a03_s02_e00_v03", "length": 32, "label": 3}, {"file_name": "a11_s01_e04_v03", "length": 16, "label": 9}, {"file_name": "a09_s08_e04_v03", "length": 63, "label": 8}, {"file_name": "a09_s06_e01_v03", "length": 41, "label": 8}, {"file_name": "a09_s07_e01_v03", "length": 51, "label": 8}, {"file_name": "a02_s08_e01_v03", "length": 21, "label": 2}, {"file_name": "a01_s04_e01_v03", "length": 23, "label": 1}, {"file_name": "a02_s02_e02_v03", "length": 31, "label": 2}, {"file_name": "a02_s07_e05_v03", "length": 31, "label": 2}, {"file_name": "a06_s02_e00_v03", "length": 16, "label": 6}, {"file_name": "a03_s02_e02_v03", "length": 22, "label": 3}, {"file_name": "a11_s09_e04_v03", "length": 22, "label": 9}, {"file_name": "a09_s03_e04_v03", "length": 61, "label": 8}, {"file_name": "a04_s01_e02_v03", "length": 23, "label": 4}, {"file_name": "a12_s01_e01_v03", "length": 17, "label": 10}, {"file_name": "a02_s07_e03_v03", "length": 9, "label": 2}, {"file_name": "a05_s08_e04_v03", "length": 19, "label": 5}, {"file_name": "a02_s07_e02_v03", "length": 31, "label": 2}, {"file_name": "a04_s07_e02_v03", "length": 16, "label": 4}, {"file_name": "a01_s08_e03_v03", "length": 27, "label": 1}, {"file_name": "a08_s03_e01_v03", "length": 68, "label": 7}, {"file_name": "a04_s08_e03_v03", "length": 21, "label": 4}, {"file_name": "a03_s10_e00_v03", "length": 17, "label": 3}, {"file_name": "a04_s03_e03_v03", "length": 21, "label": 4}, {"file_name": "a06_s06_e02_v03", "length": 21, "label": 6}, {"file_name": "a09_s03_e00_v03", "length": 81, "label": 8}, {"file_name": "a09_s03_e03_v03", "length": 46, "label": 8}, {"file_name": "a04_s02_e02_v03", "length": 21, "label": 4}, {"file_name": "a08_s01_e02_v03", "length": 78, "label": 7}, {"file_name": "a04_s04_e00_v03", "length": 11, "label": 4}, {"file_name": "a03_s02_e03_v03", "length": 39, "label": 3}, {"file_name": "a05_s04_e00_v03", "length": 21, "label": 5}, {"file_name": "a05_s07_e03_v03", "length": 36, "label": 5}, {"file_name": "a06_s10_e00_v03", "length": 31, "label": 6}, {"file_name": "a11_s07_e00_v03", "length": 31, "label": 9}, {"file_name": "a03_s01_e01_v03", "length": 24, "label": 3}, {"file_name": "a04_s06_e01_v03", "length": 16, "label": 4}, {"file_name": "a08_s02_e04_v03", "length": 96, "label": 7}, {"file_name": "a09_s08_e03_v03", "length": 46, "label": 8}, {"file_name": "a05_s07_e00_v03", "length": 36, "label": 5}, {"file_name": "a05_s02_e02_v03", "length": 21, "label": 5}, {"file_name": "a04_s06_e04_v03", "length": 21, "label": 4}, {"file_name": "a05_s09_e03_v03", "length": 21, "label": 5}, {"file_name": "a03_s06_e02_v03", "length": 15, "label": 3}, {"file_name": "a01_s01_e00_v03", "length": 27, "label": 1}, {"file_name": "a06_s06_e03_v03", "length": 11, "label": 6}, {"file_name": "a06_s10_e02_v03", "length": 25, "label": 6}, {"file_name": "a02_s07_e04_v03", "length": 36, "label": 2}, {"file_name": "a09_s06_e00_v03", "length": 80, 
"label": 8}, {"file_name": "a04_s07_e04_v03", "length": 16, "label": 4}, {"file_name": "a05_s02_e01_v03", "length": 19, "label": 5}, {"file_name": "a01_s06_e04_v03", "length": 17, "label": 1}, {"file_name": "a04_s08_e01_v03", "length": 17, "label": 4}, {"file_name": "a01_s09_e00_v03", "length": 31, "label": 1}, {"file_name": "a08_s03_e03_v03", "length": 67, "label": 7}, {"file_name": "a12_s03_e00_v03", "length": 21, "label": 10}, {"file_name": "a11_s02_e03_v03", "length": 29, "label": 9}, {"file_name": "a12_s07_e02_v03", "length": 13, "label": 10}, {"file_name": "a05_s06_e01_v03", "length": 16, "label": 5}, {"file_name": "a06_s02_e04_v03", "length": 16, "label": 6}, {"file_name": "a06_s04_e00_v03", "length": 16, "label": 6}, {"file_name": "a05_s09_e01_v03", "length": 26, "label": 5}, {"file_name": "a11_s10_e04_v03", "length": 24, "label": 9}, {"file_name": "a03_s01_e00_v03", "length": 33, "label": 3}, {"file_name": "a11_s02_e01_v03", "length": 14, "label": 9}, {"file_name": "a04_s02_e00_v03", "length": 31, "label": 4}, {"file_name": "a11_s01_e01_v03", "length": 14, "label": 9}, {"file_name": "a02_s06_e03_v03", "length": 21, "label": 2}, {"file_name": "a12_s10_e03_v03", "length": 16, "label": 10}, {"file_name": "a01_s06_e00_v03", "length": 21, "label": 1}, {"file_name": "a05_s07_e01_v03", "length": 41, "label": 5}, {"file_name": "a01_s09_e01_v03", "length": 26, "label": 1}, {"file_name": "a02_s06_e00_v03", "length": 18, "label": 2}, {"file_name": "a11_s09_e00_v03", "length": 26, "label": 9}, {"file_name": "a03_s03_e01_v03", "length": 47, "label": 3}, {"file_name": "a03_s08_e00_v03", "length": 22, "label": 3}, {"file_name": "a06_s04_e01_v03", "length": 21, "label": 6}, {"file_name": "a02_s05_e01_v03", "length": 34, "label": 2}, {"file_name": "a03_s04_e04_v03", "length": 29, "label": 3}, {"file_name": "a01_s09_e02_v03", "length": 26, "label": 1}, {"file_name": "a08_s03_e04_v03", "length": 46, "label": 7}, {"file_name": "a01_s10_e00_v03", "length": 6, "label": 1}, {"file_name": "a01_s02_e02_v03", "length": 26, "label": 1}, {"file_name": "a09_s03_e01_v03", "length": 36, "label": 8}, {"file_name": "a05_s06_e00_v03", "length": 26, "label": 5}, {"file_name": "a05_s01_e02_v03", "length": 22, "label": 5}, {"file_name": "a02_s02_e04_v03", "length": 28, "label": 2}, {"file_name": "a06_s07_e03_v03", "length": 26, "label": 6}, {"file_name": "a04_s02_e04_v03", "length": 16, "label": 4}, {"file_name": "a02_s07_e01_v03", "length": 31, "label": 2}, {"file_name": "a03_s07_e03_v03", "length": 11, "label": 3}, {"file_name": "a12_s08_e01_v03", "length": 16, "label": 10}, {"file_name": "a05_s01_e03_v03", "length": 19, "label": 5}, {"file_name": "a02_s09_e02_v03", "length": 43, "label": 2}, {"file_name": "a05_s08_e03_v03", "length": 26, "label": 5}, {"file_name": "a04_s06_e00_v03", "length": 16, "label": 4}, {"file_name": "a09_s01_e02_v03", "length": 41, "label": 8}, {"file_name": "a12_s09_e00_v03", "length": 24, "label": 10}, {"file_name": "a04_s09_e02_v03", "length": 26, "label": 4}, {"file_name": "a03_s03_e03_v03", "length": 43, "label": 3}, {"file_name": "a08_s07_e03_v03", "length": 63, "label": 7}, {"file_name": "a08_s09_e02_v03", "length": 134, "label": 7}, {"file_name": "a08_s09_e00_v03", "length": 91, "label": 7}, {"file_name": "a06_s06_e04_v03", "length": 11, "label": 6}, {"file_name": "a01_s07_e04_v03", "length": 26, "label": 1}, {"file_name": "a05_s04_e01_v03", "length": 24, "label": 5}, {"file_name": "a04_s07_e00_v03", "length": 21, "label": 4}, {"file_name": "a05_s08_e01_v03", "length": 21, "label": 
5}, {"file_name": "a11_s06_e03_v03", "length": 16, "label": 9}, {"file_name": "a01_s04_e03_v03", "length": 21, "label": 1}, {"file_name": "a11_s06_e04_v03", "length": 12, "label": 9}, {"file_name": "a12_s07_e03_v03", "length": 21, "label": 10}, {"file_name": "a06_s07_e05_v03", "length": 21, "label": 6}, {"file_name": "a01_s02_e04_v03", "length": 23, "label": 1}, {"file_name": "a03_s01_e03_v03", "length": 36, "label": 3}, {"file_name": "a12_s02_e02_v03", "length": 21, "label": 10}, {"file_name": "a03_s06_e01_v03", "length": 17, "label": 3}, {"file_name": "a05_s02_e03_v03", "length": 21, "label": 5}, {"file_name": "a03_s02_e04_v03", "length": 23, "label": 3}, {"file_name": "a08_s02_e03_v03", "length": 103, "label": 7}, {"file_name": "a08_s03_e02_v03", "length": 66, "label": 7}, {"file_name": "a09_s01_e01_v03", "length": 40, "label": 8}, {"file_name": "a02_s01_e01_v03", "length": 30, "label": 2}, {"file_name": "a08_s06_e00_v03", "length": 96, "label": 7}, {"file_name": "a12_s08_e02_v03", "length": 16, "label": 10}, {"file_name": "a02_s08_e00_v03", "length": 26, "label": 2}, {"file_name": "a01_s08_e02_v03", "length": 36, "label": 1}, {"file_name": "a09_s04_e01_v03", "length": 36, "label": 8}, {"file_name": "a04_s01_e04_v03", "length": 16, "label": 4}, {"file_name": "a08_s10_e03_v03", "length": 68, "label": 7}, {"file_name": "a02_s05_e00_v03", "length": 28, "label": 2}, {"file_name": "a06_s04_e03_v03", "length": 16, "label": 6}, {"file_name": "a06_s09_e03_v03", "length": 21, "label": 6}, {"file_name": "a05_s03_e02_v03", "length": 21, "label": 5}, {"file_name": "a06_s03_e04_v03", "length": 16, "label": 6}, {"file_name": "a06_s01_e03_v03", "length": 21, "label": 6}, {"file_name": "a11_s03_e01_v03", "length": 21, "label": 9}, {"file_name": "a09_s02_e01_v03", "length": 31, "label": 8}, {"file_name": "a02_s02_e00_v03", "length": 42, "label": 2}, {"file_name": "a01_s01_e03_v03", "length": 25, "label": 1}, {"file_name": "a08_s06_e02_v03", "length": 93, "label": 7}, {"file_name": "a12_s01_e03_v03", "length": 18, "label": 10}, {"file_name": "a09_s09_e01_v03", "length": 56, "label": 8}, {"file_name": "a04_s10_e03_v03", "length": 16, "label": 4}, {"file_name": "a06_s09_e04_v03", "length": 16, "label": 6}, {"file_name": "a02_s04_e01_v03", "length": 31, "label": 2}, {"file_name": "a12_s10_e04_v03", "length": 21, "label": 10}, {"file_name": "a06_s03_e01_v03", "length": 26, "label": 6}, {"file_name": "a02_s03_e04_v03", "length": 62, "label": 2}, {"file_name": "a11_s09_e02_v03", "length": 26, "label": 9}, {"file_name": "a08_s08_e02_v03", "length": 51, "label": 7}, {"file_name": "a03_s02_e01_v03", "length": 36, "label": 3}, {"file_name": "a12_s02_e00_v03", "length": 19, "label": 10}, {"file_name": "a12_s08_e03_v03", "length": 14, "label": 10}, {"file_name": "a02_s09_e03_v03", "length": 31, "label": 2}, {"file_name": "a09_s02_e02_v03", "length": 33, "label": 8}, {"file_name": "a05_s09_e04_v03", "length": 21, "label": 5}, {"file_name": "a01_s04_e00_v03", "length": 21, "label": 1}, {"file_name": "a08_s04_e03_v03", "length": 68, "label": 7}, {"file_name": "a12_s09_e03_v03", "length": 17, "label": 10}, {"file_name": "a02_s04_e03_v03", "length": 31, "label": 2}, {"file_name": "a04_s03_e04_v03", "length": 21, "label": 4}, {"file_name": "a12_s06_e01_v03", "length": 11, "label": 10}, {"file_name": "a11_s04_e03_v03", "length": 36, "label": 9}, {"file_name": "a05_s03_e00_v03", "length": 20, "label": 5}, {"file_name": "a12_s07_e00_v03", "length": 11, "label": 10}, {"file_name": "a06_s03_e02_v03", "length": 21, "label": 6}, 
{"file_name": "a03_s03_e05_v03", "length": 33, "label": 3}, {"file_name": "a11_s08_e01_v03", "length": 26, "label": 9}, {"file_name": "a06_s10_e01_v03", "length": 21, "label": 6}, {"file_name": "a04_s03_e02_v03", "length": 11, "label": 4}, {"file_name": "a02_s03_e03_v03", "length": 56, "label": 2}, {"file_name": "a09_s10_e04_v03", "length": 51, "label": 8}, {"file_name": "a04_s08_e04_v03", "length": 21, "label": 4}, {"file_name": "a11_s08_e00_v03", "length": 35, "label": 9}, {"file_name": "a02_s01_e00_v03", "length": 39, "label": 2}, {"file_name": "a04_s02_e03_v03", "length": 19, "label": 4}, {"file_name": "a04_s02_e01_v03", "length": 36, "label": 4}, {"file_name": "a06_s08_e00_v03", "length": 21, "label": 6}, {"file_name": "a08_s08_e01_v03", "length": 52, "label": 7}, {"file_name": "a02_s03_e01_v03", "length": 45, "label": 2}, {"file_name": "a11_s02_e02_v03", "length": 29, "label": 9}, {"file_name": "a09_s07_e02_v03", "length": 38, "label": 8}, {"file_name": "a02_s05_e03_v03", "length": 21, "label": 2}, {"file_name": "a01_s07_e02_v03", "length": 31, "label": 1}, {"file_name": "a03_s05_e00_v03", "length": 20, "label": 3}, {"file_name": "a09_s03_e02_v03", "length": 38, "label": 8}, {"file_name": "a01_s03_e07_v03", "length": 28, "label": 1}, {"file_name": "a09_s04_e04_v03", "length": 56, "label": 8}, {"file_name": "a11_s10_e00_v03", "length": 16, "label": 9}, {"file_name": "a04_s04_e01_v03", "length": 13, "label": 4}, {"file_name": "a02_s08_e02_v03", "length": 21, "label": 2}, {"file_name": "a04_s01_e07_v03", "length": 16, "label": 4}, {"file_name": "a11_s06_e00_v03", "length": 26, "label": 9}, {"file_name": "a05_s02_e00_v03", "length": 27, "label": 5}, {"file_name": "a02_s02_e03_v03", "length": 29, "label": 2}, {"file_name": "a05_s06_e02_v03", "length": 16, "label": 5}, {"file_name": "a08_s01_e03_v03", "length": 76, "label": 7}, {"file_name": "a08_s09_e01_v03", "length": 91, "label": 7}, {"file_name": "a02_s08_e04_v03", "length": 36, "label": 2}, {"file_name": "a01_s02_e03_v03", "length": 29, "label": 1}, {"file_name": "a11_s08_e05_v03", "length": 28, "label": 9}, {"file_name": "a03_s09_e02_v03", "length": 26, "label": 3}, {"file_name": "a04_s08_e00_v03", "length": 17, "label": 4}, {"file_name": "a12_s03_e04_v03", "length": 16, "label": 10}, {"file_name": "a08_s04_e01_v03", "length": 56, "label": 7}, {"file_name": "a12_s04_e03_v03", "length": 11, "label": 10}, {"file_name": "a04_s09_e03_v03", "length": 31, "label": 4}, {"file_name": "a05_s06_e03_v03", "length": 26, "label": 5}, {"file_name": "a09_s06_e02_v03", "length": 56, "label": 8}, {"file_name": "a06_s08_e05_v03", "length": 21, "label": 6}, {"file_name": "a12_s02_e03_v03", "length": 21, "label": 10}, {"file_name": "a11_s03_e03_v03", "length": 36, "label": 9}, {"file_name": "a11_s07_e04_v03", "length": 23, "label": 9}, {"file_name": "a04_s01_e00_v03", "length": 31, "label": 4}, {"file_name": "a03_s08_e03_v03", "length": 14, "label": 3}, {"file_name": "a04_s10_e00_v03", "length": 12, "label": 4}, {"file_name": "a08_s03_e00_v03", "length": 86, "label": 7}, {"file_name": "a02_s08_e03_v03", "length": 21, "label": 2}, {"file_name": "a01_s09_e03_v03", "length": 26, "label": 1}, {"file_name": "a01_s01_e04_v03", "length": 28, "label": 1}, {"file_name": "a01_s07_e00_v03", "length": 28, "label": 1}, {"file_name": "a02_s03_e00_v03", "length": 46, "label": 2}, {"file_name": "a01_s02_e00_v03", "length": 21, "label": 1}, {"file_name": "a03_s09_e04_v03", "length": 21, "label": 3}, {"file_name": "a01_s06_e02_v03", "length": 26, "label": 1}, 
{"file_name": "a03_s07_e02_v03", "length": 17, "label": 3}, {"file_name": "a03_s05_e04_v03", "length": 39, "label": 3}, {"file_name": "a08_s07_e01_v03", "length": 126, "label": 7}, {"file_name": "a04_s07_e03_v03", "length": 26, "label": 4}, {"file_name": "a08_s04_e04_v03", "length": 56, "label": 7}, {"file_name": "a08_s08_e00_v03", "length": 68, "label": 7}, {"file_name": "a02_s09_e00_v03", "length": 37, "label": 2}, {"file_name": "a06_s03_e00_v03", "length": 16, "label": 6}, {"file_name": "a09_s09_e04_v03", "length": 68, "label": 8}, {"file_name": "a05_s04_e04_v03", "length": 21, "label": 5}, {"file_name": "a09_s04_e03_v03", "length": 31, "label": 8}, {"file_name": "a01_s09_e04_v03", "length": 28, "label": 1}, {"file_name": "a05_s10_e00_v03", "length": 33, "label": 5}, {"file_name": "a09_s08_e02_v03", "length": 49, "label": 8}, {"file_name": "a11_s07_e01_v03", "length": 20, "label": 9}, {"file_name": "a06_s01_e00_v03", "length": 21, "label": 6}, {"file_name": "a12_s08_e04_v03", "length": 14, "label": 10}, {"file_name": "a08_s09_e04_v03", "length": 75, "label": 7}, {"file_name": "a12_s10_e02_v03", "length": 21, "label": 10}, {"file_name": "a04_s01_e01_v03", "length": 33, "label": 4}, {"file_name": "a01_s08_e01_v03", "length": 21, "label": 1}, {"file_name": "a09_s07_e00_v03", "length": 41, "label": 8}, {"file_name": "a04_s09_e00_v03", "length": 21, "label": 4}, {"file_name": "a08_s02_e02_v03", "length": 111, "label": 7}, {"file_name": "a09_s09_e02_v03", "length": 81, "label": 8}, {"file_name": "a09_s02_e03_v03", "length": 31, "label": 8}, {"file_name": "a11_s09_e01_v03", "length": 16, "label": 9}, {"file_name": "a03_s10_e01_v03", "length": 11, "label": 3}, {"file_name": "a11_s03_e02_v03", "length": 21, "label": 9}, {"file_name": "a11_s08_e04_v03", "length": 19, "label": 9}, {"file_name": "a06_s08_e02_v03", "length": 11, "label": 6}, {"file_name": "a11_s04_e04_v03", "length": 21, "label": 9}, {"file_name": "a12_s01_e00_v03", "length": 18, "label": 10}, {"file_name": "a02_s06_e04_v03", "length": 21, "label": 2}, {"file_name": "a06_s07_e01_v03", "length": 16, "label": 6}, {"file_name": "a05_s10_e03_v03", "length": 26, "label": 5}, {"file_name": "a03_s06_e00_v03", "length": 23, "label": 3}, {"file_name": "a12_s02_e01_v03", "length": 21, "label": 10}, {"file_name": "a08_s10_e02_v03", "length": 76, "label": 7}, {"file_name": "a08_s02_e00_v03", "length": 86, "label": 7}, {"file_name": "a06_s10_e03_v03", "length": 21, "label": 6}, {"file_name": "a11_s04_e02_v03", "length": 21, "label": 9}, {"file_name": "a08_s09_e03_v03", "length": 121, "label": 7}, {"file_name": "a12_s06_e04_v03", "length": 16, "label": 10}, {"file_name": "a01_s07_e01_v03", "length": 26, "label": 1}, {"file_name": "a05_s02_e04_v03", "length": 26, "label": 5}, {"file_name": "a09_s08_e00_v03", "length": 52, "label": 8}, {"file_name": "a02_s04_e04_v03", "length": 33, "label": 2}, {"file_name": "a06_s07_e00_v03", "length": 8, "label": 6}, {"file_name": "a04_s09_e01_v03", "length": 34, "label": 4}, {"file_name": "a09_s01_e00_v03", "length": 41, "label": 8}, {"file_name": "a08_s10_e01_v03", "length": 111, "label": 7}, {"file_name": "a11_s10_e02_v03", "length": 61, "label": 9}, {"file_name": "a09_s10_e02_v03", "length": 49, "label": 8}, {"file_name": "a03_s07_e04_v03", "length": 11, "label": 3}, {"file_name": "a05_s08_e00_v03", "length": 26, "label": 5}, {"file_name": "a11_s09_e03_v03", "length": 15, "label": 9}, {"file_name": "a12_s04_e04_v03", "length": 14, "label": 10}, {"file_name": "a04_s01_e03_v03", "length": 16, "label": 4}, 
{"file_name": "a04_s10_e02_v03", "length": 16, "label": 4}, {"file_name": "a06_s10_e04_v03", "length": 16, "label": 6}, {"file_name": "a01_s08_e00_v03", "length": 21, "label": 1}, {"file_name": "a03_s10_e02_v03", "length": 28, "label": 3}, {"file_name": "a03_s07_e01_v03", "length": 11, "label": 3}, {"file_name": "a05_s04_e03_v03", "length": 21, "label": 5}, {"file_name": "a01_s01_e02_v03", "length": 25, "label": 1}, {"file_name": "a05_s10_e04_v03", "length": 19, "label": 5}, {"file_name": "a06_s08_e03_v03", "length": 21, "label": 6}, {"file_name": "a02_s04_e02_v03", "length": 33, "label": 2}, {"file_name": "a12_s01_e04_v03", "length": 15, "label": 10}, {"file_name": "a05_s07_e05_v03", "length": 18, "label": 5}, {"file_name": "a02_s01_e02_v03", "length": 28, "label": 2}, {"file_name": "a12_s10_e00_v03", "length": 21, "label": 10}, {"file_name": "a11_s02_e00_v03", "length": 31, "label": 9}, {"file_name": "a02_s09_e01_v03", "length": 40, "label": 2}, {"file_name": "a02_s04_e00_v03", "length": 46, "label": 2}, {"file_name": "a12_s01_e02_v03", "length": 14, "label": 10}, {"file_name": "a01_s03_e06_v03", "length": 31, "label": 1}, {"file_name": "a03_s01_e04_v03", "length": 36, "label": 3}, {"file_name": "a01_s03_e04_v03", "length": 34, "label": 1}, {"file_name": "a01_s06_e03_v03", "length": 21, "label": 1}, {"file_name": "a02_s06_e01_v03", "length": 16, "label": 2}, {"file_name": "a12_s07_e04_v03", "length": 21, "label": 10}, {"file_name": "a08_s10_e04_v03", "length": 86, "label": 7}, {"file_name": "a02_s03_e02_v03", "length": 58, "label": 2}, {"file_name": "a05_s06_e04_v03", "length": 18, "label": 5}, {"file_name": "a05_s10_e01_v03", "length": 26, "label": 5}, {"file_name": "a09_s10_e01_v03", "length": 55, "label": 8}, {"file_name": "a08_s08_e04_v03", "length": 61, "label": 7}, {"file_name": "a06_s01_e02_v03", "length": 21, "label": 6}, {"file_name": "a01_s01_e01_v03", "length": 21, "label": 1}, {"file_name": "a06_s08_e04_v03", "length": 17, "label": 6}, {"file_name": "a09_s06_e03_v03", "length": 56, "label": 8}, {"file_name": "a06_s09_e01_v03", "length": 21, "label": 6}, {"file_name": "a08_s06_e01_v03", "length": 134, "label": 7}, {"file_name": "a02_s01_e04_v03", "length": 38, "label": 2}, {"file_name": "a11_s01_e00_v03", "length": 14, "label": 9}, {"file_name": "a03_s03_e00_v03", "length": 41, "label": 3}, {"file_name": "a01_s04_e04_v03", "length": 21, "label": 1}, {"file_name": "a06_s01_e04_v03", "length": 16, "label": 6}, {"file_name": "a01_s10_e01_v03", "length": 24, "label": 1}, {"file_name": "a03_s09_e00_v03", "length": 26, "label": 3}, {"file_name": "a08_s10_e00_v03", "length": 71, "label": 7}, {"file_name": "a05_s10_e02_v03", "length": 34, "label": 5}, {"file_name": "a04_s10_e01_v03", "length": 16, "label": 4}, {"file_name": "a05_s03_e04_v03", "length": 14, "label": 5}, {"file_name": "a05_s07_e02_v03", "length": 26, "label": 5}, {"file_name": "a12_s02_e04_v03", "length": 16, "label": 10}, {"file_name": "a06_s02_e03_v03", "length": 17, "label": 6}, {"file_name": "a09_s01_e03_v03", "length": 41, "label": 8}, {"file_name": "a08_s04_e00_v03", "length": 49, "label": 7}, {"file_name": "a02_s10_e01_v03", "length": 32, "label": 2}, {"file_name": "a11_s04_e01_v03", "length": 21, "label": 9}, {"file_name": "a03_s05_e01_v03", "length": 39, "label": 3}, {"file_name": "a06_s07_e04_v03", "length": 21, "label": 6}, {"file_name": "a09_s09_e03_v03", "length": 56, "label": 8}, {"file_name": "a02_s06_e02_v03", "length": 21, "label": 2}, {"file_name": "a05_s01_e04_v03", "length": 21, "label": 5}, 
{"file_name": "a11_s03_e04_v03", "length": 26, "label": 9}, {"file_name": "a04_s08_e02_v03", "length": 21, "label": 4}, {"file_name": "a04_s09_e04_v03", "length": 21, "label": 4}, {"file_name": "a08_s07_e00_v03", "length": 51, "label": 7}, {"file_name": "a04_s01_e05_v03", "length": 16, "label": 4}, {"file_name": "a12_s07_e01_v03", "length": 16, "label": 10}, {"file_name": "a02_s01_e03_v03", "length": 40, "label": 2}, {"file_name": "a09_s04_e00_v03", "length": 35, "label": 8}, {"file_name": "a09_s01_e04_v03", "length": 37, "label": 8}, {"file_name": "a12_s08_e00_v03", "length": 16, "label": 10}, {"file_name": "a04_s06_e03_v03", "length": 16, "label": 4}, {"file_name": "a11_s06_e01_v03", "length": 21, "label": 9}, {"file_name": "a01_s10_e02_v03", "length": 26, "label": 1}, {"file_name": "a02_s10_e04_v03", "length": 29, "label": 2}, {"file_name": "a04_s07_e01_v03", "length": 21, "label": 4}, {"file_name": "a03_s04_e01_v03", "length": 39, "label": 3}, {"file_name": "a03_s01_e02_v03", "length": 31, "label": 3}, {"file_name": "a06_s09_e02_v03", "length": 26, "label": 6}, {"file_name": "a03_s07_e00_v03", "length": 21, "label": 3}, {"file_name": "a06_s04_e02_v03", "length": 21, "label": 6}, {"file_name": "a12_s04_e01_v03", "length": 16, "label": 10}, {"file_name": "a04_s06_e02_v03", "length": 21, "label": 4}, {"file_name": "a04_s04_e04_v03", "length": 21, "label": 4}, {"file_name": "a09_s04_e02_v03", "length": 37, "label": 8}, {"file_name": "a02_s02_e01_v03", "length": 26, "label": 2}, {"file_name": "a06_s09_e00_v03", "length": 21, "label": 6}, {"file_name": "a05_s09_e00_v03", "length": 28, "label": 5}, {"file_name": "a05_s03_e01_v03", "length": 17, "label": 5}, {"file_name": "a02_s05_e04_v03", "length": 29, "label": 2}, {"file_name": "a01_s06_e01_v03", "length": 21, "label": 1}, {"file_name": "a12_s04_e02_v03", "length": 13, "label": 10}, {"file_name": "a03_s05_e02_v03", "length": 36, "label": 3}, {"file_name": "a01_s03_e02_v03", "length": 37, "label": 1}, {"file_name": "a05_s08_e05_v03", "length": 21, "label": 5}, {"file_name": "a01_s03_e00_v03", "length": 29, "label": 1}, {"file_name": "a08_s06_e03_v03", "length": 120, "label": 7}, {"file_name": "a05_s09_e02_v03", "length": 26, "label": 5}, {"file_name": "a01_s02_e01_v03", "length": 27, "label": 1}, {"file_name": "a01_s03_e01_v03", "length": 33, "label": 1}, {"file_name": "a04_s03_e01_v03", "length": 16, "label": 4}, {"file_name": "a06_s06_e00_v03", "length": 21, "label": 6}, {"file_name": "a12_s06_e02_v03", "length": 18, "label": 10}, {"file_name": "a11_s03_e05_v03", "length": 26, "label": 9}, {"file_name": "a04_s10_e04_v03", "length": 16, "label": 4}, {"file_name": "a12_s03_e01_v03", "length": 11, "label": 10}, {"file_name": "a08_s04_e02_v03", "length": 67, "label": 7}, {"file_name": "a06_s04_e04_v03", "length": 13, "label": 6}, {"file_name": "a12_s06_e03_v03", "length": 17, "label": 10}, {"file_name": "a08_s01_e04_v03", "length": 71, "label": 7}, {"file_name": "a04_s03_e00_v03", "length": 14, "label": 4}, {"file_name": "a08_s01_e00_v03", "length": 51, "label": 7}, {"file_name": "a01_s03_e03_v03", "length": 41, "label": 1}, {"file_name": "a04_s01_e08_v03", "length": 16, "label": 4}, {"file_name": "a01_s04_e02_v03", "length": 26, "label": 1}, {"file_name": "a01_s10_e04_v03", "length": 26, "label": 1}, {"file_name": "a09_s02_e00_v03", "length": 41, "label": 8}, {"file_name": "a06_s07_e02_v03", "length": 16, "label": 6}, {"file_name": "a08_s07_e02_v03", "length": 46, "label": 7}, {"file_name": "a11_s10_e01_v03", "length": 36, "label": 9}, 
{"file_name": "a02_s07_e00_v03", "length": 31, "label": 2}, {"file_name": "a06_s08_e01_v03", "length": 16, "label": 6}, {"file_name": "a01_s10_e03_v03", "length": 31, "label": 1}, {"file_name": "a11_s02_e04_v03", "length": 35, "label": 9}, {"file_name": "a02_s09_e04_v03", "length": 1, "label": 2}, {"file_name": "a12_s03_e03_v03", "length": 21, "label": 10}, {"file_name": "a05_s01_e01_v03", "length": 21, "label": 5}, {"file_name": "a05_s08_e02_v03", "length": 16, "label": 5}, {"file_name": "a12_s09_e02_v03", "length": 23, "label": 10}, {"file_name": "a09_s08_e01_v03", "length": 48, "label": 8}, {"file_name": "a01_s08_e04_v03", "length": 23, "label": 1}, {"file_name": "a09_s09_e00_v03", "length": 56, "label": 8}, {"file_name": "a03_s10_e03_v03", "length": 13, "label": 3}, {"file_name": "a09_s02_e04_v03", "length": 36, "label": 8}, {"file_name": "a08_s01_e01_v03", "length": 61, "label": 7}, {"file_name": "a09_s10_e00_v03", "length": 54, "label": 8}, {"file_name": "a12_s09_e01_v03", "length": 18, "label": 10}, {"file_name": "a05_s01_e00_v03", "length": 20, "label": 5}, {"file_name": "a06_s02_e01_v03", "length": 16, "label": 6}, {"file_name": "a08_s08_e03_v03", "length": 62, "label": 7}, {"file_name": "a04_s04_e03_v03", "length": 21, "label": 4}, {"file_name": "a02_s10_e02_v03", "length": 31, "label": 2}, {"file_name": "a01_s03_e05_v03", "length": 31, "label": 1}, {"file_name": "a06_s03_e03_v03", "length": 19, "label": 6}, {"file_name": "a05_s07_e04_v03", "length": 21, "label": 5}, {"file_name": "a02_s10_e00_v03", "length": 38, "label": 2}, {"file_name": "a12_s04_e00_v03", "length": 16, "label": 10}, {"file_name": "a03_s04_e02_v03", "length": 27, "label": 3}, {"file_name": "a06_s02_e02_v03", "length": 21, "label": 6}, {"file_name": "a03_s04_e03_v03", "length": 31, "label": 3}, {"file_name": "a11_s08_e03_v03", "length": 12, "label": 9}, {"file_name": "a09_s07_e03_v03", "length": 44, "label": 8}, {"file_name": "a05_s03_e03_v03", "length": 14, "label": 5}, {"file_name": "a09_s10_e03_v03", "length": 54, "label": 8}, {"file_name": "a11_s06_e02_v03", "length": 18, "label": 9}, {"file_name": "a04_s04_e02_v03", "length": 11, "label": 4}, {"file_name": "a11_s08_e02_v03", "length": 21, "label": 9}, {"file_name": "a11_s07_e03_v03", "length": 21, "label": 9}, {"file_name": "a04_s01_e06_v03", "length": 19, "label": 4}, {"file_name": "a06_s01_e01_v03", "length": 21, "label": 6}, {"file_name": "a12_s06_e00_v03", "length": 11, "label": 10}, {"file_name": "a12_s03_e02_v03", "length": 18, "label": 10}, {"file_name": "a03_s04_e00_v03", "length": 26, "label": 3}, {"file_name": "a11_s01_e03_v03", "length": 18, "label": 9}, {"file_name": "a03_s08_e01_v03", "length": 21, "label": 3}, {"file_name": "a11_s04_e00_v03", "length": 31, "label": 9}, {"file_name": "a02_s05_e02_v03", "length": 26, "label": 2}, {"file_name": "a06_s06_e01_v03", "length": 19, "label": 6}, {"file_name": "a03_s03_e02_v03", "length": 32, "label": 3}, {"file_name": "a11_s07_e02_v03", "length": 16, "label": 9}, {"file_name": "a11_s01_e02_v03", "length": 15, "label": 9}]
else:
self.train_val = 'train'
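            # Hardcoded training split: one dict per clip. "file_name" encodes
            # action/subject/episode/view as aXX_sYY_eZZ_vWW, "length" appears to
            # be the clip length in frames, and "label" the action class index
            # (derived from the aXX prefix).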
self.data_dict = [{"file_name": "a05_s04_e02_v01", "length": 26, "label": 5}, {"file_name": "a01_s05_e04_v01", "length": 46, "label": 1}, {"file_name": "a03_s03_e04_v01", "length": 42, "label": 3}, {"file_name": "a08_s02_e01_v01", "length": 106, "label": 7}, {"file_name": "a03_s05_e03_v01", "length": 31, "label": 3}, {"file_name": "a06_s05_e01_v01", "length": 20, "label": 6}, {"file_name": "a12_s10_e01_v01", "length": 37, "label": 10}, {"file_name": "a01_s07_e03_v01", "length": 39, "label": 1}, {"file_name": "a03_s08_e02_v01", "length": 61, "label": 3}, {"file_name": "a11_s10_e03_v01", "length": 49, "label": 9}, {"file_name": "a11_s03_e00_v01", "length": 41, "label": 9}, {"file_name": "a03_s02_e00_v01", "length": 31, "label": 3}, {"file_name": "a11_s01_e04_v01", "length": 21, "label": 9}, {"file_name": "a04_s05_e04_v01", "length": 49, "label": 4}, {"file_name": "a09_s08_e04_v01", "length": 76, "label": 8}, {"file_name": "a09_s06_e01_v01", "length": 41, "label": 8}, {"file_name": "a09_s07_e01_v01", "length": 77, "label": 8}, {"file_name": "a02_s08_e01_v01", "length": 21, "label": 2}, {"file_name": "a01_s04_e01_v01", "length": 41, "label": 1}, {"file_name": "a02_s02_e02_v01", "length": 53, "label": 2}, {"file_name": "a02_s07_e05_v01", "length": 39, "label": 2}, {"file_name": "a06_s02_e00_v01", "length": 34, "label": 6}, {"file_name": "a03_s02_e02_v01", "length": 26, "label": 3}, {"file_name": "a09_s03_e04_v01", "length": 75, "label": 8}, {"file_name": "a04_s01_e02_v01", "length": 44, "label": 4}, {"file_name": "a12_s01_e01_v01", "length": 45, "label": 10}, {"file_name": "a02_s07_e03_v01", "length": 53, "label": 2}, {"file_name": "a05_s08_e04_v01", "length": 19, "label": 5}, {"file_name": "a02_s07_e02_v01", "length": 35, "label": 2}, {"file_name": "a04_s07_e02_v01", "length": 78, "label": 4}, {"file_name": "a01_s08_e03_v01", "length": 64, "label": 1}, {"file_name": "a08_s03_e01_v01", "length": 86, "label": 7}, {"file_name": "a04_s08_e03_v01", "length": 79, "label": 4}, {"file_name": "a03_s10_e00_v01", "length": 52, "label": 3}, {"file_name": "a04_s03_e03_v01", "length": 76, "label": 4}, {"file_name": "a11_s05_e02_v01", "length": 20, "label": 9}, {"file_name": "a06_s06_e02_v01", "length": 21, "label": 6}, {"file_name": "a01_s08_e06_v01", "length": 27, "label": 1}, {"file_name": "a03_s09_e03_v01", "length": 29, "label": 3}, {"file_name": "a09_s03_e00_v01", "length": 105, "label": 8}, {"file_name": "a09_s03_e03_v01", "length": 49, "label": 8}, {"file_name": "a04_s02_e02_v01", "length": 120, "label": 4}, {"file_name": "a08_s01_e02_v01", "length": 84, "label": 7}, {"file_name": "a04_s04_e00_v01", "length": 30, "label": 4}, {"file_name": "a03_s02_e03_v01", "length": 50, "label": 3}, {"file_name": "a05_s04_e00_v01", "length": 49, "label": 5}, {"file_name": "a05_s07_e03_v01", "length": 34, "label": 5}, {"file_name": "a02_s10_e05_v01", "length": 51, "label": 2}, {"file_name": "a06_s10_e00_v01", "length": 35, "label": 6}, {"file_name": "a11_s07_e00_v01", "length": 26, "label": 9}, {"file_name": "a03_s01_e01_v01", "length": 131, "label": 3}, {"file_name": "a04_s06_e01_v01", "length": 35, "label": 4}, {"file_name": "a08_s02_e04_v01", "length": 106, "label": 7}, {"file_name": "a09_s08_e03_v01", "length": 85, "label": 8}, {"file_name": "a05_s02_e02_v01", "length": 19, "label": 5}, {"file_name": "a04_s06_e04_v01", "length": 23, "label": 4}, {"file_name": "a05_s09_e03_v01", "length": 38, "label": 5}, {"file_name": "a03_s06_e02_v01", "length": 23, "label": 3}, {"file_name": "a01_s01_e00_v01", "length": 44, 
"label": 1}, {"file_name": "a06_s06_e03_v01", "length": 28, "label": 6}, {"file_name": "a06_s10_e02_v01", "length": 35, "label": 6}, {"file_name": "a02_s07_e04_v01", "length": 45, "label": 2}, {"file_name": "a09_s06_e00_v01", "length": 80, "label": 8}, {"file_name": "a04_s07_e04_v01", "length": 89, "label": 4}, {"file_name": "a04_s05_e09_v01", "length": 38, "label": 4}, {"file_name": "a05_s02_e01_v01", "length": 17, "label": 5}, {"file_name": "a01_s06_e04_v01", "length": 24, "label": 1}, {"file_name": "a04_s08_e01_v01", "length": 77, "label": 4}, {"file_name": "a01_s09_e00_v01", "length": 37, "label": 1}, {"file_name": "a08_s03_e03_v01", "length": 157, "label": 7}, {"file_name": "a12_s03_e00_v01", "length": 31, "label": 10}, {"file_name": "a11_s02_e03_v01", "length": 29, "label": 9}, {"file_name": "a12_s07_e02_v01", "length": 25, "label": 10}, {"file_name": "a11_s05_e01_v01", "length": 53, "label": 9}, {"file_name": "a05_s06_e01_v01", "length": 18, "label": 5}, {"file_name": "a03_s08_e06_v01", "length": 25, "label": 3}, {"file_name": "a06_s02_e04_v01", "length": 32, "label": 6}, {"file_name": "a06_s04_e00_v01", "length": 49, "label": 6}, {"file_name": "a05_s09_e01_v01", "length": 33, "label": 5}, {"file_name": "a11_s05_e03_v01", "length": 41, "label": 9}, {"file_name": "a11_s10_e04_v01", "length": 55, "label": 9}, {"file_name": "a03_s01_e00_v01", "length": 106, "label": 3}, {"file_name": "a03_s08_e04_v01", "length": 31, "label": 3}, {"file_name": "a11_s02_e01_v01", "length": 8, "label": 9}, {"file_name": "a04_s02_e00_v01", "length": 25, "label": 4}, {"file_name": "a11_s01_e01_v01", "length": 31, "label": 9}, {"file_name": "a02_s06_e03_v01", "length": 35, "label": 2}, {"file_name": "a12_s10_e03_v01", "length": 40, "label": 10}, {"file_name": "a01_s05_e02_v01", "length": 51, "label": 1}, {"file_name": "a01_s06_e00_v01", "length": 36, "label": 1}, {"file_name": "a05_s07_e01_v01", "length": 35, "label": 5}, {"file_name": "a01_s09_e01_v01", "length": 38, "label": 1}, {"file_name": "a02_s06_e00_v01", "length": 32, "label": 2}, {"file_name": "a11_s09_e00_v01", "length": 12, "label": 9}, {"file_name": "a03_s03_e01_v01", "length": 33, "label": 3}, {"file_name": "a03_s08_e00_v01", "length": 42, "label": 3}, {"file_name": "a06_s04_e01_v01", "length": 31, "label": 6}, {"file_name": "a02_s05_e01_v01", "length": 58, "label": 2}, {"file_name": "a03_s04_e04_v01", "length": 36, "label": 3}, {"file_name": "a01_s09_e02_v01", "length": 32, "label": 1}, {"file_name": "a08_s03_e04_v01", "length": 51, "label": 7}, {"file_name": "a01_s10_e00_v01", "length": 36, "label": 1}, {"file_name": "a01_s02_e02_v01", "length": 33, "label": 1}, {"file_name": "a09_s03_e01_v01", "length": 61, "label": 8}, {"file_name": "a05_s06_e00_v01", "length": 39, "label": 5}, {"file_name": "a05_s01_e02_v01", "length": 26, "label": 5}, {"file_name": "a03_s06_e04_v01", "length": 24, "label": 3}, {"file_name": "a02_s02_e04_v01", "length": 36, "label": 2}, {"file_name": "a06_s07_e03_v01", "length": 32, "label": 6}, {"file_name": "a04_s02_e04_v01", "length": 28, "label": 4}, {"file_name": "a04_s05_e02_v01", "length": 75, "label": 4}, {"file_name": "a02_s07_e01_v01", "length": 38, "label": 2}, {"file_name": "a03_s07_e03_v01", "length": 62, "label": 3}, {"file_name": "a12_s08_e01_v01", "length": 32, "label": 10}, {"file_name": "a05_s01_e03_v01", "length": 31, "label": 5}, {"file_name": "a02_s09_e02_v01", "length": 60, "label": 2}, {"file_name": "a05_s08_e03_v01", "length": 29, "label": 5}, {"file_name": "a04_s06_e00_v01", "length": 67, "label": 
4}, {"file_name": "a09_s01_e02_v01", "length": 130, "label": 8}, {"file_name": "a04_s09_e02_v01", "length": 68, "label": 4}, {"file_name": "a03_s03_e03_v01", "length": 36, "label": 3}, {"file_name": "a08_s07_e03_v01", "length": 86, "label": 7}, {"file_name": "a08_s09_e02_v01", "length": 148, "label": 7}, {"file_name": "a08_s09_e00_v01", "length": 120, "label": 7}, {"file_name": "a06_s06_e04_v01", "length": 30, "label": 6}, {"file_name": "a01_s07_e04_v01", "length": 34, "label": 1}, {"file_name": "a04_s05_e08_v01", "length": 28, "label": 4}, {"file_name": "a08_s05_e04_v01", "length": 80, "label": 7}, {"file_name": "a05_s04_e01_v01", "length": 30, "label": 5}, {"file_name": "a04_s07_e00_v01", "length": 117, "label": 4}, {"file_name": "a05_s08_e01_v01", "length": 35, "label": 5}, {"file_name": "a11_s06_e03_v01", "length": 22, "label": 9}, {"file_name": "a01_s04_e03_v01", "length": 58, "label": 1}, {"file_name": "a12_s07_e03_v01", "length": 21, "label": 10}, {"file_name": "a01_s02_e04_v01", "length": 19, "label": 1}, {"file_name": "a04_s04_e05_v01", "length": 34, "label": 4}, {"file_name": "a03_s01_e03_v01", "length": 113, "label": 3}, {"file_name": "a12_s02_e02_v01", "length": 63, "label": 10}, {"file_name": "a05_s02_e03_v01", "length": 26, "label": 5}, {"file_name": "a03_s02_e04_v01", "length": 33, "label": 3}, {"file_name": "a08_s02_e03_v01", "length": 100, "label": 7}, {"file_name": "a08_s03_e02_v01", "length": 83, "label": 7}, {"file_name": "a09_s01_e01_v01", "length": 106, "label": 8}, {"file_name": "a02_s01_e01_v01", "length": 59, "label": 2}, {"file_name": "a08_s06_e00_v01", "length": 103, "label": 7}, {"file_name": "a04_s04_e09_v01", "length": 36, "label": 4}, {"file_name": "a12_s08_e02_v01", "length": 29, "label": 10}, {"file_name": "a02_s08_e00_v01", "length": 27, "label": 2}, {"file_name": "a01_s08_e02_v01", "length": 201, "label": 1}, {"file_name": "a09_s04_e01_v01", "length": 74, "label": 8}, {"file_name": "a04_s01_e04_v01", "length": 120, "label": 4}, {"file_name": "a04_s05_e03_v01", "length": 31, "label": 4}, {"file_name": "a08_s10_e03_v01", "length": 70, "label": 7}, {"file_name": "a02_s05_e00_v01", "length": 26, "label": 2}, {"file_name": "a06_s04_e03_v01", "length": 19, "label": 6}, {"file_name": "a06_s09_e03_v01", "length": 44, "label": 6}, {"file_name": "a05_s03_e02_v01", "length": 40, "label": 5}, {"file_name": "a06_s03_e04_v01", "length": 30, "label": 6}, {"file_name": "a06_s01_e03_v01", "length": 26, "label": 6}, {"file_name": "a11_s03_e01_v01", "length": 31, "label": 9}, {"file_name": "a09_s02_e01_v01", "length": 67, "label": 8}, {"file_name": "a02_s02_e00_v01", "length": 57, "label": 2}, {"file_name": "a01_s01_e03_v01", "length": 51, "label": 1}, {"file_name": "a08_s06_e02_v01", "length": 90, "label": 7}, {"file_name": "a12_s01_e03_v01", "length": 57, "label": 10}, {"file_name": "a06_s05_e04_v01", "length": 15, "label": 6}, {"file_name": "a09_s09_e01_v01", "length": 179, "label": 8}, {"file_name": "a04_s10_e03_v01", "length": 20, "label": 4}, {"file_name": "a06_s09_e04_v01", "length": 35, "label": 6}, {"file_name": "a02_s04_e01_v01", "length": 55, "label": 2}, {"file_name": "a12_s10_e04_v01", "length": 57, "label": 10}, {"file_name": "a04_s03_e05_v01", "length": 44, "label": 4}, {"file_name": "a06_s03_e01_v01", "length": 31, "label": 6}, {"file_name": "a02_s03_e04_v01", "length": 51, "label": 2}, {"file_name": "a11_s09_e02_v01", "length": 42, "label": 9}, {"file_name": "a08_s08_e02_v01", "length": 61, "label": 7}, {"file_name": "a03_s02_e01_v01", "length": 28, 
"label": 3}, {"file_name": "a12_s02_e00_v01", "length": 38, "label": 10}, {"file_name": "a12_s08_e03_v01", "length": 26, "label": 10}, {"file_name": "a02_s09_e03_v01", "length": 45, "label": 2}, {"file_name": "a09_s02_e02_v01", "length": 54, "label": 8}, {"file_name": "a05_s09_e04_v01", "length": 39, "label": 5}, {"file_name": "a04_s04_e06_v01", "length": 28, "label": 4}, {"file_name": "a01_s04_e00_v01", "length": 21, "label": 1}, {"file_name": "a08_s04_e03_v01", "length": 125, "label": 7}, {"file_name": "a08_s05_e01_v01", "length": 135, "label": 7}, {"file_name": "a02_s04_e03_v01", "length": 28, "label": 2}, {"file_name": "a04_s03_e04_v01", "length": 51, "label": 4}, {"file_name": "a12_s06_e01_v01", "length": 21, "label": 10}, {"file_name": "a11_s04_e03_v01", "length": 51, "label": 9}, {"file_name": "a05_s03_e00_v01", "length": 46, "label": 5}, {"file_name": "a12_s07_e00_v01", "length": 34, "label": 10}, {"file_name": "a06_s03_e02_v01", "length": 70, "label": 6}, {"file_name": "a03_s03_e05_v01", "length": 30, "label": 3}, {"file_name": "a11_s08_e01_v01", "length": 19, "label": 9}, {"file_name": "a05_s05_e04_v01", "length": 26, "label": 5}, {"file_name": "a06_s10_e01_v01", "length": 30, "label": 6}, {"file_name": "a04_s03_e02_v01", "length": 97, "label": 4}, {"file_name": "a02_s03_e03_v01", "length": 56, "label": 2}, {"file_name": "a09_s10_e04_v01", "length": 66, "label": 8}, {"file_name": "a04_s08_e04_v01", "length": 71, "label": 4}, {"file_name": "a11_s08_e00_v01", "length": 14, "label": 9}, {"file_name": "a02_s01_e00_v01", "length": 55, "label": 2}, {"file_name": "a04_s02_e03_v01", "length": 51, "label": 4}, {"file_name": "a04_s02_e01_v01", "length": 48, "label": 4}, {"file_name": "a06_s08_e00_v01", "length": 15, "label": 6}, {"file_name": "a08_s08_e01_v01", "length": 90, "label": 7}, {"file_name": "a02_s03_e01_v01", "length": 50, "label": 2}, {"file_name": "a11_s02_e02_v01", "length": 35, "label": 9}, {"file_name": "a09_s07_e02_v01", "length": 44, "label": 8}, {"file_name": "a02_s05_e03_v01", "length": 41, "label": 2}, {"file_name": "a01_s07_e02_v01", "length": 35, "label": 1}, {"file_name": "a06_s05_e03_v01", "length": 18, "label": 6}, {"file_name": "a12_s05_e03_v01", "length": 40, "label": 10}, {"file_name": "a03_s05_e00_v01", "length": 63, "label": 3}, {"file_name": "a09_s03_e02_v01", "length": 48, "label": 8}, {"file_name": "a09_s04_e04_v01", "length": 139, "label": 8}, {"file_name": "a11_s10_e00_v01", "length": 50, "label": 9}, {"file_name": "a04_s04_e01_v01", "length": 26, "label": 4}, {"file_name": "a01_s08_e05_v01", "length": 63, "label": 1}, {"file_name": "a02_s08_e02_v01", "length": 35, "label": 2}, {"file_name": "a01_s05_e00_v01", "length": 83, "label": 1}, {"file_name": "a11_s06_e00_v01", "length": 33, "label": 9}, {"file_name": "a05_s02_e00_v01", "length": 40, "label": 5}, {"file_name": "a02_s02_e03_v01", "length": 41, "label": 2}, {"file_name": "a09_s05_e02_v01", "length": 61, "label": 8}, {"file_name": "a05_s06_e02_v01", "length": 19, "label": 5}, {"file_name": "a08_s01_e03_v01", "length": 130, "label": 7}, {"file_name": "a08_s09_e01_v01", "length": 153, "label": 7}, {"file_name": "a02_s08_e04_v01", "length": 51, "label": 2}, {"file_name": "a06_s05_e02_v01", "length": 21, "label": 6}, {"file_name": "a01_s02_e03_v01", "length": 31, "label": 1}, {"file_name": "a11_s08_e05_v01", "length": 34, "label": 9}, {"file_name": "a03_s09_e02_v01", "length": 19, "label": 3}, {"file_name": "a04_s08_e00_v01", "length": 86, "label": 4}, {"file_name": "a03_s09_e01_v01", "length": 6, 
"label": 3}, {"file_name": "a08_s04_e01_v01", "length": 109, "label": 7}, {"file_name": "a12_s04_e03_v01", "length": 41, "label": 10}, {"file_name": "a04_s09_e03_v01", "length": 43, "label": 4}, {"file_name": "a12_s05_e00_v01", "length": 32, "label": 10}, {"file_name": "a11_s05_e04_v01", "length": 41, "label": 9}, {"file_name": "a05_s06_e03_v01", "length": 19, "label": 5}, {"file_name": "a09_s06_e02_v01", "length": 31, "label": 8}, {"file_name": "a06_s08_e05_v01", "length": 19, "label": 6}, {"file_name": "a03_s06_e03_v01", "length": 25, "label": 3}, {"file_name": "a12_s02_e03_v01", "length": 77, "label": 10}, {"file_name": "a11_s03_e03_v01", "length": 36, "label": 9}, {"file_name": "a04_s01_e00_v01", "length": 141, "label": 4}, {"file_name": "a04_s04_e08_v01", "length": 36, "label": 4}, {"file_name": "a03_s08_e03_v01", "length": 31, "label": 3}, {"file_name": "a02_s10_e03_v01", "length": 71, "label": 2}, {"file_name": "a04_s10_e00_v01", "length": 12, "label": 4}, {"file_name": "a08_s03_e00_v01", "length": 84, "label": 7}, {"file_name": "a02_s08_e03_v01", "length": 56, "label": 2}, {"file_name": "a01_s09_e03_v01", "length": 35, "label": 1}, {"file_name": "a01_s01_e04_v01", "length": 46, "label": 1}, {"file_name": "a01_s07_e00_v01", "length": 35, "label": 1}, {"file_name": "a02_s03_e00_v01", "length": 86, "label": 2}, {"file_name": "a01_s02_e00_v01", "length": 25, "label": 1}, {"file_name": "a03_s09_e04_v01", "length": 38, "label": 3}, {"file_name": "a01_s06_e02_v01", "length": 28, "label": 1}, {"file_name": "a03_s07_e02_v01", "length": 8, "label": 3}, {"file_name": "a04_s05_e05_v01", "length": 56, "label": 4}, {"file_name": "a08_s07_e01_v01", "length": 155, "label": 7}, {"file_name": "a04_s07_e03_v01", "length": 109, "label": 4}, {"file_name": "a08_s04_e04_v01", "length": 146, "label": 7}, {"file_name": "a08_s08_e00_v01", "length": 56, "label": 7}, {"file_name": "a02_s09_e00_v01", "length": 55, "label": 2}, {"file_name": "a06_s03_e00_v01", "length": 35, "label": 6}, {"file_name": "a04_s05_e07_v01", "length": 39, "label": 4}, {"file_name": "a09_s09_e04_v01", "length": 81, "label": 8}, {"file_name": "a05_s04_e04_v01", "length": 27, "label": 5}, {"file_name": "a09_s04_e03_v01", "length": 63, "label": 8}, {"file_name": "a01_s09_e04_v01", "length": 25, "label": 1}, {"file_name": "a05_s10_e00_v01", "length": 59, "label": 5}, {"file_name": "a09_s08_e02_v01", "length": 100, "label": 8}, {"file_name": "a11_s07_e01_v01", "length": 10, "label": 9}, {"file_name": "a06_s01_e00_v01", "length": 32, "label": 6}, {"file_name": "a12_s08_e04_v01", "length": 26, "label": 10}, {"file_name": "a08_s09_e04_v01", "length": 88, "label": 7}, {"file_name": "a12_s10_e02_v01", "length": 66, "label": 10}, {"file_name": "a04_s01_e01_v01", "length": 84, "label": 4}, {"file_name": "a01_s08_e01_v01", "length": 19, "label": 1}, {"file_name": "a09_s07_e00_v01", "length": 63, "label": 8}, {"file_name": "a04_s09_e00_v01", "length": 112, "label": 4}, {"file_name": "a08_s02_e02_v01", "length": 163, "label": 7}, {"file_name": "a09_s09_e02_v01", "length": 192, "label": 8}, {"file_name": "a09_s02_e03_v01", "length": 66, "label": 8}, {"file_name": "a11_s09_e01_v01", "length": 26, "label": 9}, {"file_name": "a03_s10_e01_v01", "length": 31, "label": 3}, {"file_name": "a11_s03_e02_v01", "length": 21, "label": 9}, {"file_name": "a11_s08_e04_v01", "length": 65, "label": 9}, {"file_name": "a06_s08_e02_v01", "length": 20, "label": 6}, {"file_name": "a11_s04_e04_v01", "length": 51, "label": 9}, {"file_name": "a12_s01_e00_v01", "length": 62, 
"label": 10}, {"file_name": "a02_s06_e04_v01", "length": 25, "label": 2}, {"file_name": "a06_s07_e01_v01", "length": 29, "label": 6}, {"file_name": "a05_s10_e03_v01", "length": 46, "label": 5}, {"file_name": "a09_s05_e04_v01", "length": 60, "label": 8}, {"file_name": "a03_s06_e00_v01", "length": 28, "label": 3}, {"file_name": "a12_s02_e01_v01", "length": 45, "label": 10}, {"file_name": "a08_s10_e02_v01", "length": 102, "label": 7}, {"file_name": "a08_s02_e00_v01", "length": 116, "label": 7}, {"file_name": "a06_s10_e03_v01", "length": 37, "label": 6}, {"file_name": "a11_s04_e02_v01", "length": 37, "label": 9}, {"file_name": "a08_s09_e03_v01", "length": 125, "label": 7}, {"file_name": "a12_s06_e04_v01", "length": 18, "label": 10}, {"file_name": "a01_s07_e01_v01", "length": 31, "label": 1}, {"file_name": "a05_s02_e04_v01", "length": 21, "label": 5}, {"file_name": "a09_s08_e00_v01", "length": 71, "label": 8}, {"file_name": "a02_s04_e04_v01", "length": 44, "label": 2}, {"file_name": "a06_s07_e00_v01", "length": 20, "label": 6}, {"file_name": "a04_s09_e01_v01", "length": 79, "label": 4}, {"file_name": "a09_s01_e00_v01", "length": 97, "label": 8}, {"file_name": "a08_s10_e01_v01", "length": 100, "label": 7}, {"file_name": "a11_s10_e02_v01", "length": 22, "label": 9}, {"file_name": "a09_s10_e02_v01", "length": 40, "label": 8}, {"file_name": "a03_s07_e04_v01", "length": 28, "label": 3}, {"file_name": "a05_s08_e00_v01", "length": 31, "label": 5}, {"file_name": "a05_s05_e03_v01", "length": 21, "label": 5}, {"file_name": "a11_s09_e03_v01", "length": 19, "label": 9}, {"file_name": "a12_s04_e04_v01", "length": 37, "label": 10}, {"file_name": "a04_s01_e03_v01", "length": 84, "label": 4}, {"file_name": "a04_s10_e02_v01", "length": 35, "label": 4}, {"file_name": "a06_s10_e04_v01", "length": 42, "label": 6}, {"file_name": "a01_s08_e00_v01", "length": 42, "label": 1}, {"file_name": "a03_s10_e02_v01", "length": 60, "label": 3}, {"file_name": "a03_s07_e01_v01", "length": 18, "label": 3}, {"file_name": "a05_s04_e03_v01", "length": 27, "label": 5}, {"file_name": "a01_s01_e02_v01", "length": 64, "label": 1}, {"file_name": "a05_s10_e04_v01", "length": 29, "label": 5}, {"file_name": "a06_s08_e03_v01", "length": 24, "label": 6}, {"file_name": "a02_s04_e02_v01", "length": 29, "label": 2}, {"file_name": "a12_s01_e04_v01", "length": 61, "label": 10}, {"file_name": "a02_s01_e02_v01", "length": 69, "label": 2}, {"file_name": "a12_s10_e00_v01", "length": 31, "label": 10}, {"file_name": "a11_s02_e00_v01", "length": 25, "label": 9}, {"file_name": "a02_s09_e01_v01", "length": 38, "label": 2}, {"file_name": "a12_s06_e05_v01", "length": 43, "label": 10}, {"file_name": "a02_s04_e00_v01", "length": 51, "label": 2}, {"file_name": "a12_s01_e02_v01", "length": 58, "label": 10}, {"file_name": "a04_s02_e05_v01", "length": 57, "label": 4}, {"file_name": "a03_s01_e04_v01", "length": 69, "label": 3}, {"file_name": "a01_s03_e04_v01", "length": 54, "label": 1}, {"file_name": "a01_s06_e03_v01", "length": 21, "label": 1}, {"file_name": "a02_s06_e01_v01", "length": 25, "label": 2}, {"file_name": "a12_s07_e04_v01", "length": 19, "label": 10}, {"file_name": "a08_s10_e04_v01", "length": 123, "label": 7}, {"file_name": "a02_s03_e02_v01", "length": 50, "label": 2}, {"file_name": "a09_s05_e06_v01", "length": 57, "label": 8}, {"file_name": "a05_s10_e01_v01", "length": 36, "label": 5}, {"file_name": "a09_s10_e01_v01", "length": 65, "label": 8}, {"file_name": "a08_s08_e04_v01", "length": 92, "label": 7}, {"file_name": "a06_s01_e02_v01", "length": 30, 
"label": 6}, {"file_name": "a01_s01_e01_v01", "length": 47, "label": 1}, {"file_name": "a06_s08_e04_v01", "length": 17, "label": 6}, {"file_name": "a09_s06_e03_v01", "length": 44, "label": 8}, {"file_name": "a06_s09_e01_v01", "length": 69, "label": 6}, {"file_name": "a08_s06_e01_v01", "length": 152, "label": 7}, {"file_name": "a02_s01_e04_v01", "length": 31, "label": 2}, {"file_name": "a11_s01_e00_v01", "length": 51, "label": 9}, {"file_name": "a05_s05_e02_v01", "length": 21, "label": 5}, {"file_name": "a03_s03_e00_v01", "length": 37, "label": 3}, {"file_name": "a01_s04_e04_v01", "length": 31, "label": 1}, {"file_name": "a06_s01_e04_v01", "length": 30, "label": 6}, {"file_name": "a09_s05_e05_v01", "length": 88, "label": 8}, {"file_name": "a01_s10_e01_v01", "length": 33, "label": 1}, {"file_name": "a03_s09_e00_v01", "length": 22, "label": 3}, {"file_name": "a08_s10_e00_v01", "length": 91, "label": 7}, {"file_name": "a05_s10_e02_v01", "length": 28, "label": 5}, {"file_name": "a03_s08_e05_v01", "length": 51, "label": 3}, {"file_name": "a04_s10_e01_v01", "length": 30, "label": 4}, {"file_name": "a05_s03_e04_v01", "length": 20, "label": 5}, {"file_name": "a05_s07_e02_v01", "length": 21, "label": 5}, {"file_name": "a12_s02_e04_v01", "length": 53, "label": 10}, {"file_name": "a06_s02_e03_v01", "length": 21, "label": 6}, {"file_name": "a09_s01_e03_v01", "length": 100, "label": 8}, {"file_name": "a08_s04_e00_v01", "length": 99, "label": 7}, {"file_name": "a02_s10_e01_v01", "length": 81, "label": 2}, {"file_name": "a11_s04_e01_v01", "length": 26, "label": 9}, {"file_name": "a03_s05_e01_v01", "length": 56, "label": 3}, {"file_name": "a06_s07_e04_v01", "length": 38, "label": 6}, {"file_name": "a09_s09_e03_v01", "length": 150, "label": 8}, {"file_name": "a02_s06_e02_v01", "length": 25, "label": 2}, {"file_name": "a05_s01_e04_v01", "length": 26, "label": 5}, {"file_name": "a11_s03_e04_v01", "length": 26, "label": 9}, {"file_name": "a04_s08_e02_v01", "length": 97, "label": 4}, {"file_name": "a04_s09_e04_v01", "length": 54, "label": 4}, {"file_name": "a08_s07_e00_v01", "length": 72, "label": 7}, {"file_name": "a04_s01_e05_v01", "length": 50, "label": 4}, {"file_name": "a12_s07_e01_v01", "length": 32, "label": 10}, {"file_name": "a02_s01_e03_v01", "length": 76, "label": 2}, {"file_name": "a11_s10_e05_v01", "length": 21, "label": 9}, {"file_name": "a09_s04_e00_v01", "length": 99, "label": 8}, {"file_name": "a09_s05_e01_v01", "length": 60, "label": 8}, {"file_name": "a09_s01_e04_v01", "length": 50, "label": 8}, {"file_name": "a12_s08_e00_v01", "length": 44, "label": 10}, {"file_name": "a04_s06_e03_v01", "length": 161, "label": 4}, {"file_name": "a05_s05_e00_v01", "length": 65, "label": 5}, {"file_name": "a11_s06_e01_v01", "length": 18, "label": 9}, {"file_name": "a01_s10_e02_v01", "length": 50, "label": 1}, {"file_name": "a04_s05_e01_v01", "length": 40, "label": 4}, {"file_name": "a02_s10_e04_v01", "length": 36, "label": 2}, {"file_name": "a02_s06_e05_v01", "length": 27, "label": 2}, {"file_name": "a11_s05_e00_v01", "length": 32, "label": 9}, {"file_name": "a04_s05_e06_v01", "length": 31, "label": 4}, {"file_name": "a04_s07_e01_v01", "length": 97, "label": 4}, {"file_name": "a03_s04_e01_v01", "length": 39, "label": 3}, {"file_name": "a03_s01_e02_v01", "length": 99, "label": 3}, {"file_name": "a06_s09_e02_v01", "length": 50, "label": 6}, {"file_name": "a03_s07_e00_v01", "length": 22, "label": 3}, {"file_name": "a08_s05_e05_v01", "length": 54, "label": 7}, {"file_name": "a06_s04_e02_v01", "length": 25, 
"label": 6}, {"file_name": "a12_s04_e01_v01", "length": 31, "label": 10}, {"file_name": "a09_s05_e00_v01", "length": 86, "label": 8}, {"file_name": "a04_s06_e02_v01", "length": 120, "label": 4}, {"file_name": "a04_s04_e04_v01", "length": 38, "label": 4}, {"file_name": "a09_s04_e02_v01", "length": 73, "label": 8}, {"file_name": "a02_s02_e01_v01", "length": 35, "label": 2}, {"file_name": "a06_s09_e00_v01", "length": 82, "label": 6}, {"file_name": "a05_s09_e00_v01", "length": 20, "label": 5}, {"file_name": "a05_s03_e01_v01", "length": 54, "label": 5}, {"file_name": "a02_s05_e04_v01", "length": 31, "label": 2}, {"file_name": "a01_s06_e01_v01", "length": 35, "label": 1}, {"file_name": "a01_s04_e05_v01", "length": 20, "label": 1}, {"file_name": "a12_s04_e02_v01", "length": 41, "label": 10}, {"file_name": "a03_s05_e02_v01", "length": 85, "label": 3}, {"file_name": "a03_s10_e04_v01", "length": 165, "label": 3}, {"file_name": "a01_s03_e02_v01", "length": 51, "label": 1}, {"file_name": "a05_s08_e05_v01", "length": 31, "label": 5}, {"file_name": "a01_s03_e00_v01", "length": 25, "label": 1}, {"file_name": "a08_s06_e03_v01", "length": 175, "label": 7}, {"file_name": "a04_s04_e07_v01", "length": 37, "label": 4}, {"file_name": "a05_s09_e02_v01", "length": 22, "label": 5}, {"file_name": "a01_s02_e01_v01", "length": 32, "label": 1}, {"file_name": "a01_s03_e01_v01", "length": 53, "label": 1}, {"file_name": "a04_s03_e01_v01", "length": 33, "label": 4}, {"file_name": "a06_s06_e00_v01", "length": 27, "label": 6}, {"file_name": "a12_s06_e02_v01", "length": 22, "label": 10}, {"file_name": "a04_s10_e04_v01", "length": 21, "label": 4}, {"file_name": "a12_s03_e01_v01", "length": 54, "label": 10}, {"file_name": "a08_s04_e02_v01", "length": 124, "label": 7}, {"file_name": "a06_s04_e04_v01", "length": 29, "label": 6}, {"file_name": "a12_s06_e03_v01", "length": 26, "label": 10}, {"file_name": "a08_s01_e04_v01", "length": 141, "label": 7}, {"file_name": "a04_s03_e00_v01", "length": 33, "label": 4}, {"file_name": "a12_s05_e02_v01", "length": 45, "label": 10}, {"file_name": "a08_s01_e00_v01", "length": 111, "label": 7}, {"file_name": "a01_s03_e03_v01", "length": 41, "label": 1}, {"file_name": "a01_s04_e02_v01", "length": 44, "label": 1}, {"file_name": "a06_s05_e00_v01", "length": 30, "label": 6}, {"file_name": "a01_s10_e04_v01", "length": 70, "label": 1}, {"file_name": "a08_s05_e00_v01", "length": 110, "label": 7}, {"file_name": "a09_s02_e00_v01", "length": 40, "label": 8}, {"file_name": "a12_s04_e05_v01", "length": 42, "label": 10}, {"file_name": "a06_s07_e02_v01", "length": 41, "label": 6}, {"file_name": "a08_s07_e02_v01", "length": 95, "label": 7}, {"file_name": "a11_s10_e01_v01", "length": 38, "label": 9}, {"file_name": "a02_s07_e00_v01", "length": 33, "label": 2}, {"file_name": "a06_s08_e01_v01", "length": 17, "label": 6}, {"file_name": "a01_s10_e03_v01", "length": 32, "label": 1}, {"file_name": "a11_s02_e04_v01", "length": 38, "label": 9}, {"file_name": "a12_s03_e03_v01", "length": 31, "label": 10}, {"file_name": "a05_s01_e01_v01", "length": 21, "label": 5}, {"file_name": "a05_s08_e02_v01", "length": 13, "label": 5}, {"file_name": "a09_s08_e01_v01", "length": 84, "label": 8}, {"file_name": "a01_s08_e04_v01", "length": 34, "label": 1}, {"file_name": "a09_s09_e00_v01", "length": 128, "label": 8}, {"file_name": "a03_s10_e03_v01", "length": 43, "label": 3}, {"file_name": "a09_s05_e03_v01", "length": 96, "label": 8}, {"file_name": "a09_s02_e04_v01", "length": 84, "label": 8}, {"file_name": "a08_s01_e01_v01", "length": 
81, "label": 7}, {"file_name": "a09_s10_e00_v01", "length": 76, "label": 8}, {"file_name": "a04_s04_e10_v01", "length": 22, "label": 4}, {"file_name": "a05_s01_e00_v01", "length": 24, "label": 5}, {"file_name": "a06_s02_e01_v01", "length": 38, "label": 6}, {"file_name": "a08_s08_e03_v01", "length": 82, "label": 7}, {"file_name": "a04_s04_e03_v01", "length": 31, "label": 4}, {"file_name": "a12_s05_e04_v01", "length": 41, "label": 10}, {"file_name": "a05_s10_e05_v01", "length": 48, "label": 5}, {"file_name": "a02_s10_e02_v01", "length": 49, "label": 2}, {"file_name": "a06_s03_e03_v01", "length": 40, "label": 6}, {"file_name": "a05_s07_e04_v01", "length": 20, "label": 5}, {"file_name": "a02_s10_e00_v01", "length": 50, "label": 2}, {"file_name": "a08_s05_e03_v01", "length": 90, "label": 7}, {"file_name": "a12_s04_e00_v01", "length": 65, "label": 10}, {"file_name": "a03_s04_e02_v01", "length": 46, "label": 3}, {"file_name": "a06_s02_e02_v01", "length": 30, "label": 6}, {"file_name": "a03_s04_e03_v01", "length": 47, "label": 3}, {"file_name": "a11_s08_e03_v01", "length": 46, "label": 9}, {"file_name": "a09_s07_e03_v01", "length": 47, "label": 8}, {"file_name": "a05_s03_e03_v01", "length": 26, "label": 5}, {"file_name": "a09_s10_e03_v01", "length": 58, "label": 8}, {"file_name": "a01_s05_e03_v01", "length": 51, "label": 1}, {"file_name": "a11_s06_e02_v01", "length": 21, "label": 9}, {"file_name": "a05_s05_e01_v01", "length": 31, "label": 5}, {"file_name": "a01_s05_e01_v01", "length": 54, "label": 1}, {"file_name": "a04_s04_e02_v01", "length": 46, "label": 4}, {"file_name": "a11_s08_e02_v01", "length": 32, "label": 9}, {"file_name": "a11_s07_e03_v01", "length": 13, "label": 9}, {"file_name": "a06_s01_e01_v01", "length": 26, "label": 6}, {"file_name": "a06_s10_e05_v01", "length": 20, "label": 6}, {"file_name": "a12_s06_e00_v01", "length": 23, "label": 10}, {"file_name": "a12_s03_e02_v01", "length": 26, "label": 10}, {"file_name": "a08_s05_e02_v01", "length": 73, "label": 7}, {"file_name": "a03_s04_e00_v01", "length": 36, "label": 3}, {"file_name": "a11_s01_e03_v01", "length": 45, "label": 9}, {"file_name": "a03_s08_e01_v01", "length": 55, "label": 3}, {"file_name": "a11_s04_e00_v01", "length": 27, "label": 9}, {"file_name": "a04_s05_e00_v01", "length": 83, "label": 4}, {"file_name": "a12_s05_e01_v01", "length": 30, "label": 10}, {"file_name": "a02_s05_e02_v01", "length": 30, "label": 2}, {"file_name": "a06_s06_e01_v01", "length": 20, "label": 6}, {"file_name": "a03_s03_e02_v01", "length": 62, "label": 3}, {"file_name": "a11_s07_e02_v01", "length": 38, "label": 9}, {"file_name": "a11_s01_e02_v01", "length": 26, "label": 9}, {"file_name": "a05_s04_e02_v02", "length": 46, "label": 5}, {"file_name": "a12_s09_e04_v02", "length": 16, "label": 10}, {"file_name": "a03_s03_e04_v02", "length": 35, "label": 3}, {"file_name": "a08_s02_e01_v02", "length": 145, "label": 7}, {"file_name": "a03_s05_e03_v02", "length": 26, "label": 3}, {"file_name": "a06_s05_e01_v02", "length": 21, "label": 6}, {"file_name": "a12_s10_e01_v02", "length": 21, "label": 10}, {"file_name": "a01_s07_e03_v02", "length": 26, "label": 1}, {"file_name": "a03_s08_e02_v02", "length": 21, "label": 3}, {"file_name": "a11_s10_e03_v02", "length": 21, "label": 9}, {"file_name": "a04_s06_e05_v02", "length": 24, "label": 4}, {"file_name": "a11_s03_e00_v02", "length": 40, "label": 9}, {"file_name": "a03_s02_e00_v02", "length": 32, "label": 3}, {"file_name": "a11_s01_e04_v02", "length": 21, "label": 9}, {"file_name": "a04_s05_e04_v02", "length": 30, 
"label": 4}, {"file_name": "a09_s08_e04_v02", "length": 48, "label": 8}, {"file_name": "a09_s06_e01_v02", "length": 33, "label": 8}, {"file_name": "a09_s07_e01_v02", "length": 36, "label": 8}, {"file_name": "a02_s08_e01_v02", "length": 21, "label": 2}, {"file_name": "a01_s04_e01_v02", "length": 41, "label": 1}, {"file_name": "a02_s02_e02_v02", "length": 31, "label": 2}, {"file_name": "a02_s07_e05_v02", "length": 31, "label": 2}, {"file_name": "a06_s02_e00_v02", "length": 25, "label": 6}, {"file_name": "a03_s02_e02_v02", "length": 22, "label": 3}, {"file_name": "a11_s09_e04_v02", "length": 21, "label": 9}, {"file_name": "a09_s03_e04_v02", "length": 61, "label": 8}, {"file_name": "a04_s01_e02_v02", "length": 37, "label": 4}, {"file_name": "a12_s01_e01_v02", "length": 47, "label": 10}, {"file_name": "a02_s07_e03_v02", "length": 9, "label": 2}, {"file_name": "a05_s08_e04_v02", "length": 21, "label": 5}, {"file_name": "a02_s07_e02_v02", "length": 31, "label": 2}, {"file_name": "a04_s07_e02_v02", "length": 18, "label": 4}, {"file_name": "a01_s08_e03_v02", "length": 31, "label": 1}, {"file_name": "a08_s03_e01_v02", "length": 81, "label": 7}, {"file_name": "a04_s08_e03_v02", "length": 16, "label": 4}, {"file_name": "a03_s10_e00_v02", "length": 17, "label": 3}, {"file_name": "a04_s03_e03_v02", "length": 44, "label": 4}, {"file_name": "a11_s05_e02_v02", "length": 29, "label": 9}, {"file_name": "a06_s06_e02_v02", "length": 18, "label": 6}, {"file_name": "a09_s03_e00_v02", "length": 88, "label": 8}, {"file_name": "a09_s03_e03_v02", "length": 58, "label": 8}, {"file_name": "a04_s02_e02_v02", "length": 104, "label": 4}, {"file_name": "a08_s01_e02_v02", "length": 83, "label": 7}, {"file_name": "a04_s04_e00_v02", "length": 46, "label": 4}, {"file_name": "a03_s02_e03_v02", "length": 39, "label": 3}, {"file_name": "a05_s04_e00_v02", "length": 19, "label": 5}, {"file_name": "a05_s07_e03_v02", "length": 16, "label": 5}, {"file_name": "a06_s10_e00_v02", "length": 26, "label": 6}, {"file_name": "a11_s07_e00_v02", "length": 26, "label": 9}, {"file_name": "a03_s01_e01_v02", "length": 24, "label": 3}, {"file_name": "a04_s06_e01_v02", "length": 16, "label": 4}, {"file_name": "a08_s02_e04_v02", "length": 102, "label": 7}, {"file_name": "a09_s08_e03_v02", "length": 41, "label": 8}, {"file_name": "a05_s07_e00_v02", "length": 16, "label": 5}, {"file_name": "a05_s02_e02_v02", "length": 27, "label": 5}, {"file_name": "a04_s06_e04_v02", "length": 21, "label": 4}, {"file_name": "a05_s09_e03_v02", "length": 21, "label": 5}, {"file_name": "a03_s06_e02_v02", "length": 15, "label": 3}, {"file_name": "a01_s01_e00_v02", "length": 30, "label": 1}, {"file_name": "a06_s06_e03_v02", "length": 13, "label": 6}, {"file_name": "a06_s10_e02_v02", "length": 21, "label": 6}, {"file_name": "a02_s07_e04_v02", "length": 36, "label": 2}, {"file_name": "a09_s06_e00_v02", "length": 68, "label": 8}, {"file_name": "a04_s07_e04_v02", "length": 21, "label": 4}, {"file_name": "a05_s02_e01_v02", "length": 36, "label": 5}, {"file_name": "a01_s06_e04_v02", "length": 17, "label": 1}, {"file_name": "a04_s08_e01_v02", "length": 21, "label": 4}, {"file_name": "a01_s09_e00_v02", "length": 31, "label": 1}, {"file_name": "a08_s03_e03_v02", "length": 71, "label": 7}, {"file_name": "a12_s03_e00_v02", "length": 41, "label": 10}, {"file_name": "a11_s02_e03_v02", "length": 26, "label": 9}, {"file_name": "a12_s07_e02_v02", "length": 11, "label": 10}, {"file_name": "a11_s05_e01_v02", "length": 35, "label": 9}, {"file_name": "a05_s06_e01_v02", "length": 14, "label": 
5}, {"file_name": "a06_s02_e04_v02", "length": 14, "label": 6}, {"file_name": "a06_s04_e00_v02", "length": 18, "label": 6}, {"file_name": "a05_s09_e01_v02", "length": 31, "label": 5}, {"file_name": "a11_s05_e03_v02", "length": 34, "label": 9}, {"file_name": "a03_s01_e00_v02", "length": 33, "label": 3}, {"file_name": "a11_s02_e01_v02", "length": 32, "label": 9}, {"file_name": "a04_s02_e00_v02", "length": 57, "label": 4}, {"file_name": "a11_s01_e01_v02", "length": 26, "label": 9}, {"file_name": "a02_s06_e03_v02", "length": 21, "label": 2}, {"file_name": "a12_s10_e03_v02", "length": 21, "label": 10}, {"file_name": "a01_s05_e02_v02", "length": 19, "label": 1}, {"file_name": "a01_s06_e00_v02", "length": 21, "label": 1}, {"file_name": "a05_s07_e01_v02", "length": 21, "label": 5}, {"file_name": "a01_s09_e01_v02", "length": 26, "label": 1}, {"file_name": "a02_s06_e00_v02", "length": 18, "label": 2}, {"file_name": "a11_s09_e00_v02", "length": 11, "label": 9}, {"file_name": "a03_s03_e01_v02", "length": 47, "label": 3}, {"file_name": "a03_s08_e00_v02", "length": 22, "label": 3}, {"file_name": "a06_s04_e01_v02", "length": 21, "label": 6}, {"file_name": "a02_s05_e01_v02", "length": 34, "label": 2}, {"file_name": "a03_s04_e04_v02", "length": 29, "label": 3}, {"file_name": "a01_s09_e02_v02", "length": 22, "label": 1}, {"file_name": "a08_s03_e04_v02", "length": 59, "label": 7}, {"file_name": "a01_s10_e00_v02", "length": 28, "label": 1}, {"file_name": "a01_s02_e02_v02", "length": 23, "label": 1}, {"file_name": "a09_s03_e01_v02", "length": 42, "label": 8}, {"file_name": "a05_s06_e00_v02", "length": 23, "label": 5}, {"file_name": "a05_s01_e02_v02", "length": 31, "label": 5}, {"file_name": "a02_s02_e04_v02", "length": 28, "label": 2}, {"file_name": "a06_s07_e03_v02", "length": 21, "label": 6}, {"file_name": "a04_s02_e04_v02", "length": 23, "label": 4}, {"file_name": "a04_s05_e02_v02", "length": 29, "label": 4}, {"file_name": "a02_s07_e01_v02", "length": 31, "label": 2}, {"file_name": "a04_s02_e06_v02", "length": 28, "label": 4}, {"file_name": "a03_s07_e03_v02", "length": 11, "label": 3}, {"file_name": "a12_s08_e01_v02", "length": 14, "label": 10}, {"file_name": "a05_s01_e03_v02", "length": 31, "label": 5}, {"file_name": "a02_s09_e02_v02", "length": 43, "label": 2}, {"file_name": "a05_s08_e03_v02", "length": 26, "label": 5}, {"file_name": "a04_s06_e00_v02", "length": 18, "label": 4}, {"file_name": "a09_s01_e02_v02", "length": 67, "label": 8}, {"file_name": "a12_s09_e00_v02", "length": 21, "label": 10}, {"file_name": "a04_s09_e02_v02", "length": 16, "label": 4}, {"file_name": "a03_s03_e03_v02", "length": 43, "label": 3}, {"file_name": "a08_s07_e03_v02", "length": 54, "label": 7}, {"file_name": "a08_s09_e02_v02", "length": 76, "label": 7}, {"file_name": "a08_s09_e00_v02", "length": 71, "label": 7}, {"file_name": "a06_s06_e04_v02", "length": 16, "label": 6}, {"file_name": "a01_s07_e04_v02", "length": 21, "label": 1}, {"file_name": "a08_s05_e04_v02", "length": 45, "label": 7}, {"file_name": "a05_s04_e01_v02", "length": 26, "label": 5}, {"file_name": "a04_s07_e00_v02", "length": 23, "label": 4}, {"file_name": "a05_s08_e01_v02", "length": 21, "label": 5}, {"file_name": "a11_s06_e03_v02", "length": 17, "label": 9}, {"file_name": "a01_s04_e03_v02", "length": 34, "label": 1}, {"file_name": "a11_s06_e04_v02", "length": 8, "label": 9}, {"file_name": "a12_s07_e03_v02", "length": 16, "label": 10}, {"file_name": "a01_s02_e04_v02", "length": 21, "label": 1}, {"file_name": "a04_s04_e05_v02", "length": 132, "label": 4}, 
{"file_name": "a03_s01_e03_v02", "length": 36, "label": 3}, {"file_name": "a12_s02_e02_v02", "length": 38, "label": 10}, {"file_name": "a03_s06_e01_v02", "length": 17, "label": 3}, {"file_name": "a05_s02_e03_v02", "length": 28, "label": 5}, {"file_name": "a03_s02_e04_v02", "length": 23, "label": 3}, {"file_name": "a08_s02_e03_v02", "length": 113, "label": 7}, {"file_name": "a08_s03_e02_v02", "length": 67, "label": 7}, {"file_name": "a09_s01_e01_v02", "length": 55, "label": 8}, {"file_name": "a02_s01_e01_v02", "length": 30, "label": 2}, {"file_name": "a08_s06_e00_v02", "length": 86, "label": 7}, {"file_name": "a12_s08_e02_v02", "length": 16, "label": 10}, {"file_name": "a02_s08_e00_v02", "length": 26, "label": 2}, {"file_name": "a01_s08_e02_v02", "length": 33, "label": 1}, {"file_name": "a09_s04_e01_v02", "length": 74, "label": 8}, {"file_name": "a04_s01_e04_v02", "length": 26, "label": 4}, {"file_name": "a04_s05_e03_v02", "length": 31, "label": 4}, {"file_name": "a08_s10_e03_v02", "length": 61, "label": 7}, {"file_name": "a02_s05_e00_v02", "length": 28, "label": 2}, {"file_name": "a06_s04_e03_v02", "length": 24, "label": 6}, {"file_name": "a06_s09_e03_v02", "length": 21, "label": 6}, {"file_name": "a05_s03_e02_v02", "length": 21, "label": 5}, {"file_name": "a06_s03_e04_v02", "length": 12, "label": 6}, {"file_name": "a06_s01_e03_v02", "length": 16, "label": 6}, {"file_name": "a11_s03_e01_v02", "length": 23, "label": 9}, {"file_name": "a09_s02_e01_v02", "length": 33, "label": 8}, {"file_name": "a02_s02_e00_v02", "length": 42, "label": 2}, {"file_name": "a01_s01_e03_v02", "length": 39, "label": 1}, {"file_name": "a08_s06_e02_v02", "length": 83, "label": 7}, {"file_name": "a12_s01_e03_v02", "length": 41, "label": 10}, {"file_name": "a06_s05_e04_v02", "length": 16, "label": 6}, {"file_name": "a01_s04_e06_v02", "length": 24, "label": 1}, {"file_name": "a09_s09_e01_v02", "length": 41, "label": 8}, {"file_name": "a04_s10_e03_v02", "length": 16, "label": 4}, {"file_name": "a06_s09_e04_v02", "length": 16, "label": 6}, {"file_name": "a02_s04_e01_v02", "length": 31, "label": 2}, {"file_name": "a12_s10_e04_v02", "length": 14, "label": 10}, {"file_name": "a04_s03_e05_v02", "length": 42, "label": 4}, {"file_name": "a06_s03_e01_v02", "length": 25, "label": 6}, {"file_name": "a02_s03_e04_v02", "length": 62, "label": 2}, {"file_name": "a11_s09_e02_v02", "length": 25, "label": 9}, {"file_name": "a08_s08_e02_v02", "length": 53, "label": 7}, {"file_name": "a03_s02_e01_v02", "length": 36, "label": 3}, {"file_name": "a12_s02_e00_v02", "length": 50, "label": 10}, {"file_name": "a12_s08_e03_v02", "length": 13, "label": 10}, {"file_name": "a02_s09_e03_v02", "length": 31, "label": 2}, {"file_name": "a09_s02_e02_v02", "length": 46, "label": 8}, {"file_name": "a05_s09_e04_v02", "length": 21, "label": 5}, {"file_name": "a01_s04_e00_v02", "length": 26, "label": 1}, {"file_name": "a08_s04_e03_v02", "length": 121, "label": 7}, {"file_name": "a08_s05_e01_v02", "length": 59, "label": 7}, {"file_name": "a12_s09_e03_v02", "length": 16, "label": 10}, {"file_name": "a02_s04_e03_v02", "length": 31, "label": 2}, {"file_name": "a04_s03_e04_v02", "length": 49, "label": 4}, {"file_name": "a12_s06_e01_v02", "length": 16, "label": 10}, {"file_name": "a11_s04_e03_v02", "length": 32, "label": 9}, {"file_name": "a05_s03_e00_v02", "length": 22, "label": 5}, {"file_name": "a12_s07_e00_v02", "length": 18, "label": 10}, {"file_name": "a06_s03_e02_v02", "length": 16, "label": 6}, {"file_name": "a03_s03_e05_v02", "length": 33, "label": 3}, 
{"file_name": "a11_s08_e01_v02", "length": 22, "label": 9}, {"file_name": "a05_s05_e04_v02", "length": 17, "label": 5}, {"file_name": "a06_s10_e01_v02", "length": 17, "label": 6}, {"file_name": "a04_s03_e02_v02", "length": 108, "label": 4}, {"file_name": "a02_s03_e03_v02", "length": 56, "label": 2}, {"file_name": "a09_s10_e04_v02", "length": 36, "label": 8}, {"file_name": "a04_s08_e04_v02", "length": 36, "label": 4}, {"file_name": "a11_s08_e00_v02", "length": 35, "label": 9}, {"file_name": "a02_s01_e00_v02", "length": 39, "label": 2}, {"file_name": "a04_s02_e03_v02", "length": 45, "label": 4}, {"file_name": "a04_s02_e01_v02", "length": 113, "label": 4}, {"file_name": "a06_s08_e00_v02", "length": 19, "label": 6}, {"file_name": "a08_s08_e01_v02", "length": 49, "label": 7}, {"file_name": "a02_s03_e01_v02", "length": 45, "label": 2}, {"file_name": "a11_s02_e02_v02", "length": 33, "label": 9}, {"file_name": "a09_s07_e02_v02", "length": 29, "label": 8}, {"file_name": "a02_s05_e03_v02", "length": 21, "label": 2}, {"file_name": "a01_s07_e02_v02", "length": 23, "label": 1}, {"file_name": "a06_s05_e03_v02", "length": 15, "label": 6}, {"file_name": "a12_s05_e03_v02", "length": 33, "label": 10}, {"file_name": "a03_s05_e00_v02", "length": 20, "label": 3}, {"file_name": "a09_s03_e02_v02", "length": 58, "label": 8}, {"file_name": "a09_s04_e04_v02", "length": 138, "label": 8}, {"file_name": "a11_s10_e00_v02", "length": 21, "label": 9}, {"file_name": "a04_s04_e01_v02", "length": 35, "label": 4}, {"file_name": "a02_s08_e02_v02", "length": 21, "label": 2}, {"file_name": "a01_s05_e00_v02", "length": 27, "label": 1}, {"file_name": "a04_s01_e07_v02", "length": 34, "label": 4}, {"file_name": "a11_s06_e00_v02", "length": 27, "label": 9}, {"file_name": "a05_s02_e00_v02", "length": 36, "label": 5}, {"file_name": "a02_s02_e03_v02", "length": 29, "label": 2}, {"file_name": "a09_s05_e02_v02", "length": 51, "label": 8}, {"file_name": "a05_s06_e02_v02", "length": 16, "label": 5}, {"file_name": "a08_s01_e03_v02", "length": 80, "label": 7}, {"file_name": "a08_s09_e01_v02", "length": 62, "label": 7}, {"file_name": "a02_s08_e04_v02", "length": 36, "label": 2}, {"file_name": "a06_s05_e02_v02", "length": 21, "label": 6}, {"file_name": "a01_s02_e03_v02", "length": 24, "label": 1}, {"file_name": "a03_s09_e02_v02", "length": 26, "label": 3}, {"file_name": "a04_s08_e00_v02", "length": 31, "label": 4}, {"file_name": "a12_s03_e04_v02", "length": 46, "label": 10}, {"file_name": "a08_s04_e01_v02", "length": 126, "label": 7}, {"file_name": "a12_s04_e03_v02", "length": 35, "label": 10}, {"file_name": "a04_s09_e03_v02", "length": 26, "label": 4}, {"file_name": "a12_s05_e00_v02", "length": 31, "label": 10}, {"file_name": "a11_s05_e04_v02", "length": 25, "label": 9}, {"file_name": "a05_s06_e03_v02", "length": 30, "label": 5}, {"file_name": "a09_s06_e02_v02", "length": 39, "label": 8}, {"file_name": "a12_s02_e03_v02", "length": 27, "label": 10}, {"file_name": "a11_s03_e03_v02", "length": 21, "label": 9}, {"file_name": "a11_s07_e04_v02", "length": 17, "label": 9}, {"file_name": "a04_s01_e00_v02", "length": 43, "label": 4}, {"file_name": "a03_s08_e03_v02", "length": 14, "label": 3}, {"file_name": "a04_s10_e00_v02", "length": 21, "label": 4}, {"file_name": "a08_s03_e00_v02", "length": 116, "label": 7}, {"file_name": "a02_s08_e03_v02", "length": 21, "label": 2}, {"file_name": "a01_s09_e03_v02", "length": 24, "label": 1}, {"file_name": "a01_s01_e04_v02", "length": 29, "label": 1}, {"file_name": "a01_s07_e00_v02", "length": 21, "label": 1}, 
{"file_name": "a02_s03_e00_v02", "length": 46, "label": 2}, {"file_name": "a01_s02_e00_v02", "length": 23, "label": 1}, {"file_name": "a03_s09_e04_v02", "length": 21, "label": 3}, {"file_name": "a01_s06_e02_v02", "length": 21, "label": 1}, {"file_name": "a03_s07_e02_v02", "length": 17, "label": 3}, {"file_name": "a03_s05_e04_v02", "length": 39, "label": 3}, {"file_name": "a08_s07_e01_v02", "length": 104, "label": 7}, {"file_name": "a04_s07_e03_v02", "length": 21, "label": 4}, {"file_name": "a08_s04_e04_v02", "length": 124, "label": 7}, {"file_name": "a08_s08_e00_v02", "length": 58, "label": 7}, {"file_name": "a02_s09_e00_v02", "length": 37, "label": 2}, {"file_name": "a06_s03_e00_v02", "length": 24, "label": 6}, {"file_name": "a09_s09_e04_v02", "length": 36, "label": 8}, {"file_name": "a05_s04_e04_v02", "length": 21, "label": 5}, {"file_name": "a09_s04_e03_v02", "length": 61, "label": 8}, {"file_name": "a01_s09_e04_v02", "length": 28, "label": 1}, {"file_name": "a05_s10_e00_v02", "length": 26, "label": 5}, {"file_name": "a09_s08_e02_v02", "length": 36, "label": 8}, {"file_name": "a11_s07_e01_v02", "length": 15, "label": 9}, {"file_name": "a06_s01_e00_v02", "length": 21, "label": 6}, {"file_name": "a12_s08_e04_v02", "length": 14, "label": 10}, {"file_name": "a08_s09_e04_v02", "length": 56, "label": 7}, {"file_name": "a12_s10_e02_v02", "length": 16, "label": 10}, {"file_name": "a04_s01_e01_v02", "length": 83, "label": 4}, {"file_name": "a01_s08_e01_v02", "length": 26, "label": 1}, {"file_name": "a09_s07_e00_v02", "length": 31, "label": 8}, {"file_name": "a04_s09_e00_v02", "length": 26, "label": 4}, {"file_name": "a08_s02_e02_v02", "length": 134, "label": 7}, {"file_name": "a09_s09_e02_v02", "length": 57, "label": 8}, {"file_name": "a09_s02_e03_v02", "length": 46, "label": 8}, {"file_name": "a11_s09_e01_v02", "length": 14, "label": 9}, {"file_name": "a03_s10_e01_v02", "length": 11, "label": 3}, {"file_name": "a11_s03_e02_v02", "length": 36, "label": 9}, {"file_name": "a11_s08_e04_v02", "length": 16, "label": 9}, {"file_name": "a06_s08_e02_v02", "length": 16, "label": 6}, {"file_name": "a12_s01_e00_v02", "length": 21, "label": 10}, {"file_name": "a02_s06_e04_v02", "length": 21, "label": 2}, {"file_name": "a06_s07_e01_v02", "length": 21, "label": 6}, {"file_name": "a05_s10_e03_v02", "length": 21, "label": 5}, {"file_name": "a09_s05_e04_v02", "length": 66, "label": 8}, {"file_name": "a03_s06_e00_v02", "length": 23, "label": 3}, {"file_name": "a12_s02_e01_v02", "length": 40, "label": 10}, {"file_name": "a08_s10_e02_v02", "length": 56, "label": 7}, {"file_name": "a08_s02_e00_v02", "length": 111, "label": 7}, {"file_name": "a06_s10_e03_v02", "length": 21, "label": 6}, {"file_name": "a11_s04_e02_v02", "length": 33, "label": 9}, {"file_name": "a08_s09_e03_v02", "length": 66, "label": 7}, {"file_name": "a12_s06_e04_v02", "length": 11, "label": 10}, {"file_name": "a01_s07_e01_v02", "length": 27, "label": 1}, {"file_name": "a05_s02_e04_v02", "length": 22, "label": 5}, {"file_name": "a09_s08_e00_v02", "length": 41, "label": 8}, {"file_name": "a02_s04_e04_v02", "length": 33, "label": 2}, {"file_name": "a06_s07_e00_v02", "length": 15, "label": 6}, {"file_name": "a04_s09_e01_v02", "length": 21, "label": 4}, {"file_name": "a09_s01_e00_v02", "length": 42, "label": 8}, {"file_name": "a08_s10_e01_v02", "length": 91, "label": 7}, {"file_name": "a11_s10_e02_v02", "length": 56, "label": 9}, {"file_name": "a09_s10_e02_v02", "length": 41, "label": 8}, {"file_name": "a03_s07_e04_v02", "length": 11, "label": 3}, 
{"file_name": "a05_s08_e00_v02", "length": 26, "label": 5}, {"file_name": "a05_s05_e03_v02", "length": 25, "label": 5}, {"file_name": "a11_s09_e03_v02", "length": 11, "label": 9}, {"file_name": "a12_s04_e04_v02", "length": 36, "label": 10}, {"file_name": "a04_s01_e03_v02", "length": 30, "label": 4}, {"file_name": "a04_s10_e02_v02", "length": 21, "label": 4}, {"file_name": "a06_s10_e04_v02", "length": 21, "label": 6}, {"file_name": "a01_s08_e00_v02", "length": 21, "label": 1}, {"file_name": "a03_s10_e02_v02", "length": 28, "label": 3}, {"file_name": "a03_s07_e01_v02", "length": 11, "label": 3}, {"file_name": "a05_s04_e03_v02", "length": 22, "label": 5}, {"file_name": "a01_s01_e02_v02", "length": 31, "label": 1}, {"file_name": "a05_s10_e04_v02", "length": 21, "label": 5}, {"file_name": "a06_s08_e03_v02", "length": 21, "label": 6}, {"file_name": "a02_s04_e02_v02", "length": 33, "label": 2}, {"file_name": "a04_s01_e09_v02", "length": 33, "label": 4}, {"file_name": "a12_s01_e04_v02", "length": 37, "label": 10}, {"file_name": "a02_s01_e02_v02", "length": 28, "label": 2}, {"file_name": "a12_s10_e00_v02", "length": 21, "label": 10}, {"file_name": "a11_s02_e00_v02", "length": 40, "label": 9}, {"file_name": "a02_s09_e01_v02", "length": 40, "label": 2}, {"file_name": "a02_s04_e00_v02", "length": 46, "label": 2}, {"file_name": "a12_s01_e02_v02", "length": 27, "label": 10}, {"file_name": "a04_s02_e05_v02", "length": 61, "label": 4}, {"file_name": "a03_s01_e04_v02", "length": 36, "label": 3}, {"file_name": "a01_s03_e04_v02", "length": 46, "label": 1}, {"file_name": "a02_s06_e01_v02", "length": 16, "label": 2}, {"file_name": "a12_s07_e04_v02", "length": 11, "label": 10}, {"file_name": "a12_s03_e05_v02", "length": 33, "label": 10}, {"file_name": "a08_s10_e04_v02", "length": 66, "label": 7}, {"file_name": "a02_s03_e02_v02", "length": 58, "label": 2}, {"file_name": "a05_s06_e04_v02", "length": 21, "label": 5}, {"file_name": "a05_s10_e01_v02", "length": 21, "label": 5}, {"file_name": "a09_s10_e01_v02", "length": 49, "label": 8}, {"file_name": "a08_s08_e04_v02", "length": 61, "label": 7}, {"file_name": "a06_s01_e02_v02", "length": 11, "label": 6}, {"file_name": "a01_s01_e01_v02", "length": 28, "label": 1}, {"file_name": "a06_s08_e04_v02", "length": 21, "label": 6}, {"file_name": "a09_s06_e03_v02", "length": 47, "label": 8}, {"file_name": "a06_s09_e01_v02", "length": 16, "label": 6}, {"file_name": "a08_s06_e01_v02", "length": 116, "label": 7}, {"file_name": "a02_s01_e04_v02", "length": 38, "label": 2}, {"file_name": "a11_s01_e00_v02", "length": 31, "label": 9}, {"file_name": "a05_s05_e02_v02", "length": 17, "label": 5}, {"file_name": "a03_s03_e00_v02", "length": 41, "label": 3}, {"file_name": "a01_s04_e04_v02", "length": 34, "label": 1}, {"file_name": "a06_s01_e04_v02", "length": 21, "label": 6}, {"file_name": "a09_s05_e05_v02", "length": 48, "label": 8}, {"file_name": "a01_s10_e01_v02", "length": 21, "label": 1}, {"file_name": "a03_s09_e00_v02", "length": 26, "label": 3}, {"file_name": "a08_s10_e00_v02", "length": 67, "label": 7}, {"file_name": "a05_s10_e02_v02", "length": 21, "label": 5}, {"file_name": "a04_s10_e01_v02", "length": 23, "label": 4}, {"file_name": "a05_s03_e04_v02", "length": 26, "label": 5}, {"file_name": "a05_s07_e02_v02", "length": 36, "label": 5}, {"file_name": "a12_s02_e04_v02", "length": 37, "label": 10}, {"file_name": "a04_s02_e07_v02", "length": 47, "label": 4}, {"file_name": "a06_s02_e03_v02", "length": 13, "label": 6}, {"file_name": "a09_s01_e03_v02", "length": 56, "label": 8}, 
{"file_name": "a08_s04_e00_v02", "length": 86, "label": 7}, {"file_name": "a02_s10_e01_v02", "length": 32, "label": 2}, {"file_name": "a11_s04_e01_v02", "length": 15, "label": 9}, {"file_name": "a03_s05_e01_v02", "length": 39, "label": 3}, {"file_name": "a06_s07_e04_v02", "length": 19, "label": 6}, {"file_name": "a09_s09_e03_v02", "length": 51, "label": 8}, {"file_name": "a02_s06_e02_v02", "length": 21, "label": 2}, {"file_name": "a05_s01_e04_v02", "length": 21, "label": 5}, {"file_name": "a11_s03_e04_v02", "length": 12, "label": 9}, {"file_name": "a04_s08_e02_v02", "length": 21, "label": 4}, {"file_name": "a04_s09_e04_v02", "length": 36, "label": 4}, {"file_name": "a08_s07_e00_v02", "length": 53, "label": 7}, {"file_name": "a04_s01_e05_v02", "length": 37, "label": 4}, {"file_name": "a12_s07_e01_v02", "length": 14, "label": 10}, {"file_name": "a02_s01_e03_v02", "length": 40, "label": 2}, {"file_name": "a09_s04_e00_v02", "length": 84, "label": 8}, {"file_name": "a09_s05_e01_v02", "length": 65, "label": 8}, {"file_name": "a09_s01_e04_v02", "length": 65, "label": 8}, {"file_name": "a12_s08_e00_v02", "length": 13, "label": 10}, {"file_name": "a04_s06_e03_v02", "length": 12, "label": 4}, {"file_name": "a05_s05_e00_v02", "length": 41, "label": 5}, {"file_name": "a11_s06_e01_v02", "length": 17, "label": 9}, {"file_name": "a01_s10_e02_v02", "length": 26, "label": 1}, {"file_name": "a04_s05_e01_v02", "length": 26, "label": 4}, {"file_name": "a08_s05_e06_v02", "length": 24, "label": 7}, {"file_name": "a02_s10_e04_v02", "length": 29, "label": 2}, {"file_name": "a11_s05_e00_v02", "length": 27, "label": 9}, {"file_name": "a04_s07_e01_v02", "length": 21, "label": 4}, {"file_name": "a03_s04_e01_v02", "length": 39, "label": 3}, {"file_name": "a03_s01_e02_v02", "length": 31, "label": 3}, {"file_name": "a06_s09_e02_v02", "length": 16, "label": 6}, {"file_name": "a03_s07_e00_v02", "length": 21, "label": 3}, {"file_name": "a11_s05_e05_v02", "length": 29, "label": 9}, {"file_name": "a08_s05_e05_v02", "length": 44, "label": 7}, {"file_name": "a06_s04_e02_v02", "length": 41, "label": 6}, {"file_name": "a12_s04_e01_v02", "length": 36, "label": 10}, {"file_name": "a09_s05_e00_v02", "length": 70, "label": 8}, {"file_name": "a04_s06_e02_v02", "length": 16, "label": 4}, {"file_name": "a04_s04_e04_v02", "length": 53, "label": 4}, {"file_name": "a09_s04_e02_v02", "length": 61, "label": 8}, {"file_name": "a02_s02_e01_v02", "length": 26, "label": 2}, {"file_name": "a06_s09_e00_v02", "length": 16, "label": 6}, {"file_name": "a05_s09_e00_v02", "length": 21, "label": 5}, {"file_name": "a05_s03_e01_v02", "length": 28, "label": 5}, {"file_name": "a02_s05_e04_v02", "length": 29, "label": 2}, {"file_name": "a01_s06_e01_v02", "length": 24, "label": 1}, {"file_name": "a01_s04_e05_v02", "length": 29, "label": 1}, {"file_name": "a12_s04_e02_v02", "length": 23, "label": 10}, {"file_name": "a03_s05_e02_v02", "length": 36, "label": 3}, {"file_name": "a01_s03_e02_v02", "length": 61, "label": 1}, {"file_name": "a05_s04_e05_v02", "length": 21, "label": 5}, {"file_name": "a01_s03_e00_v02", "length": 26, "label": 1}, {"file_name": "a08_s06_e03_v02", "length": 103, "label": 7}, {"file_name": "a05_s09_e02_v02", "length": 21, "label": 5}, {"file_name": "a01_s02_e01_v02", "length": 21, "label": 1}, {"file_name": "a01_s03_e01_v02", "length": 42, "label": 1}, {"file_name": "a04_s03_e01_v02", "length": 29, "label": 4}, {"file_name": "a06_s06_e00_v02", "length": 16, "label": 6}, {"file_name": "a12_s06_e02_v02", "length": 26, "label": 10}, 
{"file_name": "a12_s03_e01_v02", "length": 44, "label": 10}, {"file_name": "a08_s04_e02_v02", "length": 116, "label": 7}, {"file_name": "a06_s04_e04_v02", "length": 20, "label": 6}, {"file_name": "a12_s06_e03_v02", "length": 14, "label": 10}, {"file_name": "a08_s01_e04_v02", "length": 81, "label": 7}, {"file_name": "a04_s03_e00_v02", "length": 28, "label": 4}, {"file_name": "a12_s05_e02_v02", "length": 28, "label": 10}, {"file_name": "a08_s01_e00_v02", "length": 146, "label": 7}, {"file_name": "a01_s03_e03_v02", "length": 53, "label": 1}, {"file_name": "a04_s01_e08_v02", "length": 83, "label": 4}, {"file_name": "a01_s04_e02_v02", "length": 26, "label": 1}, {"file_name": "a06_s05_e00_v02", "length": 30, "label": 6}, {"file_name": "a01_s10_e04_v02", "length": 21, "label": 1}, {"file_name": "a08_s05_e00_v02", "length": 61, "label": 7}, {"file_name": "a09_s02_e00_v02", "length": 32, "label": 8}, {"file_name": "a12_s04_e05_v02", "length": 29, "label": 10}, {"file_name": "a06_s07_e02_v02", "length": 21, "label": 6}, {"file_name": "a08_s07_e02_v02", "length": 40, "label": 7}, {"file_name": "a11_s10_e01_v02", "length": 31, "label": 9}, {"file_name": "a02_s07_e00_v02", "length": 31, "label": 2}, {"file_name": "a06_s08_e01_v02", "length": 16, "label": 6}, {"file_name": "a01_s10_e03_v02", "length": 25, "label": 1}, {"file_name": "a11_s02_e04_v02", "length": 35, "label": 9}, {"file_name": "a02_s09_e04_v02", "length": 1, "label": 2}, {"file_name": "a12_s03_e03_v02", "length": 39, "label": 10}, {"file_name": "a05_s01_e01_v02", "length": 24, "label": 5}, {"file_name": "a05_s08_e02_v02", "length": 16, "label": 5}, {"file_name": "a12_s09_e02_v02", "length": 21, "label": 10}, {"file_name": "a09_s08_e01_v02", "length": 40, "label": 8}, {"file_name": "a01_s08_e04_v02", "length": 21, "label": 1}, {"file_name": "a09_s09_e00_v02", "length": 51, "label": 8}, {"file_name": "a03_s10_e03_v02", "length": 13, "label": 3}, {"file_name": "a09_s05_e03_v02", "length": 46, "label": 8}, {"file_name": "a09_s02_e04_v02", "length": 49, "label": 8}, {"file_name": "a08_s01_e01_v02", "length": 91, "label": 7}, {"file_name": "a09_s10_e00_v02", "length": 41, "label": 8}, {"file_name": "a12_s09_e01_v02", "length": 16, "label": 10}, {"file_name": "a05_s01_e00_v02", "length": 26, "label": 5}, {"file_name": "a06_s02_e01_v02", "length": 13, "label": 6}, {"file_name": "a08_s08_e03_v02", "length": 56, "label": 7}, {"file_name": "a04_s04_e03_v02", "length": 61, "label": 4}, {"file_name": "a12_s05_e04_v02", "length": 36, "label": 10}, {"file_name": "a02_s10_e02_v02", "length": 31, "label": 2}, {"file_name": "a06_s03_e03_v02", "length": 16, "label": 6}, {"file_name": "a05_s07_e04_v02", "length": 21, "label": 5}, {"file_name": "a02_s10_e00_v02", "length": 38, "label": 2}, {"file_name": "a08_s05_e03_v02", "length": 46, "label": 7}, {"file_name": "a12_s04_e00_v02", "length": 46, "label": 10}, {"file_name": "a03_s04_e02_v02", "length": 27, "label": 3}, {"file_name": "a06_s02_e02_v02", "length": 11, "label": 6}, {"file_name": "a03_s04_e03_v02", "length": 31, "label": 3}, {"file_name": "a11_s08_e03_v02", "length": 21, "label": 9}, {"file_name": "a09_s07_e03_v02", "length": 35, "label": 8}, {"file_name": "a05_s03_e03_v02", "length": 26, "label": 5}, {"file_name": "a09_s10_e03_v02", "length": 31, "label": 8}, {"file_name": "a11_s06_e02_v02", "length": 16, "label": 9}, {"file_name": "a05_s05_e01_v02", "length": 23, "label": 5}, {"file_name": "a01_s05_e01_v02", "length": 35, "label": 1}, {"file_name": "a04_s04_e02_v02", "length": 34, "label": 4}, 
{"file_name": "a11_s08_e02_v02", "length": 17, "label": 9}, {"file_name": "a11_s07_e03_v02", "length": 21, "label": 9}, {"file_name": "a04_s01_e06_v02", "length": 31, "label": 4}, {"file_name": "a06_s01_e01_v02", "length": 21, "label": 6}, {"file_name": "a12_s03_e02_v02", "length": 39, "label": 10}, {"file_name": "a08_s05_e02_v02", "length": 51, "label": 7}, {"file_name": "a03_s04_e00_v02", "length": 26, "label": 3}, {"file_name": "a11_s01_e03_v02", "length": 31, "label": 9}, {"file_name": "a03_s08_e01_v02", "length": 21, "label": 3}, {"file_name": "a11_s04_e00_v02", "length": 32, "label": 9}, {"file_name": "a04_s05_e00_v02", "length": 36, "label": 4}, {"file_name": "a12_s05_e01_v02", "length": 31, "label": 10}, {"file_name": "a02_s05_e02_v02", "length": 26, "label": 2}, {"file_name": "a06_s06_e01_v02", "length": 16, "label": 6}, {"file_name": "a03_s03_e02_v02", "length": 32, "label": 3}, {"file_name": "a11_s07_e02_v02", "length": 21, "label": 9}, {"file_name": "a11_s01_e02_v02", "length": 21, "label": 9}]
self.nw_ucla_root = 'data/NW-UCLA/all_sqe/'
self.time_steps = 52
self.bone = [(1, 2), (2, 3), (3, 3), (4, 3), (5, 3), (6, 5), (7, 6), (8, 7), (9, 3), (10, 9), (11, 10),
(12, 11), (13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17), (19, 18), (20, 19)]
self.label = []
for index in range(len(self.data_dict)):
info = self.data_dict[index]
self.label.append(int(info['label']) - 1)
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.random_choose = random_choose
self.random_shift = random_shift
self.random_move = random_move
self.window_size = window_size
self.normalization = normalization
self.use_mmap = use_mmap
self.repeat = repeat
self.load_data()
if normalization:
self.get_mean_map()
def load_data(self):
# data: N C V T M
self.data = []
for data in self.data_dict:
file_name = data['file_name']
with open(self.nw_ucla_root + file_name + '.json', 'r') as f:
json_file = json.load(f)
skeletons = json_file['skeletons']
value = np.array(skeletons)
self.data.append(value)
def get_mean_map(self):
        data = self.data  # assumes self.data has been stacked into an (N, C, T, V, M) array
        N, C, T, V, M = data.shape
self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0)
self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1))
def __len__(self):
return len(self.data_dict)*self.repeat
def __iter__(self):
return self
    def rand_view_transform(self, X, agx, agy, s):
agx = math.radians(agx)
agy = math.radians(agy)
Rx = np.asarray([[1,0,0], [0,math.cos(agx),math.sin(agx)], [0, -math.sin(agx),math.cos(agx)]])
Ry = np.asarray([[math.cos(agy), 0, -math.sin(agy)], [0,1,0], [math.sin(agy), 0, math.cos(agy)]])
Ss = np.asarray([[s,0,0],[0,s,0],[0,0,s]])
X0 = np.dot(np.reshape(X,(-1,3)), np.dot(Ry,np.dot(Rx,Ss)))
X = np.reshape(X0, X.shape)
return X
def __getitem__(self, index):
label = self.label[index % len(self.data_dict)]
value = self.data[index % len(self.data_dict)]
if self.train_val == 'train':
            random.random()  # one draw is discarded here to advance the RNG state
agx = random.randint(-60, 60)
agy = random.randint(-60, 60)
s = random.uniform(0.5, 1.5)
center = value[0,1,:]
value = value - center
scalerValue = self.rand_view_transform(value, agx, agy, s)
scalerValue = np.reshape(scalerValue, (-1, 3))
scalerValue = (scalerValue - np.min(scalerValue,axis=0)) / (np.max(scalerValue,axis=0) - np.min(scalerValue,axis=0))
scalerValue = scalerValue*2-1
scalerValue = np.reshape(scalerValue, (-1, 20, 3))
data = np.zeros( (self.time_steps, 20, 3) )
value = scalerValue[:,:,:]
length = value.shape[0]
random_idx = random.sample(list(np.arange(length))*100, self.time_steps)
random_idx.sort()
data[:,:,:] = value[random_idx,:,:]
else:
            random.random()  # one draw is discarded here to advance the RNG state
agx = 0
agy = 0
s = 1.0
center = value[0,1,:]
value = value - center
scalerValue = self.rand_view_transform(value, agx, agy, s)
scalerValue = np.reshape(scalerValue, (-1, 3))
scalerValue = (scalerValue - np.min(scalerValue,axis=0)) / (np.max(scalerValue,axis=0) - np.min(scalerValue,axis=0))
scalerValue = scalerValue*2-1
scalerValue = np.reshape(scalerValue, (-1, 20, 3))
data = np.zeros( (self.time_steps, 20, 3) )
value = scalerValue[:,:,:]
length = value.shape[0]
            idx = np.linspace(0, length - 1, self.time_steps).astype(int)
data[:,:,:] = value[idx,:,:] # T,V,C
if 'bone' in self.data_path:
data_bone = np.zeros_like(data)
for bone_idx in range(20):
data_bone[:, self.bone[bone_idx][0] - 1, :] = data[:, self.bone[bone_idx][0] - 1, :] - data[:, self.bone[bone_idx][1] - 1, :]
data = data_bone
if 'motion' in self.data_path:
data_motion = np.zeros_like(data)
data_motion[:-1, :, :] = data[1:, :, :] - data[:-1, :, :]
data = data_motion
data = np.transpose(data, (2, 0, 1))
C,T,V = data.shape
data = np.reshape(data,(C,T,V,1))
return data, label, index
def top_k(self, score, top_k):
rank = score.argsort()
hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
return sum(hit_top_k) * 1.0 / len(hit_top_k)
def import_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
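# Illustrative sketch (added; not part of the original repo). It exercises the
# random view transform defined above on a dummy skeleton. Assumptions: numpy is
# imported as np at the top of this file (it is used throughout), and the
# enclosing class is named Feeder, as in feeder_ntu.py. Since rand_view_transform
# never touches `self`, a placeholder receiver is enough here.
if __name__ == "__main__":
    dummy = np.random.randn(30, 20, 3)  # T frames, 20 joints, xyz
    out = Feeder.rand_view_transform(None, dummy, agx=30, agy=-15, s=1.2)
    assert out.shape == dummy.shape
    print("rand_view_transform output shape:", out.shape)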
| 94,902
| 603.477707
| 61,388
|
py
|
SkeletonGCL
|
SkeletonGCL-main/feeders/tools.py
|
import random
import matplotlib.pyplot as plt
import numpy as np
import pdb
import torch
import torch.nn.functional as F
def valid_crop_resize(data_numpy,valid_frame_num,p_interval,window):
# input: C,T,V,M
C, T, V, M = data_numpy.shape
begin = 0
end = valid_frame_num
valid_size = end - begin
#crop
if len(p_interval) == 1:
p = p_interval[0]
bias = int((1-p) * valid_size/2)
data = data_numpy[:, begin+bias:end-bias, :, :]# center_crop
cropped_length = data.shape[1]
else:
p = np.random.rand(1)*(p_interval[1]-p_interval[0])+p_interval[0]
        cropped_length = np.minimum(np.maximum(int(np.floor(valid_size*p)), 64), valid_size)  # constrain cropped_length to at least 64 frames
bias = np.random.randint(0,valid_size-cropped_length+1)
data = data_numpy[:, begin+bias:begin+bias+cropped_length, :, :]
if data.shape[1] == 0:
print(cropped_length, bias, valid_size)
# resize
data = torch.tensor(data,dtype=torch.float)
data = data.permute(0, 2, 3, 1).contiguous().view(C * V * M, cropped_length)
data = data[None, None, :, :]
    data = F.interpolate(data, size=(C * V * M, window), mode='bilinear', align_corners=False).squeeze()  # performs both up- and down-sampling
data = data.contiguous().view(C, V, M, window).permute(0, 3, 1, 2).contiguous().numpy()
return data
def downsample(data_numpy, step, random_sample=True):
# input: C,T,V,M
begin = np.random.randint(step) if random_sample else 0
return data_numpy[:, begin::step, :, :]
def temporal_slice(data_numpy, step):
# input: C,T,V,M
C, T, V, M = data_numpy.shape
    return data_numpy.reshape(C, T // step, step, V, M).transpose(
        (0, 1, 3, 2, 4)).reshape(C, T // step, V, step * M)
def mean_subtractor(data_numpy, mean):
# input: C,T,V,M
# naive version
    if mean == 0:
        return data_numpy
C, T, V, M = data_numpy.shape
valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
begin = valid_frame.argmax()
end = len(valid_frame) - valid_frame[::-1].argmax()
data_numpy[:, :end, :, :] = data_numpy[:, :end, :, :] - mean
return data_numpy
def auto_pading(data_numpy, size, random_pad=False):
C, T, V, M = data_numpy.shape
if T < size:
begin = random.randint(0, size - T) if random_pad else 0
data_numpy_paded = np.zeros((C, size, V, M))
data_numpy_paded[:, begin:begin + T, :, :] = data_numpy
return data_numpy_paded
else:
return data_numpy
def random_choose(data_numpy, size, auto_pad=True):
    # input: C,T,V,M -- randomly pick one segment; not entirely sound, since zero-padded frames may be chosen
C, T, V, M = data_numpy.shape
if T == size:
return data_numpy
elif T < size:
if auto_pad:
return auto_pading(data_numpy, size, random_pad=True)
else:
return data_numpy
else:
begin = random.randint(0, T - size)
return data_numpy[:, begin:begin + size, :, :]
def random_move(data_numpy,
angle_candidate=[-10., -5., 0., 5., 10.],
scale_candidate=[0.9, 1.0, 1.1],
transform_candidate=[-0.2, -0.1, 0.0, 0.1, 0.2],
move_time_candidate=[1]):
# input: C,T,V,M
C, T, V, M = data_numpy.shape
move_time = random.choice(move_time_candidate)
node = np.arange(0, T, T * 1.0 / move_time).round().astype(int)
node = np.append(node, T)
num_node = len(node)
A = np.random.choice(angle_candidate, num_node)
S = np.random.choice(scale_candidate, num_node)
T_x = np.random.choice(transform_candidate, num_node)
T_y = np.random.choice(transform_candidate, num_node)
a = np.zeros(T)
s = np.zeros(T)
t_x = np.zeros(T)
t_y = np.zeros(T)
# linspace
for i in range(num_node - 1):
a[node[i]:node[i + 1]] = np.linspace(
A[i], A[i + 1], node[i + 1] - node[i]) * np.pi / 180
s[node[i]:node[i + 1]] = np.linspace(S[i], S[i + 1],
node[i + 1] - node[i])
t_x[node[i]:node[i + 1]] = np.linspace(T_x[i], T_x[i + 1],
node[i + 1] - node[i])
t_y[node[i]:node[i + 1]] = np.linspace(T_y[i], T_y[i + 1],
node[i + 1] - node[i])
theta = np.array([[np.cos(a) * s, -np.sin(a) * s],
[np.sin(a) * s, np.cos(a) * s]])
# perform transformation
for i_frame in range(T):
xy = data_numpy[0:2, i_frame, :, :]
new_xy = np.dot(theta[:, :, i_frame], xy.reshape(2, -1))
new_xy[0] += t_x[i_frame]
new_xy[1] += t_y[i_frame]
data_numpy[0:2, i_frame, :, :] = new_xy.reshape(2, V, M)
return data_numpy
def random_shift(data_numpy):
C, T, V, M = data_numpy.shape
data_shift = np.zeros(data_numpy.shape)
valid_frame = (data_numpy != 0).sum(axis=3).sum(axis=2).sum(axis=0) > 0
begin = valid_frame.argmax()
end = len(valid_frame) - valid_frame[::-1].argmax()
size = end - begin
bias = random.randint(0, T - size)
data_shift[:, bias:bias + size, :, :] = data_numpy[:, begin:end, :, :]
return data_shift
def _rot(rot):
"""
rot: T,3
"""
cos_r, sin_r = rot.cos(), rot.sin() # T,3
zeros = torch.zeros(rot.shape[0], 1) # T,1
ones = torch.ones(rot.shape[0], 1) # T,1
r1 = torch.stack((ones, zeros, zeros),dim=-1) # T,1,3
rx2 = torch.stack((zeros, cos_r[:,0:1], sin_r[:,0:1]), dim = -1) # T,1,3
rx3 = torch.stack((zeros, -sin_r[:,0:1], cos_r[:,0:1]), dim = -1) # T,1,3
rx = torch.cat((r1, rx2, rx3), dim = 1) # T,3,3
ry1 = torch.stack((cos_r[:,1:2], zeros, -sin_r[:,1:2]), dim =-1)
r2 = torch.stack((zeros, ones, zeros),dim=-1)
ry3 = torch.stack((sin_r[:,1:2], zeros, cos_r[:,1:2]), dim =-1)
ry = torch.cat((ry1, r2, ry3), dim = 1)
rz1 = torch.stack((cos_r[:,2:3], sin_r[:,2:3], zeros), dim =-1)
r3 = torch.stack((zeros, zeros, ones),dim=-1)
rz2 = torch.stack((-sin_r[:,2:3], cos_r[:,2:3],zeros), dim =-1)
rz = torch.cat((rz1, rz2, r3), dim = 1)
rot = rz.matmul(ry).matmul(rx)
return rot
def random_rot(data_numpy, theta=0.3):
"""
data_numpy: C,T,V,M
"""
data_torch = torch.from_numpy(data_numpy)
C, T, V, M = data_torch.shape
data_torch = data_torch.permute(1, 0, 2, 3).contiguous().view(T, C, V*M) # T,3,V*M
rot = torch.zeros(3).uniform_(-theta, theta)
rot = torch.stack([rot, ] * T, dim=0)
rot = _rot(rot) # T,3,3
data_torch = torch.matmul(rot, data_torch)
data_torch = data_torch.view(T, C, V, M).permute(1, 0, 2, 3).contiguous()
return data_torch
def openpose_match(data_numpy):
C, T, V, M = data_numpy.shape
assert (C == 3)
score = data_numpy[2, :, :, :].sum(axis=1)
# the rank of body confidence in each frame (shape: T-1, M)
rank = (-score[0:T - 1]).argsort(axis=1).reshape(T - 1, M)
# data of frame 1
xy1 = data_numpy[0:2, 0:T - 1, :, :].reshape(2, T - 1, V, M, 1)
# data of frame 2
xy2 = data_numpy[0:2, 1:T, :, :].reshape(2, T - 1, V, 1, M)
# square of distance between frame 1&2 (shape: T-1, M, M)
distance = ((xy2 - xy1) ** 2).sum(axis=2).sum(axis=0)
# match pose
forward_map = np.zeros((T, M), dtype=int) - 1
forward_map[0] = range(M)
for m in range(M):
choose = (rank == m)
forward = distance[choose].argmin(axis=1)
for t in range(T - 1):
distance[t, :, forward[t]] = np.inf
forward_map[1:][choose] = forward
assert (np.all(forward_map >= 0))
# string data
for t in range(T - 1):
forward_map[t + 1] = forward_map[t + 1][forward_map[t]]
# generate data
new_data_numpy = np.zeros(data_numpy.shape)
for t in range(T):
new_data_numpy[:, t, :, :] = data_numpy[:, t, :, forward_map[
t]].transpose(1, 2, 0)
data_numpy = new_data_numpy
# score sort
trace_score = data_numpy[2, :, :, :].sum(axis=1).sum(axis=0)
rank = (-trace_score).argsort()
data_numpy = data_numpy[:, :, :, rank]
return data_numpy
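# Small self-contained check (illustrative addition, not in the original repo) of
# the two main augmentations above: valid_crop_resize crops a random p-fraction of
# the valid frames and bilinearly resizes them to a fixed window, and random_rot
# applies one random 3D rotation to every frame of the sequence.
if __name__ == "__main__":
    C, T, V, M = 3, 100, 25, 2
    sample = np.random.randn(C, T, V, M).astype(np.float32)
    resized = valid_crop_resize(sample, valid_frame_num=T, p_interval=[0.5, 1.0], window=64)
    print("valid_crop_resize ->", resized.shape)      # (3, 64, 25, 2), numpy array
    rotated = random_rot(resized)                     # returns a torch.Tensor
    print("random_rot        ->", tuple(rotated.shape))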
| 8,189
| 33.851064
| 150
|
py
|
SkeletonGCL
|
SkeletonGCL-main/feeders/bone_pairs.py
|
ntu_pairs = (
(1, 2), (2, 21), (3, 21), (4, 3), (5, 21), (6, 5),
(7, 6), (8, 7), (9, 21), (10, 9), (11, 10), (12, 11),
(13, 1), (14, 13), (15, 14), (16, 15), (17, 1), (18, 17),
(19, 18), (20, 19), (22, 23), (21, 21), (23, 8), (24, 25),(25, 12)
)
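# Illustrative sketch (added) of how these 1-indexed (joint, parent) pairs are
# typically consumed: each bone is the coordinate difference between a joint and
# its parent, as done in feeder_ntu.Feeder.__getitem__ when bone=True.
if __name__ == "__main__":
    import numpy as np
    joints = np.random.randn(3, 64, 25, 2)  # C, T, V, M
    bones = np.zeros_like(joints)
    for v1, v2 in ntu_pairs:
        bones[:, :, v1 - 1] = joints[:, :, v1 - 1] - joints[:, :, v2 - 1]
    print(bones.shape)  # (3, 64, 25, 2)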
| 262
| 36.571429
| 70
|
py
|
SkeletonGCL
|
SkeletonGCL-main/feeders/feeder_ntu.py
|
import numpy as np
import torch
from torch.utils.data import Dataset
from feeders import tools
class Feeder(Dataset):
def __init__(self, data_path, label_path=None, p_interval=1, split='train', random_choose=False, random_shift=False,
random_move=False, random_rot=False, window_size=-1, normalization=False, debug=False, use_mmap=False,
bone=False, vel=False):
"""
:param data_path:
:param label_path:
:param split: training set or test set
:param random_choose: If true, randomly choose a portion of the input sequence
        :param random_shift: If true, randomly pad zeros at the beginning or end of sequence
:param random_move:
:param random_rot: rotate skeleton around xyz axis
:param window_size: The length of the output sequence
:param normalization: If true, normalize input sequence
:param debug: If true, only use the first 100 samples
:param use_mmap: If true, use mmap mode to load data, which can save the running memory
:param bone: use bone modality or not
:param vel: use motion modality or not
:param only_label: only load label for ensemble score compute
"""
self.debug = debug
self.data_path = data_path
self.label_path = label_path
self.split = split
self.random_choose = random_choose
self.random_shift = random_shift
self.random_move = random_move
self.window_size = window_size
self.normalization = normalization
self.use_mmap = use_mmap
self.p_interval = p_interval
self.random_rot = random_rot
self.bone = bone
self.vel = vel
self.load_data()
if normalization:
self.get_mean_map()
def load_data(self):
# data: N C V T M
npz_data = np.load(self.data_path)
if self.split == 'train':
self.data = npz_data['x_train']
self.label = np.where(npz_data['y_train'] > 0)[1]
self.sample_name = ['train_' + str(i) for i in range(len(self.data))]
elif self.split == 'test':
self.data = npz_data['x_test']
self.label = np.where(npz_data['y_test'] > 0)[1]
self.sample_name = ['test_' + str(i) for i in range(len(self.data))]
else:
raise NotImplementedError('data split only supports train/test')
N, T, _ = self.data.shape
self.data = self.data.reshape((N, T, 2, 25, 3)).transpose(0, 4, 1, 3, 2)
def get_mean_map(self):
data = self.data
N, C, T, V, M = data.shape
self.mean_map = data.mean(axis=2, keepdims=True).mean(axis=4, keepdims=True).mean(axis=0)
self.std_map = data.transpose((0, 2, 4, 1, 3)).reshape((N * T * M, C * V)).std(axis=0).reshape((C, 1, V, 1))
def __len__(self):
return len(self.label)
def __iter__(self):
return self
def __getitem__(self, index):
data_numpy = self.data[index]
label = self.label[index]
data_numpy = np.array(data_numpy)
valid_frame_num = np.sum(data_numpy.sum(0).sum(-1).sum(-1) != 0)
# reshape Tx(MVC) to CTVM
data_numpy = tools.valid_crop_resize(data_numpy, valid_frame_num, self.p_interval, self.window_size)
if self.random_rot:
data_numpy = tools.random_rot(data_numpy)
if self.bone:
from .bone_pairs import ntu_pairs
bone_data_numpy = np.zeros_like(data_numpy)
for v1, v2 in ntu_pairs:
bone_data_numpy[:, :, v1 - 1] = data_numpy[:, :, v1 - 1] - data_numpy[:, :, v2 - 1]
data_numpy = bone_data_numpy
if self.vel:
data_numpy[:, :-1] = data_numpy[:, 1:] - data_numpy[:, :-1]
data_numpy[:, -1] = 0
return data_numpy, label, index
# data_numpy_list = []
# for i in range(2):
# # reshape Tx(MVC) to CTVM
# data_numpy_ = tools.valid_crop_resize(data_numpy, valid_frame_num, self.p_interval, self.window_size)
# if self.random_rot:
# data_numpy_ = tools.random_rot(data_numpy_)
# if self.bone:
# from .bone_pairs import ntu_pairs
# bone_data_numpy = np.zeros_like(data_numpy_)
# for v1, v2 in ntu_pairs:
# bone_data_numpy[:, :, v1 - 1] = data_numpy_[:, :, v1 - 1] - data_numpy_[:, :, v2 - 1]
# data_numpy_ = bone_data_numpy
# if self.vel:
# data_numpy_[:, :-1] = data_numpy_[:, 1:] - data_numpy_[:, :-1]
# data_numpy_[:, -1] = 0
# data_numpy_list.append(data_numpy_)
# if self.split == 'train':
# data_numpy = torch.stack(data_numpy_list, 0) # 2, C, T, V, M
# else:
# data_numpy = data_numpy_list[0]
# return data_numpy, label, index
def top_k(self, score, top_k):
rank = score.argsort()
hit_top_k = [l in rank[i, -top_k:] for i, l in enumerate(self.label)]
return sum(hit_top_k) * 1.0 / len(hit_top_k)
def import_class(name):
components = name.split('.')
mod = __import__(components[0])
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
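# Hedged usage sketch (added for illustration). The path below is hypothetical;
# the .npz must contain x_train/y_train/x_test/y_test with x of shape
# (N, T, M*V*C) = (N, T, 150), as load_data expects.
if __name__ == "__main__":
    feeder = Feeder(data_path='data/ntu/NTU60_CS.npz', split='train',
                    p_interval=[0.5, 1.0], window_size=64,
                    random_rot=True, bone=False, vel=False)
    data_numpy, label, index = feeder[0]
    print(data_numpy.shape, label)  # (3, 64, 25, 2) and the class id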
| 5,311
| 41.496
| 120
|
py
|
SkeletonGCL
|
SkeletonGCL-main/feeders/__init__.py
|
from . import tools
from . import feeder_ucla
from . import feeder_ntu
| 70
| 22.666667
| 25
|
py
|
covid19model
|
covid19model-master/Python/src/dataset.py
|
import yaml
import pandas as pd
import numpy as np
from src.util import poly, dt_to_dec
from scipy.stats import gamma as gamma_scipy
from numpy.random import gamma as gamma_np
from statsmodels.distributions.empirical_distribution import ECDF
class HierarchicalDataset:
"""Base Dataset class containing attributes relating to the datasets used for the modelling and methods
for data wrangling
Args:
- config_dir
- cases_dir
- ifr_dir
- serial_interval_dir
- interventions_dir
- num_countries
- num_covariates
- N2: number of days including forecast
- DEBUG: flag for debugging setting
Attributes:
- countries
- cases
- serial_interval
- num_countries
- num_covariates
- DEBUG
- ifr
- covariate_names
- covariates
"""
def __init__(
self,
config_dir="../../data/catalog.yml",
cases_dir="../../data/COVID-19-up-to-date.csv",
ifr_dir="../../data/weighted_fatality.csv",
serial_interval_dir="../../data/serial_interval.csv",
interventions_dir="../../data/interventions.csv",
num_countries=11,
num_covariates=6,
N2=75,
DEBUG=False,
):
with open(config_dir, "r") as stream:
            # thanks to https://stackoverflow.com/questions/1773805/how-can-i-parse-a-yaml-file-in-python
try:
config = yaml.safe_load(stream)
except yaml.YAMLError as exc:
print(exc)
# read in all the datasets
self.countries = config["countries"]
self.cases = pd.read_csv(cases_dir, encoding="ISO-8859-1")
self.serial_interval = pd.read_csv(serial_interval_dir)
covariates = pd.read_csv(interventions_dir)
self.num_countries = num_countries
self.num_covariates = num_covariates
# whether to use smaller dataset for debugging
self.DEBUG = DEBUG
# process the datasets
        # rename the country column, and the UK in particular
ifr = pd.read_csv(ifr_dir)
# inefficient bit but couldn't figure out why .rename() doesn't work
ifr["country"] = ifr.iloc[:, 1]
# rename the UK
ifr["country"][ifr["country"] == "United Kingdom"] = "United_Kingdom"
self.ifr = ifr
# pick out the covariates for the countries (11 by default, 8 interventions)
# num_covariates+1 because we need the Country index column too
covariates = covariates.iloc[:num_countries, : num_covariates + 1]
self.covariate_names = list(covariates.columns)[1:]
# convert the dates to datetime
for covariate_name in self.covariate_names:
covariates[covariate_name] = covariates[covariate_name].apply(
pd.to_datetime, format="%Y-%m-%d"
)
# making all covariates that happen after lockdown to have same date as lockdown
non_lockdown_covariates = self.covariate_names.copy()
non_lockdown_covariates.remove("lockdown")
for covariate_name in non_lockdown_covariates:
ind = covariates[covariate_name] > covariates["lockdown"]
            covariates.loc[ind, covariate_name] = covariates["lockdown"][ind]
self.covariates = covariates
def get_stan_data(self, N2):
"""Returns a dictionary object containing data to be fed into the Stan compiler
Args:
N2: number of days including forecast
"""
stan_data = {}
# M, number of countries
stan_data["M"] = self.num_countries
stan_data["p"] = self.num_covariates
stan_data["x1"] = poly(np.linspace(0, N2 - 1, N2), 2)[:, 0]
# for some reason it is negative, check util.py
stan_data["x2"] = -poly(np.linspace(0, N2 - 1, N2), 2)[:, 1]
# TODO: this is hardcoded in base.r, beware
stan_data["N0"] = self.num_covariates
stan_data["N2"] = N2
stan_data["SI"] = self.serial_interval["fit"][:N2]
stan_data["x"] = np.linspace(1, N2, N2)
# TODO: we will use lists, but we need to be careful of stack memory in the future
stan_data["EpidemicStart"] = []
stan_data["y"] = []
stan_data["N"] = []
# initialise with number of covariates
for i in range(1, self.num_covariates+1):
stan_data["covariate{}".format(i)] = np.zeros((N2, self.num_countries))
        # initialise the deaths, cases and hazard arrays with sentinel values
        stan_data["deaths"] = np.ones((N2, self.num_countries)) * (-1)
        stan_data["cases"] = np.ones((N2, self.num_countries)) * (-1)
stan_data["f"] = np.zeros((N2, self.num_countries))
# we will generate the dataset in this country order. Could also use a pandas dataframe, but not necessary in my opinion
for country_num, country in enumerate(self.countries):
ifr = self.ifr["weighted_fatality"][self.ifr["country"] == country]
covariates1 = self.covariates.loc[
self.covariates["Country"] == country, self.covariate_names
]
            cases = self.cases[self.cases["countriesAndTerritories"] == country].copy()
cases["date"] = cases["dateRep"].apply(pd.to_datetime, format="%d/%m/%Y")
cases["t"] = cases["date"].apply(lambda v: dt_to_dec(v))
cases = cases.sort_values(by="t")
cases = cases.reset_index()
# where the first case occurs
index = cases[(cases["cases"] > 0)].index[0]
# where the cumulative deaths reaches 10
index_1 = cases[(cases["deaths"].cumsum() >= 10)].index[0]
# 30 days before 10th death
index_2 = index_1 - 30
            # index_2 marks 30 days before the 10th cumulative death
            print(
                "First non-zero case is on day {}, and 30 days before the 10th death is day {}".format(
                    index, index_2
                )
            )
            # only care about this timeframe
cases = cases[index_2 : cases.shape[0]]
# update Epidemic Start day for each country
stan_data["EpidemicStart"].append(index_1 + 1 - index_2)
# turn intervention dates into boolean
for covariate in self.covariate_names:
cases[covariate] = (
cases["date"] > covariates1[covariate].values[0]
) * 1
# record dates for cases in the country
cases[country] = cases["date"]
# Hazard estimation
N = cases.shape[0]
print("{} has {} of data".format(country, N))
# number of days to forecast
forecast = N2 - N
if forecast < 0:
raise ValueError("Increase N2 to make it work. N2=N, forecast=N2-N")
# discrete hazard rate from time t=0,...,99
h = np.zeros(forecast + N)
if self.DEBUG:
mean = 18.8
cv = 0.45
                loc = 1 / cv ** 2       # gamma shape parameter
                scale = mean * cv ** 2
                for i in range(len(h)):
                    h[i] = (
                        ifr * gamma_scipy.cdf(i, a=loc, scale=scale)
                        - ifr * gamma_scipy.cdf(i - 1, a=loc, scale=scale)
                    ) / (1 - ifr * gamma_scipy.cdf(i - 1, a=loc, scale=scale))
else:
# infection to onset
mean_1 = 5.1
cv_1 = 0.86
loc_1 = 1 / cv_1 ** 2
scale_1 = mean_1 * cv_1 ** 2
# onset to death
mean_2 = 18.8
cv_2 = 0.45
loc_2 = 1 / cv_2 ** 2
scale_2 = mean_2 * cv_2 ** 2
# assume that IFR is probability of dying given infection
x1 = gamma_np(shape=loc_1, scale=scale_1, size=int(5e6))
# infection-to-onset ----> do all people who are infected get to onset?
x2 = gamma_np(shape=loc_2, scale=scale_2, size=int(5e6))
# CDF of sum of 2 gamma distributions
gamma_cdf = ECDF(x1 + x2)
# probability distribution of the infection-to-death distribution \pi_m in the paper
def convolution(u):
return ifr * gamma_cdf(u)
h[0] = convolution(1.5) - convolution(0)
for i in range(1, len(h)):
h[i] = (convolution(i + 0.5) - convolution(i - 0.5)) / (
1 - convolution(i - 0.5)
)
# TODO: Check these quantities via tests
s = np.zeros(N2)
s[0] = 1
for i in range(1, N2):
s[i] = s[i - 1] * (1 - h[i - 1])
# slot in these values
stan_data["N"].append(N)
stan_data["f"][:, country_num] = h * s
stan_data["y"].append(cases["cases"].values[0])
stan_data["deaths"][:N, country_num] = cases["deaths"]
stan_data["cases"][:N, country_num] = cases["cases"]
covariates2 = np.zeros((N2, self.num_covariates))
covariates2[:N, :] = cases[self.covariate_names].values
covariates2[N:N2, :] = covariates2[N - 1, :]
covariates2 = pd.DataFrame(covariates2, columns=self.covariate_names)
for j, covariate in enumerate(self.covariate_names):
stan_data["covariate{}".format(j+1)][:, country_num] = covariates2[
covariate
]
# convert these arrays to integer dtype
stan_data["cases"] = stan_data["cases"].astype(int)
stan_data["deaths"] = stan_data["deaths"].astype(int)
return stan_data
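# Standalone sketch (illustrative addition) of the discrete infection-to-death
# hazard built inside get_stan_data: pi(u) = IFR * CDF(Gamma_onset + Gamma_death),
# discretised on half-integer bins, with survival s and final weights f = h * s.
if __name__ == "__main__":
    ifr = 0.01                   # assumed fatality ratio for the sketch
    mean_1, cv_1 = 5.1, 0.86     # infection to onset
    mean_2, cv_2 = 18.8, 0.45    # onset to death
    x1 = gamma_np(shape=1 / cv_1 ** 2, scale=mean_1 * cv_1 ** 2, size=int(1e6))
    x2 = gamma_np(shape=1 / cv_2 ** 2, scale=mean_2 * cv_2 ** 2, size=int(1e6))
    cdf = ECDF(x1 + x2)          # CDF of the sum of the two gammas
    N2 = 75
    h = np.zeros(N2)
    h[0] = ifr * cdf(1.5)
    for i in range(1, N2):
        h[i] = ifr * (cdf(i + 0.5) - cdf(i - 0.5)) / (1 - ifr * cdf(i - 0.5))
    s = np.ones(N2)
    for i in range(1, N2):
        s[i] = s[i - 1] * (1 - h[i - 1])
    print("f[:5] =", (h * s)[:5])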
| 9,851
| 38.408
| 128
|
py
|
covid19model
|
covid19model-master/Python/src/util.py
|
import numpy as np
from datetime import datetime
def poly(x, p):
"""
Thanks to https://stackoverflow.com/questions/41317127/python-equivalent-to-r-poly-function
"""
x = np.array(x)
    X = np.transpose(np.vstack([x**k for k in range(p+1)]))
return np.linalg.qr(X)[0][:,1:]
def dt_to_dec(dt):
"""Convert a datetime to decimal year.
Thanks to https://stackoverflow.com/questions/29851357/python-datetime-to-decimal-year-one-day-off-where-is-the-bug
"""
year_start = datetime(dt.year, 1, 1)
year_end = year_start.replace(year=dt.year+1)
return dt.year + ((dt - year_start).total_seconds() / # seconds so far
float((year_end - year_start).total_seconds())) # seconds in year
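# Quick illustrative check (added) of both helpers: poly reproduces R's poly()
# orthogonal-polynomial basis via a QR decomposition, and dt_to_dec maps a
# datetime to a decimal year.
if __name__ == "__main__":
    basis = poly(np.linspace(0, 9, 10), 2)
    print(basis.shape)                       # (10, 2): linear and quadratic columns
    print(dt_to_dec(datetime(2020, 7, 2)))   # roughly 2020.5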
| 726
| 37.263158
| 119
|
py
|
STDEN
|
STDEN-main/stden_train.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.stden_supervisor import STDENSupervisor
import numpy as np
import torch
def main(args):
with open(args.config_filename) as f:
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)
graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
adj_mx = load_graph_data(graph_pkl_filename)
supervisor = STDENSupervisor(adj_mx=adj_mx, **supervisor_config)
supervisor.train()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_filename', default=None, type=str,
help='Configuration filename for restoring the model.')
    parser.add_argument('--use_cpu_only', action='store_true', help='Use CPU only.')
parser.add_argument('-r', '--random_seed', type=int, default=2021, help="Random seed for reproduction.")
args = parser.parse_args()
torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
main(args)
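# Example invocation (the config path is hypothetical; the YAML must provide a
# 'data' section with graph_pkl_filename plus the kwargs STDENSupervisor expects):
#   python stden_train.py --config_filename configs/stden.yaml -r 2021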
| 1,156
| 29.447368
| 108
|
py
|
STDEN
|
STDEN-main/stden_eval.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import yaml
from lib.utils import load_graph_data
from model.stden_supervisor import STDENSupervisor
import numpy as np
import torch
def main(args):
with open(args.config_filename) as f:
        supervisor_config = yaml.load(f, Loader=yaml.FullLoader)
graph_pkl_filename = supervisor_config['data'].get('graph_pkl_filename')
adj_mx = load_graph_data(graph_pkl_filename)
supervisor = STDENSupervisor(adj_mx=adj_mx, **supervisor_config)
horizon = supervisor_config['model'].get('horizon')
extract_latent = supervisor_config['model'].get('save_latent')
supervisor.eval_more(dataset='test',
save=args.save_pred,
seq_len=np.arange(1, horizon+1, 1),
extract_latent=extract_latent)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config_filename', default=None, type=str,
help='Configuration filename for restoring the model.')
    parser.add_argument('--use_cpu_only', action='store_true', help='Use CPU only.')
parser.add_argument('-r', '--random_seed', type=int, default=2021, help="Random seed for reproduction.")
parser.add_argument('--save_pred', action='store_true', help='Save the prediction.')
args = parser.parse_args()
torch.manual_seed(args.random_seed)
np.random.seed(args.random_seed)
main(args)
| 1,577
| 34.863636
| 108
|
py
|
STDEN
|
STDEN-main/model/diffeq_solver.py
|
import torch
import torch.nn as nn
import time
from torchdiffeq import odeint
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DiffeqSolver(nn.Module):
def __init__(self, odefunc, method, latent_dim,
odeint_rtol = 1e-4, odeint_atol = 1e-5):
nn.Module.__init__(self)
self.ode_method = method
self.odefunc = odefunc
self.latent_dim = latent_dim
self.rtol = odeint_rtol
self.atol = odeint_atol
def forward(self, first_point, time_steps_to_pred):
"""
        Decode the trajectory through the ODE solver.
:param time_steps_to_pred: horizon
:param first_point: (n_traj_samples, batch_size, num_nodes * latent_dim)
:return: pred_y: # shape (horizon, n_traj_samples, batch_size, self.num_nodes * self.output_dim)
"""
n_traj_samples, batch_size = first_point.size()[0], first_point.size()[1]
first_point = first_point.reshape(n_traj_samples * batch_size, -1) # reduce the complexity by merging dimension
# pred_y shape: (horizon, n_traj_samples * batch_size, num_nodes * latent_dim)
start_time = time.time()
self.odefunc.nfe = 0
pred_y = odeint(self.odefunc,
first_point,
time_steps_to_pred,
rtol=self.rtol,
atol=self.atol,
method=self.ode_method)
time_fe = time.time() - start_time
# pred_y shape: (horizon, n_traj_samples, batch_size, num_nodes * latent_dim)
pred_y = pred_y.reshape(pred_y.size()[0], n_traj_samples, batch_size, -1)
# assert(pred_y.size()[1] == n_traj_samples)
# assert(pred_y.size()[2] == batch_size)
return pred_y, (self.odefunc.nfe, time_fe)
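# Minimal sketch (illustrative; requires torchdiffeq) of driving DiffeqSolver with
# a toy ODE function. The toy module carries an `nfe` counter because forward()
# above resets and reports self.odefunc.nfe.
if __name__ == "__main__":
    class ToyODEFunc(nn.Module):
        def __init__(self, dim):
            super().__init__()
            self.lin = nn.Linear(dim, dim)
            self.nfe = 0  # number of function evaluations, read by DiffeqSolver

        def forward(self, t, y):
            self.nfe += 1
            return self.lin(y)

    dim = 8                                   # num_nodes * latent_dim for the toy
    solver = DiffeqSolver(ToyODEFunc(dim), method='dopri5', latent_dim=dim)
    first_point = torch.randn(1, 4, dim)      # (n_traj_samples, batch_size, dim)
    t = torch.linspace(0., 1., 5)             # 5 prediction time steps
    pred_y, (nfe, t_fe) = solver(first_point, t)
    print(pred_y.shape, "nfe =", nfe)         # torch.Size([5, 1, 4, 8])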
| 1,877
| 37.326531
| 119
|
py
|