metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "test_cosmology.py",
"repo_name": "rennehan/yt-swift",
"repo_path": "yt-swift_extracted/yt-swift-main/yt/utilities/tests/test_cosmology.py",
"type": "Python"
}
|
import os
import numpy as np
from numpy.testing import assert_almost_equal, assert_equal
from yt.testing import assert_rel_equal, requires_file, requires_module
from yt.units.yt_array import YTArray, YTQuantity
from yt.utilities.answer_testing.framework import data_dir_load
from yt.utilities.cosmology import Cosmology
from yt.utilities.on_demand_imports import _yaml as yaml
local_dir = os.path.dirname(os.path.abspath(__file__))
def z_from_t_analytic(my_time, hubble_constant=0.7, omega_matter=0.3, omega_lambda=0.7):
    """
    Compute the redshift from time after the big bang. This is based on
    Enzo's CosmologyComputeExpansionFactor.C, but altered to use physical
    units.

    Parameters
    ----------
    my_time : YTArray or array-like
        Time after the big bang; bare arrays are assumed to be in seconds.
    hubble_constant : float
        Hubble constant in units of 100 km/s/Mpc.
    omega_matter, omega_lambda : float
        Density parameters; only flat matter-only, open matter-only, and
        flat lambda cosmologies are supported (NotImplementedError otherwise).

    Returns
    -------
    ndarray
        Redshift corresponding to each input time.
    """
    hubble_constant = YTQuantity(hubble_constant, "100*km/s/Mpc")
    omega_curvature = 1.0 - omega_matter - omega_lambda

    OMEGA_TOLERANCE = 1e-5
    ETA_TOLERANCE = 1.0e-10

    # Convert the time to Time * H0 (dimensionless).
    if not isinstance(my_time, YTArray):
        my_time = YTArray(my_time, "s")

    t0 = (my_time.in_units("s") * hubble_constant.in_units("1/s")).to_ndarray()

    # For a flat universe with omega_matter = 1, it's easy.
    if np.fabs(omega_matter - 1) < OMEGA_TOLERANCE and omega_lambda < OMEGA_TOLERANCE:
        a = np.power(1.5 * t0, 2.0 / 3.0)

    # For omega_matter < 1 and omega_lambda == 0 see
    # Peebles 1993, eq. 13-3, 13-10.
    # Actually, this is a little tricky since we must solve an equation
    # of the form eta - np.sinh(eta) + x = 0..
    elif omega_matter < 1 and omega_lambda < OMEGA_TOLERANCE:
        x = 2 * t0 * np.power(1.0 - omega_matter, 1.5) / omega_matter

        # Compute eta in a three step process, first from a third-order
        # Taylor expansion of the formula above, then use that in a fifth-order
        # approximation. Then finally, iterate on the formula itself, solving for
        # eta. This works well because parts 1 & 2 are an excellent approximation
        # when x is small and part 3 converges quickly when x is large.
        eta = np.power(6 * x, 1.0 / 3.0)  # part 1
        eta = np.power(120 * x / (20 + eta * eta), 1.0 / 3.0)  # part 2
        # mask tracks which elements have not yet converged; it is narrowed
        # in place each iteration so only unconverged entries are updated.
        mask = np.ones(eta.size, dtype=bool)
        max_iter = 1000
        for i in range(max_iter):  # part 3
            eta_old = eta[mask]
            # Fixed-point form of eta - sinh(eta) + x = 0: eta = arcsinh(eta + x).
            eta[mask] = np.arcsinh(eta[mask] + x[mask])
            # Keep iterating only where the update still exceeds the tolerance.
            mask[mask] = np.fabs(eta[mask] - eta_old) >= ETA_TOLERANCE
            if not mask.any():
                break
            if i == max_iter - 1:
                raise RuntimeError("No convergence after %d iterations." % i)

        # Now use eta to compute the expansion factor (eq. 13-10, part 2).
        a = omega_matter / (2.0 * (1.0 - omega_matter)) * (np.cosh(eta) - 1.0)

    # For flat universe, with non-zero omega_lambda, see eq. 13-20.
    elif np.fabs(omega_curvature) < OMEGA_TOLERANCE and omega_lambda > OMEGA_TOLERANCE:
        a = np.power(omega_matter / (1 - omega_matter), 1.0 / 3.0) * np.power(
            np.sinh(1.5 * np.sqrt(1.0 - omega_matter) * t0), 2.0 / 3.0
        )

    else:
        raise NotImplementedError

    redshift = (1.0 / a) - 1.0
    return redshift
def t_from_z_analytic(z, hubble_constant=0.7, omega_matter=0.3, omega_lambda=0.7):
    """
    Compute the age of the Universe from redshift. This is based on Enzo's
    CosmologyComputeTimeFromRedshift.C, but altered to use physical units.
    """
    hubble_constant = YTQuantity(hubble_constant, "100*km/s/Mpc")
    curvature = 1.0 - omega_matter - omega_lambda
    zp1 = 1 + z

    # Compute the dimensionless quantity t * H0 for the supported cosmologies.
    if omega_matter == 1.0 and omega_lambda == 0.0:
        # Einstein-de Sitter (flat, matter-only): a ~ t^(2/3).
        t0 = 2.0 / 3.0 / np.power(zp1, 1.5)
    elif omega_matter < 1 and omega_lambda == 0:
        # Open, matter-only universe; Peebles 1993, eq. 13-3, 13-10.
        eta = np.arccosh(1 + 2 * (1 - omega_matter) / omega_matter / zp1)
        t0 = (
            omega_matter
            / (2 * np.power(1.0 - omega_matter, 1.5))
            * (np.sinh(eta) - eta)
        )
    elif np.fabs(curvature) < 1.0e-3 and omega_lambda != 0:
        # Flat universe with a cosmological constant; eq. 13-20.
        arg = np.sqrt((1 - omega_matter) / omega_matter) / np.power(zp1, 1.5)
        t0 = 2.0 / 3.0 / np.sqrt(1 - omega_matter) * np.arcsinh(arg)
    else:
        raise NotImplementedError(f"{hubble_constant}, {omega_matter}, {omega_lambda}")

    # Now convert from Time * H0 to time.
    return t0 / hubble_constant
def test_z_t_roundtrip():
    """
    Make sure t_from_z and z_from_t are consistent.
    """
    cosmo = Cosmology()
    # Fixed-seed random sample of log(a) values in [-6, 6).
    rng = np.random.RandomState(6132305)
    log_a = 12 * rng.random_sample(10000) - 6
    z_in = 1 / np.power(10, log_a) - 1
    # Converting z -> t -> z must recover the input to ~4 significant digits.
    z_out = cosmo.z_from_t(cosmo.t_from_z(z_in))
    assert_rel_equal(z_in, z_out, 4)
def test_z_t_analytic():
    """
    Test z/t conversions against analytic solutions.
    """
    cosmos = (
        {"hubble_constant": 0.7, "omega_matter": 0.3, "omega_lambda": 0.7},
        {"hubble_constant": 0.7, "omega_matter": 1.0, "omega_lambda": 0.0},
        {"hubble_constant": 0.7, "omega_matter": 0.3, "omega_lambda": 0.0},
    )

    for cosmo in cosmos:
        curv = 1 - cosmo["omega_matter"] - cosmo["omega_lambda"]
        calc = Cosmology(omega_curvature=curv, **cosmo)

        # Fixed-seed random sample of log(a) values in [-6, 6).
        rng = np.random.RandomState(10132324)
        log_a = 12 * rng.random_sample(1000) - 6
        redshift = 1 / np.power(10, log_a) - 1

        # t(z): calculator vs analytic solution.
        t_analytic = t_from_z_analytic(redshift, **cosmo).to("Gyr")
        t_calc = calc.t_from_z(redshift).to("Gyr")
        assert_rel_equal(
            t_analytic,
            t_calc,
            4,
            err_msg=f"t_from_z does not match analytic version for cosmology {cosmo}.",
        )

        # z(t): sample log(t*H0) in [-3, 1).
        t_h0 = np.power(10, 4 * rng.random_sample(1000) - 3)
        times = (t_h0 / calc.hubble_constant).to("Gyr")
        z_analytic = z_from_t_analytic(times, **cosmo)
        z_calc = calc.z_from_t(times)
        # Compare scale factors since z approaches 0.
        assert_rel_equal(
            1 / (1 + z_analytic),
            1 / (1 + z_calc),
            5,
            err_msg=f"z_from_t does not match analytic version for cosmology {cosmo}.",
        )
def test_dark_factor():
    """
    Test that dark factor returns same value for when not
    being used and when w_0 = -1 and w_z = 0.
    """
    cosmo = Cosmology(w_0=-1, w_a=0, use_dark_factor=False)
    # With these parameters the dark factor must be 1 whether or not the
    # dark-factor machinery is enabled.
    for enabled in (False, True):
        cosmo.use_dark_factor = enabled
        assert_equal(cosmo.get_dark_factor(0), 1.0)
@requires_module("yaml")
def test_cosmology_calculator_answers():
    """
    Test cosmology calculator functions against previously calculated values.
    """
    answer_path = os.path.join(local_dir, "cosmology_answers.yml")
    with open(answer_path) as fh:
        data = yaml.load(fh, Loader=yaml.FullLoader)

    for cname, copars in data["cosmologies"].items():
        curv = (
            1
            - copars["omega_matter"]
            - copars["omega_lambda"]
            - copars["omega_radiation"]
        )
        cosmology = Cosmology(omega_curvature=curv, **copars)

        for fname, finfo in data["functions"].items():
            result = getattr(cosmology, fname)(*finfo.get("args", []))
            units = finfo.get("units")
            if units is not None:
                result.convert_to_units(units)
            result = float(result)
            expected = finfo["answers"][cname]
            err_msg = (
                "{} answer has changed for {} cosmology, old: {:f}, new: {:f}.".format(
                    fname,
                    cname,
                    expected,
                    result,
                )
            )
            assert_almost_equal(result, expected, 10, err_msg=err_msg)
# Answer-testing dataset used by the dataset/standalone comparison below.
enzotiny = "enzo_tiny_cosmology/DD0020/DD0020"


@requires_module("h5py")
@requires_file(enzotiny)
def test_dataset_cosmology_calculator():
    """
    Test datasets's cosmology calculator against standalone.
    """
    ds = data_dir_load(enzotiny)
    # Build a standalone calculator from the dataset's own parameters.
    co = Cosmology(
        hubble_constant=ds.hubble_constant,
        omega_matter=ds.omega_matter,
        omega_lambda=ds.omega_lambda,
    )
    # The two calculators must agree exactly on a comoving radial distance.
    v1 = ds.cosmology.comoving_radial_distance(1, 5).to("Mpccm").v
    v2 = co.comoving_radial_distance(1, 5).to("Mpccm").v
    assert_equal(v1, v2)
|
rennehanREPO_NAMEyt-swiftPATH_START.@yt-swift_extracted@yt-swift-main@yt@utilities@tests@test_cosmology.py@.PATH_END.py
|
{
"filename": "backend_pdf.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/backends/backend_pdf.py",
"type": "Python"
}
|
"""
A PDF Matplotlib backend.
Author: Jouni K Seppänen <jks@iki.fi> and others.
"""
import codecs
from datetime import timezone
from datetime import datetime
from enum import Enum
from functools import total_ordering
from io import BytesIO
import itertools
import logging
import math
import os
import string
import struct
import sys
import time
import types
import warnings
import zlib
import numpy as np
from PIL import Image
import matplotlib as mpl
from matplotlib import _api, _text_helpers, _type1font, cbook, dviread
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import (
_Backend, FigureCanvasBase, FigureManagerBase, GraphicsContextBase,
RendererBase)
from matplotlib.backends.backend_mixed import MixedModeRenderer
from matplotlib.figure import Figure
from matplotlib.font_manager import get_font, fontManager as _fontManager
from matplotlib._afm import AFM
from matplotlib.ft2font import FT2Font, FaceFlags, Kerning, LoadFlags, StyleFlags
from matplotlib.transforms import Affine2D, BboxBase
from matplotlib.path import Path
from matplotlib.dates import UTC
from matplotlib import _path
from . import _backend_pdf_ps
_log = logging.getLogger(__name__)
# Overview
#
# The low-level knowledge about pdf syntax lies mainly in the pdfRepr
# function and the classes Reference, Name, Operator, and Stream. The
# PdfFile class knows about the overall structure of pdf documents.
# It provides a "write" method for writing arbitrary strings in the
# file, and an "output" method that passes objects through the pdfRepr
# function before writing them in the file. The output method is
# called by the RendererPdf class, which contains the various draw_foo
# methods. RendererPdf contains a GraphicsContextPdf instance, and
# each draw_foo calls self.check_gc before outputting commands. This
# method checks whether the pdf graphics state needs to be modified
# and outputs the necessary commands. GraphicsContextPdf represents
# the graphics state, and its "delta" method returns the commands that
# modify the state.
# Add "pdf.use14corefonts: True" in your configuration file to use only
# the 14 PDF core fonts. These fonts do not need to be embedded; every
# PDF viewing application is required to have them. This results in very
# light PDF files you can use directly in LaTeX or ConTeXt documents
# generated with pdfTeX, without any conversion.
# These fonts are: Helvetica, Helvetica-Bold, Helvetica-Oblique,
# Helvetica-BoldOblique, Courier, Courier-Bold, Courier-Oblique,
# Courier-BoldOblique, Times-Roman, Times-Bold, Times-Italic,
# Times-BoldItalic, Symbol, ZapfDingbats.
#
# Some tricky points:
#
# 1. The clip path can only be widened by popping from the state
# stack. Thus the state must be pushed onto the stack before narrowing
# the clip path. This is taken care of by GraphicsContextPdf.
#
# 2. Sometimes it is necessary to refer to something (e.g., font,
# image, or extended graphics state, which contains the alpha value)
# in the page stream by a name that needs to be defined outside the
# stream. PdfFile provides the methods fontName, imageObject, and
# alphaState for this purpose. The implementations of these methods
# should perhaps be generalized.
# TODOs:
#
# * encoding of fonts, including mathtext fonts and Unicode support
# * TTF support has lots of small TODOs, e.g., how do you know if a font
# is serif/sans-serif, or symbolic/non-symbolic?
# * draw_quad_mesh
def _fill(strings, linelen=75):
"""
Make one string from sequence of strings, with whitespace in between.
The whitespace is chosen to form lines of at most *linelen* characters,
if possible.
"""
currpos = 0
lasti = 0
result = []
for i, s in enumerate(strings):
length = len(s)
if currpos + length < linelen:
currpos += length + 1
else:
result.append(b' '.join(strings[lasti:i]))
lasti = i
currpos = length
result.append(b' '.join(strings[lasti:]))
return b'\n'.join(result)
def _create_pdf_info_dict(backend, metadata):
    """
    Create a PDF infoDict based on user-supplied metadata.

    A default ``Creator``, ``Producer``, and ``CreationDate`` are added, though
    the user metadata may override it. The date may be the current time, or a
    time set by the ``SOURCE_DATE_EPOCH`` environment variable.

    Metadata is verified to have the correct keys and their expected types. Any
    unknown keys/types will raise a warning.

    Parameters
    ----------
    backend : str
        The name of the backend to use in the Producer value.

    metadata : dict[str, Union[str, datetime, Name]]
        A dictionary of metadata supplied by the user with information
        following the PDF specification, also defined in
        `~.backend_pdf.PdfPages` below.

        If any value is *None*, then the key will be removed. This can be used
        to remove any pre-defined values.

    Returns
    -------
    dict[str, Union[str, datetime, Name]]
        A validated dictionary of metadata.
    """
    # get source date from SOURCE_DATE_EPOCH, if set
    # See https://reproducible-builds.org/specs/source-date-epoch/
    source_date_epoch = os.getenv("SOURCE_DATE_EPOCH")
    if source_date_epoch:
        source_date = datetime.fromtimestamp(int(source_date_epoch), timezone.utc)
        # NOTE(review): the datetime above is already UTC-aware, so this
        # replace() looks redundant — confirm before removing.
        source_date = source_date.replace(tzinfo=UTC)
    else:
        source_date = datetime.today()

    info = {
        'Creator': f'Matplotlib v{mpl.__version__}, https://matplotlib.org',
        'Producer': f'Matplotlib {backend} backend v{mpl.__version__}',
        'CreationDate': source_date,
        **metadata
    }
    # A value of None removes a (possibly pre-defined) key.
    info = {k: v for (k, v) in info.items() if v is not None}

    # Per-key validators; 'text_for_warning' is interpolated into the
    # warning message emitted for values that fail validation.
    def is_string_like(x):
        return isinstance(x, str)
    is_string_like.text_for_warning = "an instance of str"

    def is_date(x):
        return isinstance(x, datetime)
    is_date.text_for_warning = "an instance of datetime.datetime"

    def check_trapped(x):
        # 'Trapped' may be given as a Name object or a plain string.
        if isinstance(x, Name):
            return x.name in (b'True', b'False', b'Unknown')
        else:
            return x in ('True', 'False', 'Unknown')
    check_trapped.text_for_warning = 'one of {"True", "False", "Unknown"}'

    keywords = {
        'Title': is_string_like,
        'Author': is_string_like,
        'Subject': is_string_like,
        'Keywords': is_string_like,
        'Creator': is_string_like,
        'Producer': is_string_like,
        'CreationDate': is_date,
        'ModDate': is_date,
        'Trapped': check_trapped,
    }
    for k in info:
        if k not in keywords:
            _api.warn_external(f'Unknown infodict keyword: {k!r}. '
                               f'Must be one of {set(keywords)!r}.')
        elif not keywords[k](info[k]):
            _api.warn_external(f'Bad value for infodict keyword {k}. '
                               f'Got {info[k]!r} which is not '
                               f'{keywords[k].text_for_warning}.')
    if 'Trapped' in info:
        # Normalize string values to a PDF Name object.
        info['Trapped'] = Name(info['Trapped'])

    return info
def _datetime_to_pdf(d):
"""
Convert a datetime to a PDF string representing it.
Used for PDF and PGF.
"""
r = d.strftime('D:%Y%m%d%H%M%S')
z = d.utcoffset()
if z is not None:
z = z.seconds
else:
if time.daylight:
z = time.altzone
else:
z = time.timezone
if z == 0:
r += 'Z'
elif z < 0:
r += "+%02d'%02d'" % ((-z) // 3600, (-z) % 3600)
else:
r += "-%02d'%02d'" % (z // 3600, z % 3600)
return r
def _calculate_quad_point_coordinates(x, y, width, height, angle=0):
"""
Calculate the coordinates of rectangle when rotated by angle around x, y
"""
angle = math.radians(-angle)
sin_angle = math.sin(angle)
cos_angle = math.cos(angle)
a = x + height * sin_angle
b = y + height * cos_angle
c = x + width * cos_angle + height * sin_angle
d = y - width * sin_angle + height * cos_angle
e = x + width * cos_angle
f = y - width * sin_angle
return ((x, y), (e, f), (c, d), (a, b))
def _get_coordinates_of_block(x, y, width, height, angle=0):
    """
    Get the coordinates of rotated rectangle and rectangle that covers the
    rotated rectangle.
    """
    corners = _calculate_quad_point_coordinates(x, y, width,
                                                height, angle)

    # Axis-aligned bounding box of the rotated rectangle, padded slightly so
    # that QuadPoints lies inside Rect.  The PDF docs say QuadPoints should
    # be ignored if any point lies outside Rect, but for Acrobat it is
    # enough that QuadPoints is on the border of Rect.
    pad = 0.00001 if angle % 90 else 0
    xs = [px for px, _ in corners]
    ys = [py for _, py in corners]
    bbox = (min(xs) - pad, min(ys) - pad, max(xs) + pad, max(ys) + pad)

    flattened = tuple(itertools.chain.from_iterable(corners))
    return (flattened, bbox)
def _get_link_annotation(gc, x, y, width, height, angle=0):
    """
    Create a link annotation object for embedding URLs.
    """
    quadpoints, rect = _get_coordinates_of_block(x, y, width, height, angle)
    annotation = {
        'Type': Name('Annot'),
        'Subtype': Name('Link'),
        'Rect': rect,
        'Border': [0, 0, 0],
        'A': {
            'S': Name('URI'),
            'URI': gc.get_url(),
        },
    }
    if angle % 90:
        # Rotated rectangles additionally need explicit QuadPoints.
        annotation['QuadPoints'] = quadpoints
    return annotation
# PDF strings are supposed to be able to include any eight-bit data, except
# that unbalanced parens and backslashes must be escaped by a backslash.
# However, sf bug #2708559 shows that the carriage return character may get
# read as a newline; these characters correspond to \gamma and \Omega in TeX's
# math font encoding. Escaping them fixes the bug.
# Translation table applied by pdfRepr() to latin-1-decoded byte strings.
_str_escapes = str.maketrans({
    '\\': '\\\\', '(': '\\(', ')': '\\)', '\n': '\\n', '\r': '\\r'})
def pdfRepr(obj):
    """
    Map Python objects to PDF syntax.

    Returns the bytes of the PDF representation of *obj*.  Dispatches on
    type; the order of the isinstance checks below is significant (e.g.
    bool must be tested before int).
    """
    # Some objects defined later have their own pdfRepr method.
    if hasattr(obj, 'pdfRepr'):
        return obj.pdfRepr()
    # Floats. PDF does not have exponential notation (1.0e-10) so we
    # need to use %f with some precision.  Perhaps the precision
    # should adapt to the magnitude of the number?
    elif isinstance(obj, (float, np.floating)):
        if not np.isfinite(obj):
            raise ValueError("Can only output finite numbers in PDF")
        r = b"%.10f" % obj
        # Strip trailing zeros (and a trailing decimal point) for compactness.
        return r.rstrip(b'0').rstrip(b'.')
    # Booleans. Needs to be tested before integers since
    # isinstance(True, int) is true.
    elif isinstance(obj, bool):
        return [b'false', b'true'][obj]
    # Integers are written as such.
    elif isinstance(obj, (int, np.integer)):
        return b"%d" % obj
    # Non-ASCII Unicode strings are encoded in UTF-16BE with byte-order mark;
    # either way the result recurses into the bytes branch below.
    elif isinstance(obj, str):
        return pdfRepr(obj.encode('ascii') if obj.isascii()
                       else codecs.BOM_UTF16_BE + obj.encode('UTF-16BE'))
    # Strings are written in parentheses, with backslashes and parens
    # escaped. Actually balanced parens are allowed, but it is
    # simpler to escape them all. TODO: cut long strings into lines;
    # I believe there is some maximum line length in PDF.
    # Despite the extra decode/encode, translate is faster than regex.
    elif isinstance(obj, bytes):
        return (
            b'(' +
            obj.decode('latin-1').translate(_str_escapes).encode('latin-1')
            + b')')
    # Dictionaries. The keys must be PDF names, so if we find strings
    # there, we make Name objects from them. The values may be
    # anything, so the caller must ensure that PDF names are
    # represented as Name objects.
    elif isinstance(obj, dict):
        return _fill([
            b"<<",
            *[Name(k).pdfRepr() + b" " + pdfRepr(v) for k, v in obj.items()],
            b">>",
        ])
    # Lists.
    elif isinstance(obj, (list, tuple)):
        return _fill([b"[", *[pdfRepr(val) for val in obj], b"]"])
    # The null keyword.
    elif obj is None:
        return b'null'
    # A date.
    elif isinstance(obj, datetime):
        return pdfRepr(_datetime_to_pdf(obj))
    # A bounding box
    elif isinstance(obj, BboxBase):
        return _fill([pdfRepr(val) for val in obj.bounds])
    else:
        raise TypeError(f"Don't know a PDF representation for {type(obj)} "
                        "objects")
def _font_supports_glyph(fonttype, glyph):
"""
Returns True if the font is able to provide codepoint *glyph* in a PDF.
For a Type 3 font, this method returns True only for single-byte
characters. For Type 42 fonts this method return True if the character is
from the Basic Multilingual Plane.
"""
if fonttype == 3:
return glyph <= 255
if fonttype == 42:
return glyph <= 65535
raise NotImplementedError()
class Reference:
    """
    PDF reference object.

    Use PdfFile.reserveObject() to create References.
    """

    def __init__(self, id):
        # Numeric id of the referenced indirect object.
        self.id = id

    def __repr__(self):
        return "<Reference %d>" % self.id

    def pdfRepr(self):
        # Indirect references always use generation number 0 here.
        return b"%d 0 R" % self.id

    def write(self, contents, file):
        """Emit *contents* as the indirect object this reference points to."""
        emit = file.write
        emit(b"%d 0 obj\n" % self.id)
        emit(pdfRepr(contents))
        emit(b"\nendobj\n")
@total_ordering
class Name:
    """PDF name object."""
    __slots__ = ('name',)
    # Escape every byte outside the printable range '!'..'~' as #xx,
    # per the PDF name-encoding rules.
    _hexify = {c: '#%02x' % c
               for c in range(256)
               if not ord('!') <= c <= ord('~')}

    def __init__(self, name):
        if isinstance(name, Name):
            # Copy constructor: share the already-encoded bytes.
            self.name = name.name
            return
        if isinstance(name, bytes):
            name = name.decode('ascii')
        self.name = name.translate(self._hexify).encode('ascii')

    def __repr__(self):
        return "<Name %s>" % self.name

    def __str__(self):
        return '/' + self.name.decode('ascii')

    def __eq__(self, other):
        return isinstance(other, Name) and self.name == other.name

    def __lt__(self, other):
        return isinstance(other, Name) and self.name < other.name

    def __hash__(self):
        return hash(self.name)

    def pdfRepr(self):
        return b'/' + self.name
class Verbatim:
    """Store verbatim PDF command content for later inclusion in the stream."""

    def __init__(self, x):
        # Raw bytes, emitted unmodified by pdfRepr().
        self._x = x

    def pdfRepr(self):
        return self._x
class Op(Enum):
    """PDF operators (not an exhaustive list)."""
    close_fill_stroke = b'b'
    fill_stroke = b'B'
    fill = b'f'
    closepath = b'h'
    close_stroke = b's'
    stroke = b'S'
    endpath = b'n'
    begin_text = b'BT'
    end_text = b'ET'
    curveto = b'c'
    rectangle = b're'
    lineto = b'l'
    moveto = b'm'
    concat_matrix = b'cm'
    use_xobject = b'Do'
    setgray_stroke = b'G'
    setgray_nonstroke = b'g'
    setrgb_stroke = b'RG'
    setrgb_nonstroke = b'rg'
    setcolorspace_stroke = b'CS'
    setcolorspace_nonstroke = b'cs'
    setcolor_stroke = b'SCN'
    setcolor_nonstroke = b'scn'
    setdash = b'd'
    setlinejoin = b'j'
    setlinecap = b'J'
    setgstate = b'gs'
    gsave = b'q'
    grestore = b'Q'
    textpos = b'Td'
    selectfont = b'Tf'
    textmatrix = b'Tm'
    show = b'Tj'
    showkern = b'TJ'
    setlinewidth = b'w'
    clip = b'W'
    shading = b'sh'

    def pdfRepr(self):
        # The member value is already the operator's byte string.
        return self.value

    @classmethod
    def paint_path(cls, fill, stroke):
        """
        Return the PDF operator to paint a path.

        Parameters
        ----------
        fill : bool
            Fill the path with the fill color.
        stroke : bool
            Stroke the outline of the path with the line color.
        """
        # Dispatch on the (fill, stroke) combination.
        return {
            (True, True): cls.fill_stroke,
            (True, False): cls.fill,
            (False, True): cls.stroke,
            (False, False): cls.endpath,
        }[(bool(fill), bool(stroke))]
class Stream:
    """
    PDF stream object.

    This has no pdfRepr method. Instead, call begin(), then output the
    contents of the stream by calling write(), and finally call end().
    """
    __slots__ = ('id', 'len', 'pdfFile', 'file', 'compressobj', 'extra', 'pos')

    def __init__(self, id, len, file, extra=None, png=None):
        """
        Parameters
        ----------
        id : int
            Object id of the stream.
        len : Reference or None
            An unused Reference object for the length of the stream;
            None means to use a memory buffer so the length can be inlined.
        file : PdfFile
            The underlying object to write the stream to.
        extra : dict from Name to anything, or None
            Extra key-value pairs to include in the stream header.
        png : dict or None
            If the data is already png encoded, the decode parameters.
        """
        self.id = id            # object id
        self.len = len          # id of length object
        self.pdfFile = file
        self.file = file.fh     # file to which the stream is written
        self.compressobj = None  # compression object
        if extra is None:
            self.extra = dict()
        else:
            # Copy so the caller's dict is not mutated by _writeHeader below.
            self.extra = extra.copy()
        if png is not None:
            self.extra.update({'Filter': Name('FlateDecode'),
                               'DecodeParms': png})

        self.pdfFile.recordXref(self.id)
        # PNG data is already deflate-compressed; don't compress twice.
        if mpl.rcParams['pdf.compression'] and not png:
            self.compressobj = zlib.compressobj(
                mpl.rcParams['pdf.compression'])
        if self.len is None:
            # No length object reserved: buffer in memory so the length can
            # be written inline in the header when end() is called.
            self.file = BytesIO()
        else:
            self._writeHeader()
            self.pos = self.file.tell()

    def _writeHeader(self):
        # Emit "<id> 0 obj <<...>> stream" with the stream dictionary.
        write = self.file.write
        write(b"%d 0 obj\n" % self.id)
        dict = self.extra  # NOTE: shadows the builtin 'dict' in this scope.
        dict['Length'] = self.len
        if mpl.rcParams['pdf.compression']:
            dict['Filter'] = Name('FlateDecode')

        write(pdfRepr(dict))
        write(b"\nstream\n")

    def end(self):
        """Finalize stream."""
        self._flush()
        if self.len is None:
            # Buffered case: now that the total length is known, write the
            # header and copy the buffered contents to the real file.
            contents = self.file.getvalue()
            self.len = len(contents)
            self.file = self.pdfFile.fh
            self._writeHeader()
            self.file.write(contents)
            self.file.write(b"\nendstream\nendobj\n")
        else:
            # Indirect-length case: fill in the reserved length object.
            length = self.file.tell() - self.pos
            self.file.write(b"\nendstream\nendobj\n")
            self.pdfFile.writeObject(self.len, length)

    def write(self, data):
        """Write some data on the stream."""
        if self.compressobj is None:
            self.file.write(data)
        else:
            compressed = self.compressobj.compress(data)
            self.file.write(compressed)

    def _flush(self):
        """Flush the compression object."""
        if self.compressobj is not None:
            compressed = self.compressobj.flush()
            self.file.write(compressed)
            self.compressobj = None
def _get_pdf_charprocs(font_path, glyph_ids):
    """
    Return the PDF Type 3 "CharProcs" entries for glyphs of a font.

    Parameters
    ----------
    font_path : str
        Path to the font file to load.
    glyph_ids : iterable of int
        Glyph indices to convert.

    Returns
    -------
    dict
        Maps each glyph name to the bytes of its Type 3 charstring: a
        ``d1`` metrics operator followed by path-painting operators.
    """
    font = get_font(font_path, hinting_factor=1)
    conv = 1000 / font.units_per_EM  # Conversion to PS units (1/1000's).
    procs = {}
    for glyph_id in glyph_ids:
        g = font.load_glyph(glyph_id, LoadFlags.NO_SCALE)
        # NOTE: We should be using round(), but instead use
        # "(x+.5).astype(int)" to keep backcompat with the old ttconv code
        # (this is different for negative x's).
        d1 = (np.array([g.horiAdvance, 0, *g.bbox]) * conv + .5).astype(int)
        v, c = font.get_path()
        v = (v * 64).astype(int)  # Back to TrueType's internal units (1/64's).
        # Backcompat with old ttconv code: control points between two quads are
        # omitted if they are exactly at the midpoint between the control of
        # the quad before and the quad after, but ttconv used to interpolate
        # *after* conversion to PS units, causing floating point errors.  Here
        # we reproduce ttconv's logic, detecting these "implicit" points and
        # re-interpolating them.  Note that occasionally (e.g. with DejaVu Sans
        # glyph "0") a point detected as "implicit" is actually explicit, and
        # will thus be shifted by 1.
        quads, = np.nonzero(c == 3)
        quads_on = quads[1::2]
        quads_mid_on = np.array(
            sorted({*quads_on} & {*(quads - 1)} & {*(quads + 1)}), int)
        implicit = quads_mid_on[
            (v[quads_mid_on]  # As above, use astype(int), not // division
             == ((v[quads_mid_on - 1] + v[quads_mid_on + 1]) / 2).astype(int))
            .all(axis=1)]
        if (font.postscript_name, glyph_id) in [
                ("DejaVuSerif-Italic", 77),  # j
                ("DejaVuSerif-Italic", 135),  # \AA
        ]:
            v[:, 0] -= 1  # Hard-coded backcompat (FreeType shifts glyph by 1).
        v = (v * conv + .5).astype(int)  # As above re: truncation vs rounding.
        v[implicit] = ((  # Fix implicit points; again, truncate.
            (v[implicit - 1] + v[implicit + 1]) / 2).astype(int))
        procs[font.get_glyph_name(glyph_id)] = (
            " ".join(map(str, d1)).encode("ascii") + b" d1\n"
            + _path.convert_to_string(
                Path(v, c), None, None, False, None, -1,
                # no code for quad Beziers triggers auto-conversion to cubics.
                [b"m", b"l", b"", b"c", b"h"], True)
            + b"f")
    return procs
class PdfFile:
"""PDF file object."""
def __init__(self, filename, metadata=None):
    """
    Parameters
    ----------
    filename : str or path-like or file-like
        Output target; if a string, a file will be opened for writing.
    metadata : dict from strings to strings and dates
        Information dictionary object (see PDF reference section 10.2.1
        'Document Information Dictionary'), e.g.:
        ``{'Creator': 'My software', 'Author': 'Me', 'Title': 'Awesome'}``.

        The standard keys are 'Title', 'Author', 'Subject', 'Keywords',
        'Creator', 'Producer', 'CreationDate', 'ModDate', and
        'Trapped'. Values have been predefined for 'Creator', 'Producer'
        and 'CreationDate'. They can be removed by setting them to `None`.
    """
    super().__init__()
    self._object_seq = itertools.count(1)  # consumed by reserveObject
    self.xrefTable = [[0, 65535, 'the zero object']]
    self.passed_in_file_object = False
    self.original_file_like = None
    self.tell_base = 0
    fh, opened = cbook.to_filehandle(filename, "wb", return_opened=True)
    if not opened:
        try:
            self.tell_base = filename.tell()
        except OSError:
            # Target doesn't support tell(); buffer the whole document in
            # memory and copy it out in close().
            fh = BytesIO()
            self.original_file_like = filename
        else:
            fh = filename
            self.passed_in_file_object = True
    self.fh = fh
    self.currentstream = None  # stream object to write to, if any
    fh.write(b"%PDF-1.4\n")    # 1.4 is the first version to have alpha
    # Output some eight-bit chars as a comment so various utilities
    # recognize the file as binary by looking at the first few
    # lines (see note in section 3.4.1 of the PDF reference).
    fh.write(b"%\254\334 \253\272\n")

    # Reserve ids for the top-level document objects; their contents are
    # written out later (mostly in finalize()).
    self.rootObject = self.reserveObject('root')
    self.pagesObject = self.reserveObject('pages')
    self.pageList = []
    self.fontObject = self.reserveObject('fonts')
    self._extGStateObject = self.reserveObject('extended graphics states')
    self.hatchObject = self.reserveObject('tiling patterns')
    self.gouraudObject = self.reserveObject('Gouraud triangles')
    self.XObjectObject = self.reserveObject('external objects')
    self.resourceObject = self.reserveObject('resources')

    root = {'Type': Name('Catalog'),
            'Pages': self.pagesObject}
    self.writeObject(self.rootObject, root)

    self.infoDict = _create_pdf_info_dict('pdf', metadata or {})

    self.fontNames = {}  # maps filenames to internal font names
    self._internal_font_seq = (Name(f'F{i}') for i in itertools.count(1))
    self.dviFontInfo = {}  # maps dvi font names to embedding information
    # differently encoded Type-1 fonts may share the same descriptor
    self.type1Descriptors = {}
    self._character_tracker = _backend_pdf_ps.CharacterTracker()

    self.alphaStates = {}  # maps alpha values to graphics state objects
    self._alpha_state_seq = (Name(f'A{i}') for i in itertools.count(1))
    self._soft_mask_states = {}
    self._soft_mask_seq = (Name(f'SM{i}') for i in itertools.count(1))
    self._soft_mask_groups = []
    self._hatch_patterns = {}
    self._hatch_pattern_seq = (Name(f'H{i}') for i in itertools.count(1))
    self.gouraudTriangles = []

    self._images = {}
    self._image_seq = (Name(f'I{i}') for i in itertools.count(1))

    self.markers = {}
    self.multi_byte_charprocs = {}

    self.paths = []

    # A list of annotations for each page. Each entry is a tuple of the
    # overall Annots object reference that's inserted into the page object,
    # followed by a list of the actual annotations.
    self._annotations = []
    # For annotations added before a page is created; mostly for the
    # purpose of newTextnote.
    self.pageAnnotations = []

    # The PDF spec recommends to include every procset
    procsets = [Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]

    # Write resource dictionary.
    # Possibly TODO: more general ExtGState (graphics state dictionaries)
    #                ColorSpace Pattern Shading Properties
    resources = {'Font': self.fontObject,
                 'XObject': self.XObjectObject,
                 'ExtGState': self._extGStateObject,
                 'Pattern': self.hatchObject,
                 'Shading': self.gouraudObject,
                 'ProcSet': procsets}
    self.writeObject(self.resourceObject, resources)
def newPage(self, width, height):
    """
    Begin a new page of *width* x *height* inches (72 points per inch).

    Closes any stream left open, writes the page object, and opens the
    content stream that subsequent drawing commands are written to.
    """
    self.endStream()

    self.width, self.height = width, height
    contentObject = self.reserveObject('page contents')
    annotsObject = self.reserveObject('annotations')
    thePage = {'Type': Name('Page'),
               'Parent': self.pagesObject,
               'Resources': self.resourceObject,
               'MediaBox': [0, 0, 72 * width, 72 * height],
               'Contents': contentObject,
               'Annots': annotsObject,
               }
    pageObject = self.reserveObject('page')
    self.writeObject(pageObject, thePage)
    self.pageList.append(pageObject)
    self._annotations.append((annotsObject, self.pageAnnotations))

    self.beginStream(contentObject.id,
                     self.reserveObject('length of content stream'))
    # Initialize the pdf graphics state to match the default Matplotlib
    # graphics context (colorspace and joinstyle).
    self.output(Name('DeviceRGB'), Op.setcolorspace_stroke)
    self.output(Name('DeviceRGB'), Op.setcolorspace_nonstroke)
    self.output(GraphicsContextPdf.joinstyles['round'], Op.setlinejoin)

    # Clear the list of annotations for the next page
    self.pageAnnotations = []
def newTextnote(self, text, positionRect=None):
    """
    Add a text annotation ("note") to the current page.

    Parameters
    ----------
    text : str
        Contents of the note.
    positionRect : list of 4 numbers, default: [-100, -100, 0, 0]
        Annotation rectangle in PDF page coordinates; the default places
        the note outside the visible page area.
    """
    # Use a None sentinel instead of a mutable default list: the previous
    # default ([-100, -100, 0, 0]) was a single list shared across all
    # calls, which callers could accidentally mutate for everyone.
    if positionRect is None:
        positionRect = [-100, -100, 0, 0]
    # Create a new annotation of type text
    theNote = {'Type': Name('Annot'),
               'Subtype': Name('Text'),
               'Contents': text,
               'Rect': positionRect,
               }
    self.pageAnnotations.append(theNote)
def _get_subsetted_psname(self, ps_name, charmap):
def toStr(n, base):
if n < base:
return string.ascii_uppercase[n]
else:
return (
toStr(n // base, base) + string.ascii_uppercase[n % base]
)
# encode to string using base 26
hashed = hash(frozenset(charmap.keys())) % ((sys.maxsize + 1) * 2)
prefix = toStr(hashed, 26)
# get first 6 characters from prefix
return prefix[:6] + "+" + ps_name
def finalize(self):
    """Write out the various deferred objects and the pdf end matter."""

    self.endStream()
    self._write_annotations()
    self.writeFonts()
    self.writeExtGSTates()
    self._write_soft_mask_groups()
    self.writeHatches()
    self.writeGouraudTriangles()
    # Gather every XObject (images, marker templates, multi-byte Type 3
    # charprocs, path collection templates) into one resource dictionary.
    xobjects = {
        name: ob for image, name, ob in self._images.values()}
    for tup in self.markers.values():
        xobjects[tup[0]] = tup[1]
    for name, value in self.multi_byte_charprocs.items():
        xobjects[name] = value
    for name, path, trans, ob, join, cap, padding, filled, stroked \
            in self.paths:
        xobjects[name] = ob
    self.writeObject(self.XObjectObject, xobjects)
    self.writeImages()
    self.writeMarkers()
    self.writePathCollectionTemplates()
    self.writeObject(self.pagesObject,
                     {'Type': Name('Pages'),
                      'Kids': self.pageList,
                      'Count': len(self.pageList)})
    self.writeInfoDict()

    # Finalize the file
    self.writeXref()
    self.writeTrailer()
def close(self):
    """Flush all buffers and free all resources."""
    self.endStream()
    if self.passed_in_file_object:
        # The caller owns the file object; just flush it.
        self.fh.flush()
        return
    if self.original_file_like is not None:
        # We buffered into a BytesIO; copy it to the caller's file-like.
        self.original_file_like.write(self.fh.getvalue())
    self.fh.close()
def write(self, data):
    """Write raw bytes, routing them into the open stream if any."""
    target = self.fh if self.currentstream is None else self.currentstream
    target.write(data)
def output(self, *data):
    """Serialize each argument to its pdf representation and write it."""
    serialized = [pdfRepr(item) for item in data]
    self.write(_fill(serialized))
    self.write(b'\n')
def beginStream(self, id, len, extra=None, png=None):
    """Open a new stream object; only one stream may be open at a time."""
    # Internal invariant: streams must not nest.
    assert self.currentstream is None
    self.currentstream = Stream(id, len, self, extra, png)
def endStream(self):
    """Close the currently open stream, if any (no-op otherwise)."""
    if self.currentstream is None:
        return
    self.currentstream.end()
    self.currentstream = None
def outputStream(self, ref, data, *, extra=None):
    """Write *data* as a complete stream object under reference *ref*."""
    self.beginStream(ref.id, None, extra)
    self.currentstream.write(data)
    self.endStream()
def _write_annotations(self):
    """Write out the per-page annotation arrays collected while drawing."""
    for annots_ref, page_annotations in self._annotations:
        self.writeObject(annots_ref, page_annotations)
def fontName(self, fontprop):
    """
    Select a font based on fontprop and return a name suitable for
    Op.selectfont. If fontprop is a string, it will be interpreted
    as the filename of the font.
    """
    if isinstance(fontprop, str):
        filenames = [fontprop]
    elif mpl.rcParams['pdf.use14corefonts']:
        # Restrict the search to AFM files for the 14 core fonts.
        filenames = _fontManager._find_fonts_by_props(
            fontprop, fontext='afm', directory=RendererPdf._afm_font_dir
        )
    else:
        filenames = _fontManager._find_fonts_by_props(fontprop)
    # Register every matching file, but return the name assigned to the
    # first one (the font findfont itself would have selected).
    first_Fx = None
    for fname in filenames:
        Fx = self.fontNames.get(fname)
        if not first_Fx:
            first_Fx = Fx
        if Fx is None:
            Fx = next(self._internal_font_seq)
            self.fontNames[fname] = Fx
            _log.debug('Assigning font %s = %r', Fx, fname)
            if not first_Fx:
                first_Fx = Fx

    # find_fontsprop's first value always adheres to
    # findfont's value, so technically no behaviour change
    return first_Fx
def dviFontName(self, dvifont):
    """
    Given a dvi font object, return a name suitable for Op.selectfont.
    This registers the font information in ``self.dviFontInfo`` if not yet
    registered.
    """
    cached = self.dviFontInfo.get(dvifont.texname)
    if cached is not None:
        return cached.pdfname

    # Look the TeX font up in pdftex.map to find its Type-1 file.
    tex_font_map = dviread.PsfontsMap(dviread.find_tex_file('pdftex.map'))
    psfont = tex_font_map[dvifont.texname]
    if psfont.filename is None:
        raise ValueError(
            "No usable font file found for {} (TeX: {}); "
            "the font may lack a Type-1 version"
            .format(psfont.psname, dvifont.texname))

    pdfname = next(self._internal_font_seq)
    _log.debug('Assigning font %s = %s (dvi)', pdfname, dvifont.texname)
    self.dviFontInfo[dvifont.texname] = types.SimpleNamespace(
        dvifont=dvifont,
        pdfname=pdfname,
        fontfile=psfont.filename,
        basefont=psfont.psname,
        encodingfile=psfont.encoding,
        effects=psfont.effects)
    return pdfname
def writeFonts(self):
    """Embed every font used in this document and write the font dict."""
    fonts = {}
    # Type-1 fonts coming from dvi (usetex) output.
    for dviname, info in sorted(self.dviFontInfo.items()):
        _log.debug('Embedding Type-1 font %s from dvi.', dviname)
        fonts[info.pdfname] = self._embedTeXFont(info)
    # Fonts registered through fontName().
    for filename in sorted(self.fontNames):
        Fx = self.fontNames[filename]
        _log.debug('Embedding font %s.', filename)
        if filename.endswith('.afm'):
            # from pdf.use14corefonts: referenced, not embedded.
            _log.debug('Writing AFM font.')
            fonts[Fx] = self._write_afm_font(filename)
        else:
            # a normal TrueType font
            _log.debug('Writing TrueType font.')
            chars = self._character_tracker.used.get(filename)
            if chars:
                fonts[Fx] = self.embedTTF(filename, chars)
    self.writeObject(self.fontObject, fonts)
def _write_afm_font(self, filename):
    """Write a font dict referencing (not embedding) an AFM core font."""
    with open(filename, 'rb') as fh:
        fontname = AFM(fh).get_fontname()
    fontdictObject = self.reserveObject('font dictionary')
    self.writeObject(fontdictObject, {
        'Type': Name('Font'),
        'Subtype': Name('Type1'),
        'BaseFont': Name(fontname),
        'Encoding': Name('WinAnsiEncoding'),
    })
    return fontdictObject
def _embedTeXFont(self, fontinfo):
    """Embed a Type-1 font from dvi output; return its font dict ref."""
    _log.debug('Embedding TeX font %s - fontinfo=%s',
               fontinfo.dvifont.texname, fontinfo.__dict__)

    # Widths
    widthsObject = self.reserveObject('font widths')
    self.writeObject(widthsObject, fontinfo.dvifont.widths)

    # Font dictionary
    fontdictObject = self.reserveObject('font dictionary')
    fontdict = {
        'Type': Name('Font'),
        'Subtype': Name('Type1'),
        'FirstChar': 0,
        'LastChar': len(fontinfo.dvifont.widths) - 1,
        'Widths': widthsObject,
        }

    # Encoding (if needed)
    if fontinfo.encodingfile is not None:
        fontdict['Encoding'] = {
            'Type': Name('Encoding'),
            'Differences': [
                0, *map(Name, dviread._parse_enc(fontinfo.encodingfile))],
        }

    # If no file is specified, stop short
    if fontinfo.fontfile is None:
        _log.warning(
            "Because of TeX configuration (pdftex.map, see updmap option "
            "pdftexDownloadBase14) the font %s is not embedded. This is "
            "deprecated as of PDF 1.5 and it may cause the consumer "
            "application to show something that was not intended.",
            fontinfo.basefont)
        fontdict['BaseFont'] = Name(fontinfo.basefont)
        self.writeObject(fontdictObject, fontdict)
        return fontdictObject

    # We have a font file to embed - read it in and apply any effects
    t1font = _type1font.Type1Font(fontinfo.fontfile)
    if fontinfo.effects:
        t1font = t1font.transform(fontinfo.effects)
    fontdict['BaseFont'] = Name(t1font.prop['FontName'])

    # Font descriptors may be shared between differently encoded
    # Type-1 fonts, so only create a new descriptor if there is no
    # existing descriptor for this font.
    effects = (fontinfo.effects.get('slant', 0.0),
               fontinfo.effects.get('extend', 1.0))
    fontdesc = self.type1Descriptors.get((fontinfo.fontfile, effects))
    if fontdesc is None:
        fontdesc = self.createType1Descriptor(t1font, fontinfo.fontfile)
        self.type1Descriptors[(fontinfo.fontfile, effects)] = fontdesc
    fontdict['FontDescriptor'] = fontdesc
    self.writeObject(fontdictObject, fontdict)
    return fontdictObject
def createType1Descriptor(self, t1font, fontfile):
    # Create and write the font descriptor and the font file
    # of a Type-1 font
    fontdescObject = self.reserveObject('font descriptor')
    fontfileObject = self.reserveObject('font file')
    italic_angle = t1font.prop['ItalicAngle']
    fixed_pitch = t1font.prop['isFixedPitch']

    # Build the FontDescriptor "Flags" bit field; the `if 0:`/`if 1:`
    # branches are deliberate placeholders for properties not derived yet.
    flags = 0
    # fixed width
    if fixed_pitch:
        flags |= 1 << 0
    # TODO: serif
    if 0:
        flags |= 1 << 1
    # TODO: symbolic (most TeX fonts are)
    if 1:
        flags |= 1 << 2
    # non-symbolic
    else:
        flags |= 1 << 5
    # italic
    if italic_angle:
        flags |= 1 << 6
    # TODO: all caps
    if 0:
        flags |= 1 << 16
    # TODO: small caps
    if 0:
        flags |= 1 << 17
    # TODO: force bold
    if 0:
        flags |= 1 << 18

    ft2font = get_font(fontfile)
    descriptor = {
        'Type': Name('FontDescriptor'),
        'FontName': Name(t1font.prop['FontName']),
        'Flags': flags,
        'FontBBox': ft2font.bbox,
        'ItalicAngle': italic_angle,
        'Ascent': ft2font.ascender,
        'Descent': ft2font.descender,
        'CapHeight': 1000,  # TODO: find this out
        'XHeight': 500,  # TODO: this one too
        'FontFile': fontfileObject,
        'FontFamily': t1font.prop['FamilyName'],
        'StemV': 50,  # TODO
        # (see also revision 3874; but not all TeX distros have AFM files!)
        # 'FontWeight': a number where 400 = Regular, 700 = Bold
        }
    self.writeObject(fontdescObject, descriptor)
    # Embed the clear-text and encrypted segments of the Type-1 file;
    # Length3 = 0 means the trailing fixed-content segment is omitted.
    self.outputStream(fontfileObject, b"".join(t1font.parts[:2]),
                      extra={'Length1': len(t1font.parts[0]),
                             'Length2': len(t1font.parts[1]),
                             'Length3': 0})
    return fontdescObject
def _get_xobject_glyph_name(self, filename, glyph_name):
    """Return a unique XObject name for *glyph_name* of font *filename*."""
    fontname = self.fontName(filename)
    stem = os.path.splitext(os.path.basename(filename))[0]
    return "-".join([fontname.name.decode(), stem, glyph_name])
# Template for a ToUnicode CMap stream, mapping 2-byte CIDs back to
# Unicode code points; instantiated with (group count, bfrange lines).
_identityToUnicodeCMap = b"""/CIDInit /ProcSet findresource begin
12 dict begin
begincmap
/CIDSystemInfo
<< /Registry (Adobe)
/Ordering (UCS)
/Supplement 0
>> def
/CMapName /Adobe-Identity-UCS def
/CMapType 2 def
1 begincodespacerange
<0000> <ffff>
endcodespacerange
%d beginbfrange
%s
endbfrange
endcmap
CMapName currentdict /CMap defineresource pop
end
end"""
def embedTTF(self, filename, characters):
    """Embed the TTF font from the named file into the document."""
    font = get_font(filename)
    fonttype = mpl.rcParams['pdf.fonttype']

    def cvt(length, upe=font.units_per_EM, nearest=True):
        """Convert font coordinates to PDF glyph coordinates."""
        value = length / upe * 1000
        if nearest:
            return round(value)
        # Best(?) to round away from zero for bounding boxes and the like.
        if value < 0:
            return math.floor(value)
        else:
            return math.ceil(value)

    def embedTTFType3(font, characters, descriptor):
        """The Type 3-specific part of embedding a Truetype font"""
        widthsObject = self.reserveObject('font widths')
        fontdescObject = self.reserveObject('font descriptor')
        fontdictObject = self.reserveObject('font dictionary')
        charprocsObject = self.reserveObject('character procs')
        differencesArray = []
        firstchar, lastchar = 0, 255
        bbox = [cvt(x, nearest=False) for x in font.bbox]

        fontdict = {
            'Type': Name('Font'),
            'BaseFont': ps_name,
            'FirstChar': firstchar,
            'LastChar': lastchar,
            'FontDescriptor': fontdescObject,
            'Subtype': Name('Type3'),
            'Name': descriptor['FontName'],
            'FontBBox': bbox,
            'FontMatrix': [.001, 0, 0, .001, 0, 0],
            'CharProcs': charprocsObject,
            'Encoding': {
                'Type': Name('Encoding'),
                'Differences': differencesArray},
            'Widths': widthsObject
            }

        from encodings import cp1252

        # Make the "Widths" array
        def get_char_width(charcode):
            s = ord(cp1252.decoding_table[charcode])
            width = font.load_char(
                s, flags=LoadFlags.NO_SCALE | LoadFlags.NO_HINTING).horiAdvance
            return cvt(width)
        with warnings.catch_warnings():
            # Ignore 'Required glyph missing from current font' warning
            # from ft2font: here we're just building the widths table, but
            # the missing glyphs may not even be used in the actual string.
            warnings.filterwarnings("ignore")
            widths = [get_char_width(charcode)
                      for charcode in range(firstchar, lastchar+1)]
        descriptor['MaxWidth'] = max(widths)

        # Make the "Differences" array, sort the ccodes < 255 from
        # the multi-byte ccodes, and build the whole set of glyph ids
        # that we need from this font.
        glyph_ids = []
        differences = []
        multi_byte_chars = set()
        for c in characters:
            ccode = c
            gind = font.get_char_index(ccode)
            glyph_ids.append(gind)
            glyph_name = font.get_glyph_name(gind)
            if ccode <= 255:
                differences.append((ccode, glyph_name))
            else:
                multi_byte_chars.add(glyph_name)
        differences.sort()

        # Encoding/Differences runs: emit a code only when a run breaks.
        last_c = -2
        for c, name in differences:
            if c != last_c + 1:
                differencesArray.append(c)
            differencesArray.append(Name(name))
            last_c = c

        # Make the charprocs array.
        rawcharprocs = _get_pdf_charprocs(filename, glyph_ids)
        charprocs = {}
        for charname in sorted(rawcharprocs):
            stream = rawcharprocs[charname]
            charprocDict = {}
            # The 2-byte characters are used as XObjects, so they
            # need extra info in their dictionary
            if charname in multi_byte_chars:
                charprocDict = {'Type': Name('XObject'),
                                'Subtype': Name('Form'),
                                'BBox': bbox}
                # Each glyph includes bounding box information,
                # but xpdf and ghostscript can't handle it in a
                # Form XObject (they segfault!!!), so we remove it
                # from the stream here. It's not needed anyway,
                # since the Form XObject includes it in its BBox
                # value.
                stream = stream[stream.find(b"d1") + 2:]
            charprocObject = self.reserveObject('charProc')
            self.outputStream(charprocObject, stream, extra=charprocDict)

            # Send the glyphs with ccode > 255 to the XObject dictionary,
            # and the others to the font itself
            if charname in multi_byte_chars:
                name = self._get_xobject_glyph_name(filename, charname)
                self.multi_byte_charprocs[name] = charprocObject
            else:
                charprocs[charname] = charprocObject

        # Write everything out
        self.writeObject(fontdictObject, fontdict)
        self.writeObject(fontdescObject, descriptor)
        self.writeObject(widthsObject, widths)
        self.writeObject(charprocsObject, charprocs)

        return fontdictObject

    def embedTTFType42(font, characters, descriptor):
        """The Type 42-specific part of embedding a Truetype font"""
        fontdescObject = self.reserveObject('font descriptor')
        cidFontDictObject = self.reserveObject('CID font dictionary')
        type0FontDictObject = self.reserveObject('Type 0 font dictionary')
        cidToGidMapObject = self.reserveObject('CIDToGIDMap stream')
        fontfileObject = self.reserveObject('font file stream')
        wObject = self.reserveObject('Type 0 widths')
        toUnicodeMapObject = self.reserveObject('ToUnicode map')

        subset_str = "".join(chr(c) for c in characters)
        _log.debug("SUBSET %s characters: %s", filename, subset_str)
        with _backend_pdf_ps.get_glyphs_subset(filename, subset_str) as subset:
            fontdata = _backend_pdf_ps.font_as_file(subset)
            _log.debug(
                "SUBSET %s %d -> %d", filename,
                os.stat(filename).st_size, fontdata.getbuffer().nbytes
            )

        # We need this ref for XObjects
        full_font = font

        # reload the font object from the subset
        # (all the necessary data could probably be obtained directly
        # using fontLib.ttLib)
        font = FT2Font(fontdata)

        cidFontDict = {
            'Type': Name('Font'),
            'Subtype': Name('CIDFontType2'),
            'BaseFont': ps_name,
            'CIDSystemInfo': {
                'Registry': 'Adobe',
                'Ordering': 'Identity',
                'Supplement': 0},
            'FontDescriptor': fontdescObject,
            'W': wObject,
            'CIDToGIDMap': cidToGidMapObject
            }

        type0FontDict = {
            'Type': Name('Font'),
            'Subtype': Name('Type0'),
            'BaseFont': ps_name,
            'Encoding': Name('Identity-H'),
            'DescendantFonts': [cidFontDictObject],
            'ToUnicode': toUnicodeMapObject
            }

        # Make fontfile stream
        descriptor['FontFile2'] = fontfileObject
        self.outputStream(
            fontfileObject, fontdata.getvalue(),
            extra={'Length1': fontdata.getbuffer().nbytes})

        # Make the 'W' (Widths) array, CidToGidMap and ToUnicode CMap
        # at the same time
        cid_to_gid_map = ['\0'] * 65536
        widths = []
        max_ccode = 0
        for c in characters:
            ccode = c
            gind = font.get_char_index(ccode)
            glyph = font.load_char(ccode,
                                   flags=LoadFlags.NO_SCALE | LoadFlags.NO_HINTING)
            widths.append((ccode, cvt(glyph.horiAdvance)))
            if ccode < 65536:
                cid_to_gid_map[ccode] = chr(gind)
            max_ccode = max(ccode, max_ccode)
        widths.sort()
        cid_to_gid_map = cid_to_gid_map[:max_ccode + 1]

        # Run-length group consecutive codes for the 'W' array and the
        # ToUnicode bfrange groups.
        last_ccode = -2
        w = []
        max_width = 0
        unicode_groups = []
        for ccode, width in widths:
            if ccode != last_ccode + 1:
                w.append(ccode)
                w.append([width])
                unicode_groups.append([ccode, ccode])
            else:
                w[-1].append(width)
                unicode_groups[-1][1] = ccode
            max_width = max(max_width, width)
            last_ccode = ccode

        unicode_bfrange = []
        for start, end in unicode_groups:
            # Ensure the CID map contains only chars from BMP
            if start > 65535:
                continue
            end = min(65535, end)
            unicode_bfrange.append(
                b"<%04x> <%04x> [%s]" %
                (start, end,
                 b" ".join(b"<%04x>" % x for x in range(start, end+1))))
        unicode_cmap = (self._identityToUnicodeCMap %
                        (len(unicode_groups), b"\n".join(unicode_bfrange)))

        # Add XObjects for unsupported chars
        glyph_ids = []
        for ccode in characters:
            if not _font_supports_glyph(fonttype, ccode):
                gind = full_font.get_char_index(ccode)
                glyph_ids.append(gind)

        bbox = [cvt(x, nearest=False) for x in full_font.bbox]
        rawcharprocs = _get_pdf_charprocs(filename, glyph_ids)
        for charname in sorted(rawcharprocs):
            stream = rawcharprocs[charname]
            charprocDict = {'Type': Name('XObject'),
                            'Subtype': Name('Form'),
                            'BBox': bbox}
            # Each glyph includes bounding box information,
            # but xpdf and ghostscript can't handle it in a
            # Form XObject (they segfault!!!), so we remove it
            # from the stream here. It's not needed anyway,
            # since the Form XObject includes it in its BBox
            # value.
            stream = stream[stream.find(b"d1") + 2:]
            charprocObject = self.reserveObject('charProc')
            self.outputStream(charprocObject, stream, extra=charprocDict)
            name = self._get_xobject_glyph_name(filename, charname)
            self.multi_byte_charprocs[name] = charprocObject

        # CIDToGIDMap stream
        cid_to_gid_map = "".join(cid_to_gid_map).encode("utf-16be")
        self.outputStream(cidToGidMapObject, cid_to_gid_map)

        # ToUnicode CMap
        self.outputStream(toUnicodeMapObject, unicode_cmap)

        descriptor['MaxWidth'] = max_width

        # Write everything out
        self.writeObject(cidFontDictObject, cidFontDict)
        self.writeObject(type0FontDictObject, type0FontDict)
        self.writeObject(fontdescObject, descriptor)
        self.writeObject(wObject, w)

        return type0FontDictObject

    # Beginning of main embedTTF function...

    # Subset-tagged PostScript name, e.g. "ABCDEF+DejaVuSans".
    ps_name = self._get_subsetted_psname(
        font.postscript_name,
        font.get_charmap()
    )
    ps_name = ps_name.encode('ascii', 'replace')
    ps_name = Name(ps_name)
    pclt = font.get_sfnt_table('pclt') or {'capHeight': 0, 'xHeight': 0}
    post = font.get_sfnt_table('post') or {'italicAngle': (0, 0)}
    ff = font.face_flags
    sf = font.style_flags

    flags = 0
    symbolic = False  # ps_name.name in ('Cmsy10', 'Cmmi10', 'Cmex10')
    if FaceFlags.FIXED_WIDTH in ff:
        flags |= 1 << 0
    if 0:  # TODO: serif
        flags |= 1 << 1
    if symbolic:
        flags |= 1 << 2
    else:
        flags |= 1 << 5
    if StyleFlags.ITALIC in sf:
        flags |= 1 << 6
    if 0:  # TODO: all caps
        flags |= 1 << 16
    if 0:  # TODO: small caps
        flags |= 1 << 17
    if 0:  # TODO: force bold
        flags |= 1 << 18

    descriptor = {
        'Type': Name('FontDescriptor'),
        'FontName': ps_name,
        'Flags': flags,
        'FontBBox': [cvt(x, nearest=False) for x in font.bbox],
        'Ascent': cvt(font.ascender, nearest=False),
        'Descent': cvt(font.descender, nearest=False),
        'CapHeight': cvt(pclt['capHeight'], nearest=False),
        'XHeight': cvt(pclt['xHeight']),
        'ItalicAngle': post['italicAngle'][1],  # ???
        'StemV': 0  # ???
        }

    if fonttype == 3:
        return embedTTFType3(font, characters, descriptor)
    elif fonttype == 42:
        return embedTTFType42(font, characters, descriptor)
def alphaState(self, alpha):
    """Return name of an ExtGState that sets alpha to the given value."""
    cached = self.alphaStates.get(alpha, None)
    if cached is not None:
        return cached[0]
    name = next(self._alpha_state_seq)
    # alpha is a (stroke, fill) pair; CA/ca are the pdf equivalents.
    self.alphaStates[alpha] = (
        name, {'Type': Name('ExtGState'),
               'CA': alpha[0], 'ca': alpha[1]})
    return name
def _soft_mask_state(self, smask):
    """
    Return an ExtGState that sets the soft mask to the given shading.

    Parameters
    ----------
    smask : Reference
        Reference to a shading in DeviceGray color space, whose luminosity
        is to be used as the alpha channel.

    Returns
    -------
    Name
    """
    state = self._soft_mask_states.get(smask, None)
    if state is not None:
        return state[0]

    name = next(self._soft_mask_seq)
    groupOb = self.reserveObject('transparency group for soft mask')

    self._soft_mask_states[smask] = (
        name,
        {
            'Type': Name('ExtGState'),
            'AIS': False,
            'SMask': {
                'Type': Name('Mask'),
                'S': Name('Luminosity'),
                'BC': [1],
                'G': groupOb
            }
        }
    )
    # The shading is painted inside a transparency-group form XObject,
    # written out later by _write_soft_mask_groups.
    self._soft_mask_groups.append((
        groupOb,
        {
            'Type': Name('XObject'),
            'Subtype': Name('Form'),
            'FormType': 1,
            'Group': {
                'S': Name('Transparency'),
                'CS': Name('DeviceGray')
            },
            'Matrix': [1, 0, 0, 1, 0, 0],
            'Resources': {'Shading': {'S': smask}},
            'BBox': [0, 0, 1, 1]
        },
        [Name('S'), Op.shading]
    ))
    return name
def writeExtGSTates(self):
    """Write out one dict holding all alpha and soft-mask ExtGStates."""
    merged = {}
    for name, value in self.alphaStates.values():
        merged[name] = value
    for name, value in self._soft_mask_states.values():
        merged[name] = value
    self.writeObject(self._extGStateObject, merged)
def _write_soft_mask_groups(self):
    """Write the transparency-group XObjects backing the soft masks."""
    for group_ref, attrs, ops in self._soft_mask_groups:
        self.beginStream(group_ref.id, None, attrs)
        self.output(*ops)
        self.endStream()
def hatchPattern(self, hatch_style):
    """Return the pattern name registered for the given hatch style."""
    # The colors may come in as numpy arrays, which aren't hashable;
    # convert them to tuples so the style can serve as a dict key.
    edge, face, hatch, lw = hatch_style
    key = (None if edge is None else tuple(edge),
           None if face is None else tuple(face),
           hatch, lw)
    existing = self._hatch_patterns.get(key, None)
    if existing is not None:
        return existing
    name = next(self._hatch_pattern_seq)
    self._hatch_patterns[key] = name
    return name
# Deprecated read-only alias exposing the registered hatch styles without
# the linewidth component (kept for backward compatibility).
hatchPatterns = _api.deprecated("3.10")(property(lambda self: {
    k: (e, f, h) for k, (e, f, h, l) in self._hatch_patterns.items()
}))
def writeHatches(self):
    """Write out each registered hatch style as a PDF tiling pattern."""
    hatchDict = dict()
    sidelen = 72.0
    for hatch_style, name in self._hatch_patterns.items():
        ob = self.reserveObject('hatch pattern')
        hatchDict[name] = ob
        res = {'Procsets':
               [Name(x) for x in "PDF Text ImageB ImageC ImageI".split()]}
        self.beginStream(
            ob.id, None,
            {'Type': Name('Pattern'),
             'PatternType': 1, 'PaintType': 1, 'TilingType': 1,
             'BBox': [0, 0, sidelen, sidelen],
             'XStep': sidelen, 'YStep': sidelen,
             'Resources': res,
             # Change origin to match Agg at top-left.
             'Matrix': [1, 0, 0, 1, 0, self.height * 72]})

        stroke_rgb, fill_rgb, hatch, lw = hatch_style
        self.output(stroke_rgb[0], stroke_rgb[1], stroke_rgb[2],
                    Op.setrgb_stroke)
        # Paint an opaque background rectangle beneath the hatch lines.
        if fill_rgb is not None:
            self.output(fill_rgb[0], fill_rgb[1], fill_rgb[2],
                        Op.setrgb_nonstroke,
                        0, 0, sidelen, sidelen, Op.rectangle,
                        Op.fill)

        self.output(lw, Op.setlinewidth)

        self.output(*self.pathOperations(
            Path.hatch(hatch),
            Affine2D().scale(sidelen),
            simplify=False))
        self.output(Op.fill_stroke)
        self.endStream()
    self.writeObject(self.hatchObject, hatchDict)
def addGouraudTriangles(self, points, colors):
    """
    Add a Gouraud triangle shading.

    Parameters
    ----------
    points : np.ndarray
        Triangle vertices, shape (n, 3, 2)
        where n = number of triangles, 3 = vertices, 2 = x, y.
    colors : np.ndarray
        Vertex colors, shape (n, 3, 1) or (n, 3, 4)
        as with points, but last dimension is either (gray,)
        or (r, g, b, alpha).

    Returns
    -------
    Name, Reference
    """
    index = len(self.gouraudTriangles)
    name = Name('GT%d' % index)
    ob = self.reserveObject(f'Gouraud triangle {name}')
    self.gouraudTriangles.append((name, ob, points, colors))
    return name, ob
def writeGouraudTriangles(self):
    """Write each recorded triangle set as a ShadingType-4 stream."""
    gouraudDict = dict()
    for name, ob, points, colors in self.gouraudTriangles:
        gouraudDict[name] = ob
        shape = points.shape
        flat_points = points.reshape((shape[0] * shape[1], 2))
        colordim = colors.shape[2]
        assert colordim in (1, 4)
        flat_colors = colors.reshape((shape[0] * shape[1], colordim))
        if colordim == 4:
            # strip the alpha channel
            colordim = 3
        # Pad the coordinate range so the 32-bit quantization below
        # never hits the very edge of the Decode interval.
        points_min = np.min(flat_points, axis=0) - (1 << 8)
        points_max = np.max(flat_points, axis=0) + (1 << 8)
        factor = 0xffffffff / (points_max - points_min)

        self.beginStream(
            ob.id, None,
            {'ShadingType': 4,
             'BitsPerCoordinate': 32,
             'BitsPerComponent': 8,
             'BitsPerFlag': 8,
             'ColorSpace': Name(
                 'DeviceRGB' if colordim == 3 else 'DeviceGray'
             ),
             'AntiAlias': False,
             'Decode': ([points_min[0], points_max[0],
                         points_min[1], points_max[1]]
                        + [0, 1] * colordim),
             })

        # Pack flag byte, big-endian 32-bit coords, and 8-bit colors
        # per vertex, exactly as the stream dictionary above declares.
        streamarr = np.empty(
            (shape[0] * shape[1],),
            dtype=[('flags', 'u1'),
                   ('points', '>u4', (2,)),
                   ('colors', 'u1', (colordim,))])
        streamarr['flags'] = 0
        streamarr['points'] = (flat_points - points_min) * factor
        streamarr['colors'] = flat_colors[:, :colordim] * 255.0
        self.write(streamarr.tobytes())
        self.endStream()
    self.writeObject(self.gouraudObject, gouraudDict)
def imageObject(self, image):
    """Return name of an image XObject representing the given image."""
    cached = self._images.get(id(image), None)
    if cached is not None:
        return cached[1]
    name = next(self._image_seq)
    ob = self.reserveObject(f'image {name}')
    self._images[id(image)] = (image, name, ob)
    return name
def _unpack(self, im):
"""
Unpack image array *im* into ``(data, alpha)``, which have shape
``(height, width, 3)`` (RGB) or ``(height, width, 1)`` (grayscale or
alpha), except that alpha is None if the image is fully opaque.
"""
im = im[::-1]
if im.ndim == 2:
return im, None
else:
rgb = im[:, :, :3]
rgb = np.array(rgb, order='C')
# PDF needs a separate alpha image
if im.shape[2] == 4:
alpha = im[:, :, 3][..., None]
if np.all(alpha == 255):
alpha = None
else:
alpha = np.array(alpha, order='C')
else:
alpha = None
return rgb, alpha
def _writePng(self, img):
    """
    Write the image *img* into the pdf file using png
    predictors with Flate compression.
    """
    buffer = BytesIO()
    img.save(buffer, format="png")
    buffer.seek(8)  # skip the fixed 8-byte PNG signature
    png_data = b''
    bit_depth = palette = None
    # Walk the PNG chunk stream, keeping the header's bit depth, the
    # palette, and the (possibly split) compressed image data.
    while True:
        length, type = struct.unpack(b'!L4s', buffer.read(8))
        if type in [b'IHDR', b'PLTE', b'IDAT']:
            data = buffer.read(length)
            if len(data) != length:
                raise RuntimeError("truncated data")
            if type == b'IHDR':
                bit_depth = int(data[8])
            elif type == b'PLTE':
                palette = data
            elif type == b'IDAT':
                png_data += data
        elif type == b'IEND':
            break
        else:
            # Uninteresting chunk: skip its payload.
            buffer.seek(length, 1)
        buffer.seek(4, 1)   # skip CRC
    return png_data, bit_depth, palette
def _writeImg(self, data, id, smask=None):
    """
    Write the image *data*, of shape ``(height, width, 1)`` (grayscale) or
    ``(height, width, 3)`` (RGB), as pdf object *id* and with the soft mask
    (alpha channel) *smask*, which should be either None or a ``(height,
    width, 1)`` array.
    """
    height, width, color_channels = data.shape
    obj = {'Type': Name('XObject'),
           'Subtype': Name('Image'),
           'Width': width,
           'Height': height,
           'ColorSpace': Name({1: 'DeviceGray', 3: 'DeviceRGB'}[color_channels]),
           'BitsPerComponent': 8}
    if smask:
        obj['SMask'] = smask
    if mpl.rcParams['pdf.compression']:
        if data.shape[-1] == 1:
            # Pillow wants 2-D arrays for grayscale images.
            data = data.squeeze(axis=-1)
        png = {'Predictor': 10, 'Colors': color_channels, 'Columns': width}
        img = Image.fromarray(data)
        img_colors = img.getcolors(maxcolors=256)
        if color_channels == 3 and img_colors is not None:
            # Convert to indexed color if there are 256 colors or fewer. This can
            # significantly reduce the file size.
            num_colors = len(img_colors)
            palette = np.array([comp for _, color in img_colors for comp in color],
                               dtype=np.uint8)
            # Pack each palette/pixel RGB triple into one 24-bit integer
            # so pixels can be mapped to palette indices in one pass.
            palette24 = ((palette[0::3].astype(np.uint32) << 16) |
                         (palette[1::3].astype(np.uint32) << 8) |
                         palette[2::3])
            rgb24 = ((data[:, :, 0].astype(np.uint32) << 16) |
                     (data[:, :, 1].astype(np.uint32) << 8) |
                     data[:, :, 2])
            indices = np.argsort(palette24).astype(np.uint8)
            rgb8 = indices[np.searchsorted(palette24, rgb24, sorter=indices)]
            img = Image.fromarray(rgb8, mode='P')
            img.putpalette(palette)
            png_data, bit_depth, palette = self._writePng(img)
            if bit_depth is None or palette is None:
                raise RuntimeError("invalid PNG header")
            palette = palette[:num_colors * 3]  # Trim padding; remove for Pillow>=9
            obj['ColorSpace'] = [Name('Indexed'), Name('DeviceRGB'),
                                 num_colors - 1, palette]
            obj['BitsPerComponent'] = bit_depth
            png['Colors'] = 1
            png['BitsPerComponent'] = bit_depth
        else:
            png_data, _, _ = self._writePng(img)
    else:
        png = None
    self.beginStream(
        id,
        self.reserveObject('length of image stream'),
        obj,
        png=png
        )
    if png:
        self.currentstream.write(png_data)
    else:
        self.currentstream.write(data.tobytes())
    self.endStream()
def writeImages(self):
    """Write out all registered image XObjects (plus any soft masks)."""
    for img, name, ob in self._images.values():
        data, alpha = self._unpack(img)
        smaskObject = None
        if alpha is not None:
            # Transparency goes into a separate grayscale smask image.
            smaskObject = self.reserveObject("smask")
            self._writeImg(alpha, smaskObject.id)
        self._writeImg(data, ob.id, smaskObject)
def markerObject(self, path, trans, fill, stroke, lw, joinstyle,
                 capstyle):
    """Return name of a marker XObject representing the given path."""
    # self.markers maps (path operations, fill?, stroke?, join, cap) to
    # [name, object reference, bounding box, max linewidth].  Sharing the
    # XObject across draw_markers calls is valid because colors may vary
    # per use, but the fill/stroke choice cannot.  close() relies on the
    # first two slots of each value being the name and reference.
    pathops = self.pathOperations(path, trans, simplify=False)
    key = (tuple(pathops), bool(fill), bool(stroke), joinstyle, capstyle)
    entry = self.markers.get(key)
    if entry is not None:
        # Reuse the XObject; just widen the recorded maximum line width,
        # which writeMarkers uses to pad the bounding box.
        entry[-1] = max(entry[-1], lw)
        return entry[0]
    name = Name('M%d' % len(self.markers))
    ob = self.reserveObject('marker %d' % len(self.markers))
    self.markers[key] = [name, ob, path.get_extents(trans), lw]
    return name
def writeMarkers(self):
    """Write out all marker form XObjects recorded by markerObject."""
    for key, value in self.markers.items():
        pathops, fill, stroke, joinstyle, capstyle = key
        name, ob, bbox, lw = value
        # bbox hugs the control points, so half a stroke pokes out; a
        # miter join can extend further.  PDF's default miter limit
        # (miterLength / lineWidth, Section 8.4.3.5, Table 52) is 10,
        # and half the miter length lies outside: pad by lw * 10 / 2.
        bbox = bbox.padded(lw * 5)
        self.beginStream(
            ob.id, None,
            {'Type': Name('XObject'), 'Subtype': Name('Form'),
             'BBox': list(bbox.extents)})
        self.output(GraphicsContextPdf.joinstyles[joinstyle],
                    Op.setlinejoin)
        self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
        self.output(*pathops)
        self.output(Op.paint_path(fill, stroke))
        self.endStream()
def pathCollectionObject(self, gc, path, trans, padding, filled, stroked):
    """Reserve a form XObject for a reusable path-collection template."""
    index = len(self.paths)
    name = Name('P%d' % index)
    ob = self.reserveObject('path %d' % index)
    self.paths.append(
        (name, path, trans, ob, gc.get_joinstyle(), gc.get_capstyle(),
         padding, filled, stroked))
    return name
def writePathCollectionTemplates(self):
    """Write the form XObjects reserved by pathCollectionObject."""
    for (name, path, trans, ob, joinstyle, capstyle, padding, filled,
            stroked) in self.paths:
        pathops = self.pathOperations(path, trans, simplify=False)
        bbox = path.get_extents(trans)
        if np.all(np.isfinite(bbox.extents)):
            extents = list(bbox.padded(padding).extents)
        else:
            # Degenerate extents: fall back to an empty bounding box.
            extents = [0, 0, 0, 0]
        self.beginStream(
            ob.id, None,
            {'Type': Name('XObject'), 'Subtype': Name('Form'),
             'BBox': extents})
        self.output(GraphicsContextPdf.joinstyles[joinstyle],
                    Op.setlinejoin)
        self.output(GraphicsContextPdf.capstyles[capstyle], Op.setlinecap)
        self.output(*pathops)
        self.output(Op.paint_path(filled, stroked))
        self.endStream()
@staticmethod
def pathOperations(path, transform, clip=None, simplify=None, sketch=None):
    """Return the pdf operator stream (wrapped in Verbatim) for *path*."""
    # One opcode per Path code; the empty entry is the third slot —
    # presumably handled specially by the C converter (verify there).
    opcodes = [Op.moveto.value, Op.lineto.value, b'', Op.curveto.value,
               Op.closepath.value]
    converted = _path.convert_to_string(
        path, transform, clip, simplify, sketch,
        6,  # digits of decimal precision
        opcodes,
        True)
    return [Verbatim(converted)]
def writePath(self, path, transform, clip=False, sketch=None):
    """Emit the operators drawing *path*, optionally clipped to the page."""
    if clip:
        clipbox = (0.0, 0.0, self.width * 72, self.height * 72)
        simplify = path.should_simplify
    else:
        clipbox = None
        simplify = False
    self.output(*self.pathOperations(path, transform, clipbox,
                                     simplify=simplify, sketch=sketch))
def reserveObject(self, name=''):
    """
    Reserve an ID for an indirect object.

    The name is used for debugging in case we forget to print out
    the object with writeObject.
    """
    # Named object_id to avoid shadowing the builtin id().
    object_id = next(self._object_seq)
    self.xrefTable.append([None, 0, name])
    return Reference(object_id)
def recordXref(self, id):
    """Record the current file position as the xref offset of object *id*."""
    offset = self.fh.tell() - self.tell_base
    self.xrefTable[id][0] = offset
def writeObject(self, object, contents):
    """Write an indirect object, recording its offset in the xref table."""
    self.recordXref(object.id)
    object.write(contents, self)
def writeXref(self):
    """Write out the xref table."""
    self.startxref = self.fh.tell() - self.tell_base
    self.write(b"xref\n0 %d\n" % len(self.xrefTable))
    for i, (offset, generation, name) in enumerate(self.xrefTable):
        if offset is None:
            raise AssertionError(
                'No offset for object %d (%s)' % (i, name))
        # Entry 0 is the conventional free-list head ("f"); every other
        # entry is in use ("n").
        key = b"f" if name == 'the zero object' else b"n"
        self.write(b"%010d %05d %b \n" % (offset, generation, key))
def writeInfoDict(self):
    """Write out the info dictionary, checking it for good form"""
    info_ref = self.reserveObject('info')
    self.infoObject = info_ref
    self.writeObject(info_ref, self.infoDict)
def writeTrailer(self):
    """Write out the PDF trailer."""
    trailer = {'Size': len(self.xrefTable),
               'Root': self.rootObject,
               'Info': self.infoObject}
    self.write(b"trailer\n")
    self.write(pdfRepr(trailer))
    # Could add 'ID'
    self.write(b"\nstartxref\n%d\n%%%%EOF\n" % self.startxref)
class RendererPdf(_backend_pdf_ps.RendererPDFPSBase):
    # Directory holding the AFM files for the 14 core PDF fonts.
    _afm_font_dir = cbook._get_data_path("fonts/pdfcorefonts")
    # Name of the rcParam that switches this renderer to AFM core fonts.
    _use_afm_rc_name = "pdf.use14corefonts"
def __init__(self, file, image_dpi, height, width):
    """Create a renderer that draws into the given PdfFile."""
    super().__init__(width, height)
    self.file = file
    self.image_dpi = image_dpi
    # Tracks the pdf graphics state mirrored into the output stream.
    self.gc = self.new_gc()
def finalize(self):
    """Flush the pending graphics-state teardown into the file."""
    delta = self.gc.finalize()
    self.file.output(*delta)
def check_gc(self, gc, fillcolor=None):
    """
    Emit the operators needed to move the output graphics state
    (self.gc) to match *gc*, temporarily attaching *fillcolor* and the
    derived (stroke, fill) alphas to *gc* for the comparison.
    """
    orig_fill = getattr(gc, '_fillcolor', (0., 0., 0.))
    gc._fillcolor = fillcolor

    orig_alphas = getattr(gc, '_effective_alphas', (1.0, 1.0))

    if gc.get_rgb() is None:
        # It should not matter what color here since linewidth should be
        # 0 unless affected by global settings in rcParams, hence setting
        # zero alpha just in case.
        gc.set_foreground((0, 0, 0, 0), isRGBA=True)

    if gc._forced_alpha:
        gc._effective_alphas = (gc._alpha, gc._alpha)
    elif fillcolor is None or len(fillcolor) < 4:
        gc._effective_alphas = (gc._rgb[3], 1.0)
    else:
        gc._effective_alphas = (gc._rgb[3], fillcolor[3])

    delta = self.gc.delta(gc)
    if delta:
        self.file.output(*delta)

    # Restore gc to avoid unwanted side effects
    gc._fillcolor = orig_fill
    gc._effective_alphas = orig_alphas
def get_image_magnification(self):
    # Images are rendered at image_dpi while the page itself is laid
    # out at 72 dpi, so scale by the ratio.
    return self.image_dpi / 72.0
def draw_image(self, gc, x, y, im, transform=None):
# docstring inherited
h, w = im.shape[:2]
if w == 0 or h == 0:
return
if transform is None:
# If there's no transform, alpha has already been applied
gc.set_alpha(1.0)
self.check_gc(gc)
w = 72.0 * w / self.image_dpi
h = 72.0 * h / self.image_dpi
imob = self.file.imageObject(im)
if transform is None:
self.file.output(Op.gsave,
w, 0, 0, h, x, y, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
else:
tr1, tr2, tr3, tr4, tr5, tr6 = transform.frozen().to_values()
self.file.output(Op.gsave,
1, 0, 0, 1, x, y, Op.concat_matrix,
tr1, tr2, tr3, tr4, tr5, tr6, Op.concat_matrix,
imob, Op.use_xobject, Op.grestore)
def draw_path(self, gc, path, transform, rgbFace=None):
# docstring inherited
self.check_gc(gc, rgbFace)
self.file.writePath(
path, transform,
rgbFace is None and gc.get_hatch_path() is None,
gc.get_sketch_params())
self.file.output(self.gc.paint())
def draw_path_collection(self, gc, master_transform, paths, all_transforms,
offsets, offset_trans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position):
# We can only reuse the objects if the presence of fill and
# stroke (and the amount of alpha for each) is the same for
# all of them
can_do_optimization = True
facecolors = np.asarray(facecolors)
edgecolors = np.asarray(edgecolors)
if not len(facecolors):
filled = False
can_do_optimization = not gc.get_hatch()
else:
if np.all(facecolors[:, 3] == facecolors[0, 3]):
filled = facecolors[0, 3] != 0.0
else:
can_do_optimization = False
if not len(edgecolors):
stroked = False
else:
if np.all(np.asarray(linewidths) == 0.0):
stroked = False
elif np.all(edgecolors[:, 3] == edgecolors[0, 3]):
stroked = edgecolors[0, 3] != 0.0
else:
can_do_optimization = False
# Is the optimization worth it? Rough calculation:
# cost of emitting a path in-line is len_path * uses_per_path
# cost of XObject is len_path + 5 for the definition,
# uses_per_path for the uses
len_path = len(paths[0].vertices) if len(paths) > 0 else 0
uses_per_path = self._iter_collection_uses_per_path(
paths, all_transforms, offsets, facecolors, edgecolors)
should_do_optimization = \
len_path + uses_per_path + 5 < len_path * uses_per_path
if (not can_do_optimization) or (not should_do_optimization):
return RendererBase.draw_path_collection(
self, gc, master_transform, paths, all_transforms,
offsets, offset_trans, facecolors, edgecolors,
linewidths, linestyles, antialiaseds, urls,
offset_position)
padding = np.max(linewidths)
path_codes = []
for i, (path, transform) in enumerate(self._iter_collection_raw_paths(
master_transform, paths, all_transforms)):
name = self.file.pathCollectionObject(
gc, path, transform, padding, filled, stroked)
path_codes.append(name)
output = self.file.output
output(*self.gc.push())
lastx, lasty = 0, 0
for xo, yo, path_id, gc0, rgbFace in self._iter_collection(
gc, path_codes, offsets, offset_trans,
facecolors, edgecolors, linewidths, linestyles,
antialiaseds, urls, offset_position):
self.check_gc(gc0, rgbFace)
dx, dy = xo - lastx, yo - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix, path_id,
Op.use_xobject)
lastx, lasty = xo, yo
output(*self.gc.pop())
def draw_markers(self, gc, marker_path, marker_trans, path, trans,
rgbFace=None):
# docstring inherited
# Same logic as in draw_path_collection
len_marker_path = len(marker_path)
uses = len(path)
if len_marker_path * uses < len_marker_path + uses + 5:
RendererBase.draw_markers(self, gc, marker_path, marker_trans,
path, trans, rgbFace)
return
self.check_gc(gc, rgbFace)
fill = gc.fill(rgbFace)
stroke = gc.stroke()
output = self.file.output
marker = self.file.markerObject(
marker_path, marker_trans, fill, stroke, self.gc._linewidth,
gc.get_joinstyle(), gc.get_capstyle())
output(Op.gsave)
lastx, lasty = 0, 0
for vertices, code in path.iter_segments(
trans,
clip=(0, 0, self.file.width*72, self.file.height*72),
simplify=False):
if len(vertices):
x, y = vertices[-2:]
if not (0 <= x <= self.file.width * 72
and 0 <= y <= self.file.height * 72):
continue
dx, dy = x - lastx, y - lasty
output(1, 0, 0, 1, dx, dy, Op.concat_matrix,
marker, Op.use_xobject)
lastx, lasty = x, y
output(Op.grestore)
def draw_gouraud_triangles(self, gc, points, colors, trans):
assert len(points) == len(colors)
if len(points) == 0:
return
assert points.ndim == 3
assert points.shape[1] == 3
assert points.shape[2] == 2
assert colors.ndim == 3
assert colors.shape[1] == 3
assert colors.shape[2] in (1, 4)
shape = points.shape
points = points.reshape((shape[0] * shape[1], 2))
tpoints = trans.transform(points)
tpoints = tpoints.reshape(shape)
name, _ = self.file.addGouraudTriangles(tpoints, colors)
output = self.file.output
if colors.shape[2] == 1:
# grayscale
gc.set_alpha(1.0)
self.check_gc(gc)
output(name, Op.shading)
return
alpha = colors[0, 0, 3]
if np.allclose(alpha, colors[:, :, 3]):
# single alpha value
gc.set_alpha(alpha)
self.check_gc(gc)
output(name, Op.shading)
else:
# varying alpha: use a soft mask
alpha = colors[:, :, 3][:, :, None]
_, smask_ob = self.file.addGouraudTriangles(tpoints, alpha)
gstate = self.file._soft_mask_state(smask_ob)
output(Op.gsave, gstate, Op.setgstate,
name, Op.shading,
Op.grestore)
def _setup_textpos(self, x, y, angle, oldx=0, oldy=0, oldangle=0):
if angle == oldangle == 0:
self.file.output(x - oldx, y - oldy, Op.textpos)
else:
angle = math.radians(angle)
self.file.output(math.cos(angle), math.sin(angle),
-math.sin(angle), math.cos(angle),
x, y, Op.textmatrix)
self.file.output(0, 0, Op.textpos)
def draw_mathtext(self, gc, x, y, s, prop, angle):
# TODO: fix positioning and encoding
width, height, descent, glyphs, rects = \
self._text2path.mathtext_parser.parse(s, 72, prop)
if gc.get_url() is not None:
self.file._annotations[-1][1].append(_get_link_annotation(
gc, x, y, width, height, angle))
fonttype = mpl.rcParams['pdf.fonttype']
# Set up a global transformation matrix for the whole math expression
a = math.radians(angle)
self.file.output(Op.gsave)
self.file.output(math.cos(a), math.sin(a),
-math.sin(a), math.cos(a),
x, y, Op.concat_matrix)
self.check_gc(gc, gc._rgb)
prev_font = None, None
oldx, oldy = 0, 0
unsupported_chars = []
self.file.output(Op.begin_text)
for font, fontsize, num, ox, oy in glyphs:
self.file._character_tracker.track_glyph(font, num)
fontname = font.fname
if not _font_supports_glyph(fonttype, num):
# Unsupported chars (i.e. multibyte in Type 3 or beyond BMP in
# Type 42) must be emitted separately (below).
unsupported_chars.append((font, fontsize, ox, oy, num))
else:
self._setup_textpos(ox, oy, 0, oldx, oldy)
oldx, oldy = ox, oy
if (fontname, fontsize) != prev_font:
self.file.output(self.file.fontName(fontname), fontsize,
Op.selectfont)
prev_font = fontname, fontsize
self.file.output(self.encode_string(chr(num), fonttype),
Op.show)
self.file.output(Op.end_text)
for font, fontsize, ox, oy, num in unsupported_chars:
self._draw_xobject_glyph(
font, fontsize, font.get_char_index(num), ox, oy)
# Draw any horizontal lines in the math layout
for ox, oy, width, height in rects:
self.file.output(Op.gsave, ox, oy, width, height,
Op.rectangle, Op.fill, Op.grestore)
# Pop off the global transformation
self.file.output(Op.grestore)
def draw_tex(self, gc, x, y, s, prop, angle, *, mtext=None):
# docstring inherited
texmanager = self.get_texmanager()
fontsize = prop.get_size_in_points()
dvifile = texmanager.make_dvi(s, fontsize)
with dviread.Dvi(dvifile, 72) as dvi:
page, = dvi
if gc.get_url() is not None:
self.file._annotations[-1][1].append(_get_link_annotation(
gc, x, y, page.width, page.height, angle))
# Gather font information and do some setup for combining
# characters into strings. The variable seq will contain a
# sequence of font and text entries. A font entry is a list
# ['font', name, size] where name is a Name object for the
# font. A text entry is ['text', x, y, glyphs, x+w] where x
# and y are the starting coordinates, w is the width, and
# glyphs is a list; in this phase it will always contain just
# one single-character string, but later it may have longer
# strings interspersed with kern amounts.
oldfont, seq = None, []
for x1, y1, dvifont, glyph, width in page.text:
if dvifont != oldfont:
pdfname = self.file.dviFontName(dvifont)
seq += [['font', pdfname, dvifont.size]]
oldfont = dvifont
seq += [['text', x1, y1, [bytes([glyph])], x1+width]]
# Find consecutive text strings with constant y coordinate and
# combine into a sequence of strings and kerns, or just one
# string (if any kerns would be less than 0.1 points).
i, curx, fontsize = 0, 0, None
while i < len(seq)-1:
elt, nxt = seq[i:i+2]
if elt[0] == 'font':
fontsize = elt[2]
elif elt[0] == nxt[0] == 'text' and elt[2] == nxt[2]:
offset = elt[4] - nxt[1]
if abs(offset) < 0.1:
elt[3][-1] += nxt[3][0]
elt[4] += nxt[4]-nxt[1]
else:
elt[3] += [offset*1000.0/fontsize, nxt[3][0]]
elt[4] = nxt[4]
del seq[i+1]
continue
i += 1
# Create a transform to map the dvi contents to the canvas.
mytrans = Affine2D().rotate_deg(angle).translate(x, y)
# Output the text.
self.check_gc(gc, gc._rgb)
self.file.output(Op.begin_text)
curx, cury, oldx, oldy = 0, 0, 0, 0
for elt in seq:
if elt[0] == 'font':
self.file.output(elt[1], elt[2], Op.selectfont)
elif elt[0] == 'text':
curx, cury = mytrans.transform((elt[1], elt[2]))
self._setup_textpos(curx, cury, angle, oldx, oldy)
oldx, oldy = curx, cury
if len(elt[3]) == 1:
self.file.output(elt[3][0], Op.show)
else:
self.file.output(elt[3], Op.showkern)
else:
assert False
self.file.output(Op.end_text)
# Then output the boxes (e.g., variable-length lines of square
# roots).
boxgc = self.new_gc()
boxgc.copy_properties(gc)
boxgc.set_linewidth(0)
pathops = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.CLOSEPOLY]
for x1, y1, h, w in page.boxes:
path = Path([[x1, y1], [x1+w, y1], [x1+w, y1+h], [x1, y1+h],
[0, 0]], pathops)
self.draw_path(boxgc, path, mytrans, gc._rgb)
def encode_string(self, s, fonttype):
if fonttype in (1, 3):
return s.encode('cp1252', 'replace')
return s.encode('utf-16be', 'replace')
def draw_text(self, gc, x, y, s, prop, angle, ismath=False, mtext=None):
# docstring inherited
# TODO: combine consecutive texts into one BT/ET delimited section
self.check_gc(gc, gc._rgb)
if ismath:
return self.draw_mathtext(gc, x, y, s, prop, angle)
fontsize = prop.get_size_in_points()
if mpl.rcParams['pdf.use14corefonts']:
font = self._get_font_afm(prop)
fonttype = 1
else:
font = self._get_font_ttf(prop)
self.file._character_tracker.track(font, s)
fonttype = mpl.rcParams['pdf.fonttype']
if gc.get_url() is not None:
font.set_text(s)
width, height = font.get_width_height()
self.file._annotations[-1][1].append(_get_link_annotation(
gc, x, y, width / 64, height / 64, angle))
# If fonttype is neither 3 nor 42, emit the whole string at once
# without manual kerning.
if fonttype not in [3, 42]:
self.file.output(Op.begin_text,
self.file.fontName(prop), fontsize, Op.selectfont)
self._setup_textpos(x, y, angle)
self.file.output(self.encode_string(s, fonttype),
Op.show, Op.end_text)
# A sequence of characters is broken into multiple chunks. The chunking
# serves two purposes:
# - For Type 3 fonts, there is no way to access multibyte characters,
# as they cannot have a CIDMap. Therefore, in this case we break
# the string into chunks, where each chunk contains either a string
# of consecutive 1-byte characters or a single multibyte character.
# - A sequence of 1-byte characters is split into chunks to allow for
# kerning adjustments between consecutive chunks.
#
# Each chunk is emitted with a separate command: 1-byte characters use
# the regular text show command (TJ) with appropriate kerning between
# chunks, whereas multibyte characters use the XObject command (Do).
else:
# List of (ft_object, start_x, [prev_kern, char, char, ...]),
# w/o zero kerns.
singlebyte_chunks = []
# List of (ft_object, start_x, glyph_index).
multibyte_glyphs = []
prev_was_multibyte = True
prev_font = font
for item in _text_helpers.layout(s, font, kern_mode=Kerning.UNFITTED):
if _font_supports_glyph(fonttype, ord(item.char)):
if prev_was_multibyte or item.ft_object != prev_font:
singlebyte_chunks.append((item.ft_object, item.x, []))
prev_font = item.ft_object
if item.prev_kern:
singlebyte_chunks[-1][2].append(item.prev_kern)
singlebyte_chunks[-1][2].append(item.char)
prev_was_multibyte = False
else:
multibyte_glyphs.append((item.ft_object, item.x, item.glyph_idx))
prev_was_multibyte = True
# Do the rotation and global translation as a single matrix
# concatenation up front
self.file.output(Op.gsave)
a = math.radians(angle)
self.file.output(math.cos(a), math.sin(a),
-math.sin(a), math.cos(a),
x, y, Op.concat_matrix)
# Emit all the 1-byte characters in a BT/ET group.
self.file.output(Op.begin_text)
prev_start_x = 0
for ft_object, start_x, kerns_or_chars in singlebyte_chunks:
ft_name = self.file.fontName(ft_object.fname)
self.file.output(ft_name, fontsize, Op.selectfont)
self._setup_textpos(start_x, 0, 0, prev_start_x, 0, 0)
self.file.output(
# See pdf spec "Text space details" for the 1000/fontsize
# (aka. 1000/T_fs) factor.
[-1000 * next(group) / fontsize if tp == float # a kern
else self.encode_string("".join(group), fonttype)
for tp, group in itertools.groupby(kerns_or_chars, type)],
Op.showkern)
prev_start_x = start_x
self.file.output(Op.end_text)
# Then emit all the multibyte characters, one at a time.
for ft_object, start_x, glyph_idx in multibyte_glyphs:
self._draw_xobject_glyph(
ft_object, fontsize, glyph_idx, start_x, 0
)
self.file.output(Op.grestore)
def _draw_xobject_glyph(self, font, fontsize, glyph_idx, x, y):
"""Draw a multibyte character from a Type 3 font as an XObject."""
glyph_name = font.get_glyph_name(glyph_idx)
name = self.file._get_xobject_glyph_name(font.fname, glyph_name)
self.file.output(
Op.gsave,
0.001 * fontsize, 0, 0, 0.001 * fontsize, x, y, Op.concat_matrix,
Name(name), Op.use_xobject,
Op.grestore,
)
def new_gc(self):
# docstring inherited
return GraphicsContextPdf(self.file)
class GraphicsContextPdf(GraphicsContextBase):
    """Graphics context that tracks PDF graphics state and can compute the
    operator sequence (`delta`) needed to move from one state to another."""
    def __init__(self, file):
        super().__init__()
        self._fillcolor = (0.0, 0.0, 0.0)
        self._effective_alphas = (1.0, 1.0)
        self.file = file
        # Linked list of pushed (gsave'd) states; see push()/pop().
        self.parent = None
    def __repr__(self):
        d = dict(self.__dict__)
        # Omit the noisy/recursive attributes from the repr.
        del d['file']
        del d['parent']
        return repr(d)
    def stroke(self):
        """
        Predicate: does the path need to be stroked (its outline drawn)?
        This tests for the various conditions that disable stroking
        the path, in which case it would presumably be filled.
        """
        # _linewidth > 0: in pdf a line of width 0 is drawn at minimum
        # possible device width, but e.g., agg doesn't draw at all
        return (self._linewidth > 0 and self._alpha > 0 and
                (len(self._rgb) <= 3 or self._rgb[3] != 0.0))
    def fill(self, *args):
        """
        Predicate: does the path need to be filled?
        An optional argument can be used to specify an alternative
        _fillcolor, as needed by RendererPdf.draw_markers.
        """
        if len(args):
            _fillcolor = args[0]
        else:
            _fillcolor = self._fillcolor
        return (self._hatch or
                (_fillcolor is not None and
                 (len(_fillcolor) <= 3 or _fillcolor[3] != 0.0)))
    def paint(self):
        """
        Return the appropriate pdf operator to cause the path to be
        stroked, filled, or both.
        """
        return Op.paint_path(self.fill(), self.stroke())
    # Mappings from Matplotlib style names to PDF integer codes.
    capstyles = {'butt': 0, 'round': 1, 'projecting': 2}
    joinstyles = {'miter': 0, 'round': 1, 'bevel': 2}
    def capstyle_cmd(self, style):
        return [self.capstyles[style], Op.setlinecap]
    def joinstyle_cmd(self, style):
        return [self.joinstyles[style], Op.setlinejoin]
    def linewidth_cmd(self, width):
        return [width, Op.setlinewidth]
    def dash_cmd(self, dashes):
        offset, dash = dashes
        if dash is None:
            # Solid line: empty dash pattern.
            dash = []
            offset = 0
        return [list(dash), offset, Op.setdash]
    def alpha_cmd(self, alpha, forced, effective_alphas):
        # Alpha is applied through a named ExtGState object.
        name = self.file.alphaState(effective_alphas)
        return [name, Op.setgstate]
    def hatch_cmd(self, hatch, hatch_color, hatch_linewidth):
        if not hatch:
            # No hatch: restore a plain non-stroke color (space).
            if self._fillcolor is not None:
                return self.fillcolor_cmd(self._fillcolor)
            else:
                return [Name('DeviceRGB'), Op.setcolorspace_nonstroke]
        else:
            hatch_style = (hatch_color, self._fillcolor, hatch, hatch_linewidth)
            name = self.file.hatchPattern(hatch_style)
            return [Name('Pattern'), Op.setcolorspace_nonstroke,
                    name, Op.setcolor_nonstroke]
    def rgb_cmd(self, rgb):
        if mpl.rcParams['pdf.inheritcolor']:
            return []
        if rgb[0] == rgb[1] == rgb[2]:
            # Equal components: use the shorter grayscale operator.
            return [rgb[0], Op.setgray_stroke]
        else:
            return [*rgb[:3], Op.setrgb_stroke]
    def fillcolor_cmd(self, rgb):
        if rgb is None or mpl.rcParams['pdf.inheritcolor']:
            return []
        elif rgb[0] == rgb[1] == rgb[2]:
            return [rgb[0], Op.setgray_nonstroke]
        else:
            return [*rgb[:3], Op.setrgb_nonstroke]
    def push(self):
        """Save the current state on the parent chain; return [gsave]."""
        parent = GraphicsContextPdf(self.file)
        parent.copy_properties(self)
        parent.parent = self.parent
        self.parent = parent
        return [Op.gsave]
    def pop(self):
        """Restore the most recently pushed state; return [grestore]."""
        assert self.parent is not None
        self.copy_properties(self.parent)
        self.parent = self.parent.parent
        return [Op.grestore]
    def clip_cmd(self, cliprect, clippath):
        """Set clip rectangle. Calls `.pop()` and `.push()`."""
        cmds = []
        # Pop graphics state until we hit the right one or the stack is empty
        while ((self._cliprect, self._clippath) != (cliprect, clippath)
                and self.parent is not None):
            cmds.extend(self.pop())
        # Unless we hit the right one, set the clip polygon
        if ((self._cliprect, self._clippath) != (cliprect, clippath) or
                self.parent is None):
            cmds.extend(self.push())
            if self._cliprect != cliprect:
                cmds.extend([cliprect, Op.rectangle, Op.clip, Op.endpath])
            if self._clippath != clippath:
                path, affine = clippath.get_transformed_path_and_affine()
                cmds.extend(
                    PdfFile.pathOperations(path, affine, simplify=False) +
                    [Op.clip, Op.endpath])
        return cmds
    # (attribute names, command-generator) pairs checked by delta(), in order.
    commands = (
        # must come first since may pop
        (('_cliprect', '_clippath'), clip_cmd),
        (('_alpha', '_forced_alpha', '_effective_alphas'), alpha_cmd),
        (('_capstyle',), capstyle_cmd),
        (('_fillcolor',), fillcolor_cmd),
        (('_joinstyle',), joinstyle_cmd),
        (('_linewidth',), linewidth_cmd),
        (('_dashes',), dash_cmd),
        (('_rgb',), rgb_cmd),
        # must come after fillcolor and rgb
        (('_hatch', '_hatch_color', '_hatch_linewidth'), hatch_cmd),
    )
    def delta(self, other):
        """
        Copy properties of other into self and return PDF commands
        needed to transform *self* into *other*.
        """
        cmds = []
        fill_performed = False
        for params, cmd in self.commands:
            different = False
            for p in params:
                ours = getattr(self, p)
                theirs = getattr(other, p)
                try:
                    if ours is None or theirs is None:
                        different = ours is not theirs
                    else:
                        different = bool(ours != theirs)
                except ValueError:
                    # Array-valued attributes raise on truthiness of !=;
                    # compare element-wise instead.
                    ours = np.asarray(ours)
                    theirs = np.asarray(theirs)
                    different = (ours.shape != theirs.shape or
                                 np.any(ours != theirs))
                if different:
                    break
            # Need to update hatching if we also updated fillcolor
            if cmd.__name__ == 'hatch_cmd' and fill_performed:
                different = True
            if different:
                if cmd.__name__ == 'fillcolor_cmd':
                    fill_performed = True
                theirs = [getattr(other, p) for p in params]
                cmds.extend(cmd(self, *theirs))
                for p in params:
                    setattr(self, p, getattr(other, p))
        return cmds
    def copy_properties(self, other):
        """
        Copy properties of other into self.
        """
        super().copy_properties(other)
        # The base class does not know about these two PDF-specific
        # attributes, so copy them explicitly (with fallbacks).
        fillcolor = getattr(other, '_fillcolor', self._fillcolor)
        effective_alphas = getattr(other, '_effective_alphas',
                                   self._effective_alphas)
        self._fillcolor = fillcolor
        self._effective_alphas = effective_alphas
    def finalize(self):
        """
        Make sure every pushed graphics state is popped.
        """
        cmds = []
        while self.parent is not None:
            cmds.extend(self.pop())
        return cmds
class PdfPages:
    """
    Accumulate multiple figures as pages of a single PDF file.

    Examples
    --------
    >>> import matplotlib.pyplot as plt
    >>> # Initialize:
    >>> with PdfPages('foo.pdf') as pdf:
    ...     # As many times as you like, create a figure fig and save it:
    ...     fig = plt.figure()
    ...     pdf.savefig(fig)
    ...     # When no figure is specified the current figure is saved
    ...     pdf.savefig()

    Notes
    -----
    `PdfPages` is only a thin layer over `PdfFile`; its purpose is to avoid
    confusion when `~.pyplot.savefig` is called without a format argument.
    """
    @_api.delete_parameter("3.10", "keep_empty",
                           addendum="This parameter does nothing.")
    def __init__(self, filename, keep_empty=None, metadata=None):
        """
        Create a new PdfPages object.

        Parameters
        ----------
        filename : str or path-like or file-like
            Target of `PdfPages.savefig`.  The file is only opened once the
            first figure is saved, replacing any pre-existing file of the
            same name.
        metadata : dict, optional
            Information dictionary object (see PDF reference section 10.2.1
            'Document Information Dictionary'), e.g.:
            ``{'Creator': 'My software', 'Author': 'Me', 'Title': 'Awesome'}``.
            The standard keys are 'Title', 'Author', 'Subject', 'Keywords',
            'Creator', 'Producer', 'CreationDate', 'ModDate', and
            'Trapped'. Values have been predefined for 'Creator', 'Producer'
            and 'CreationDate'. They can be removed by setting them to `None`.
        """
        self._filename = filename
        self._metadata = metadata
        self._file = None  # lazily created by _ensure_file()
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
    def _ensure_file(self):
        # Open the underlying PdfFile on first use only.
        if self._file is None:
            self._file = PdfFile(self._filename, metadata=self._metadata)
        return self._file
    def close(self):
        """
        Finalize this object, turning the underlying file into a complete
        PDF file.
        """
        if self._file is None:
            return
        self._file.finalize()
        self._file.close()
        self._file = None
    def infodict(self):
        """
        Return a modifiable information dictionary object
        (see PDF reference section 10.2.1 'Document Information
        Dictionary').
        """
        return self._ensure_file().infoDict
    def savefig(self, figure=None, **kwargs):
        """
        Save a `.Figure` to this file as a new page.

        Any other keyword arguments are passed to `~.Figure.savefig`.

        Parameters
        ----------
        figure : `.Figure` or int, default: the active figure
            The figure, or index of the figure, that is saved to the file.
        """
        if not isinstance(figure, Figure):
            # Resolve an index (or the active figure) to a Figure instance.
            manager = (Gcf.get_active() if figure is None
                       else Gcf.get_fig_manager(figure))
            if manager is None:
                raise ValueError(f"No figure {figure}")
            figure = manager.canvas.figure
        # Force use of pdf backend, as PdfPages is tightly coupled with it.
        figure.savefig(self, format="pdf", backend="pdf", **kwargs)
    def get_pagecount(self):
        """Return the current number of pages in the multipage pdf file."""
        return len(self._ensure_file().pageList)
    def attach_note(self, text, positionRect=[-100, -100, 0, 0]):
        """
        Add a new text note to the page to be saved next. The optional
        positionRect specifies the position of the new note on the
        page. It is outside the page per default to make sure it is
        invisible on printouts.
        """
        self._ensure_file().newTextnote(text, positionRect)
class FigureCanvasPdf(FigureCanvasBase):
    # docstring inherited
    fixed_dpi = 72
    filetypes = {'pdf': 'Portable Document Format'}
    def get_default_filetype(self):
        """Return the default output format for this canvas."""
        return 'pdf'
    def print_pdf(self, filename, *,
                  bbox_inches_restore=None, metadata=None):
        """Render the figure as a page of the PDF file *filename*."""
        image_dpi = self.figure.dpi  # keep for raster images
        self.figure.dpi = 72  # there are 72 pdf points to an inch
        width, height = self.figure.get_size_inches()
        is_multipage = isinstance(filename, PdfPages)
        if is_multipage:
            pdf_file = filename._ensure_file()
        else:
            pdf_file = PdfFile(filename, metadata=metadata)
        try:
            pdf_file.newPage(width, height)
            renderer = MixedModeRenderer(
                self.figure, width, height, image_dpi,
                RendererPdf(pdf_file, image_dpi, height, width),
                bbox_inches_restore=bbox_inches_restore)
            self.figure.draw(renderer)
            renderer.finalize()
            if not is_multipage:
                pdf_file.finalize()
        finally:
            if is_multipage:  # finish off this page
                pdf_file.endStream()
            else:  # we opened the file above; now finish it off
                pdf_file.close()
    def draw(self):
        """Update the figure's layout state without producing output."""
        self.figure.draw_without_rendering()
        return super().draw()
# The pdf backend needs no special manager behavior.
FigureManagerPdf = FigureManagerBase
@_Backend.export
class _BackendPdf(_Backend):
    # Registers the pdf backend's canvas class with matplotlib.
    FigureCanvas = FigureCanvasPdf
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@backends@backend_pdf.py@.PATH_END.py
|
{
"filename": "plot_shapelets.py",
"repo_name": "tslearn-team/tslearn",
"repo_path": "tslearn_extracted/tslearn-main/docs/examples/classification/plot_shapelets.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
"""
Learning Shapelets
==================
This example illustrates how the "Learning Shapelets" method can quickly
find a set of shapelets that results in excellent predictive performance
when used for a shapelet transform.
More information on the method can be found at:
http://fs.ismll.de/publicspace/LearningShapelets/.
"""
# Author: Romain Tavenard
# License: BSD 3 clause
import numpy
from sklearn.metrics import accuracy_score
import tensorflow as tf
import matplotlib.pyplot as plt
from tslearn.datasets import CachedDatasets
from tslearn.preprocessing import TimeSeriesScalerMinMax
from tslearn.shapelets import LearningShapelets, \
    grabocka_params_to_shapelet_size_dict
from tslearn.utils import ts_size
# Set seed for determinism
numpy.random.seed(0)
# Load the Trace dataset
X_train, y_train, X_test, y_test = CachedDatasets().load_dataset("Trace")
# Normalize each of the timeseries in the Trace dataset
# (min-max scaling is fit per-set here; the test set is scaled
# independently of the training set)
X_train = TimeSeriesScalerMinMax().fit_transform(X_train)
X_test = TimeSeriesScalerMinMax().fit_transform(X_test)
# Get statistics of the dataset
n_ts, ts_sz = X_train.shape[:2]
n_classes = len(set(y_train))
# Set the number of shapelets per size as done in the original paper
# (l controls the base shapelet length as a fraction of ts_sz, r the
# number of different lengths)
shapelet_sizes = grabocka_params_to_shapelet_size_dict(n_ts=n_ts,
                                                       ts_sz=ts_sz,
                                                       n_classes=n_classes,
                                                       l=0.1,
                                                       r=1)
# Define the model using parameters provided by the authors (except that we
# use fewer iterations here)
shp_clf = LearningShapelets(n_shapelets_per_size=shapelet_sizes,
                            optimizer=tf.optimizers.Adam(.01),
                            batch_size=16,
                            weight_regularizer=.01,
                            max_iter=200,
                            random_state=42,
                            verbose=0)
shp_clf.fit(X_train, y_train)
# Make predictions and calculate accuracy score
pred_labels = shp_clf.predict(X_test)
print("Correct classification rate:", accuracy_score(y_test, pred_labels))
# Plot the different discovered shapelets
# (one subplot per shapelet length; each learned shapelet is drawn in
# the subplot matching its length)
plt.figure()
for i, sz in enumerate(shapelet_sizes.keys()):
    plt.subplot(len(shapelet_sizes), 1, i + 1)
    plt.title("%d shapelets of size %d" % (shapelet_sizes[sz], sz))
    for shp in shp_clf.shapelets_:
        if ts_size(shp) == sz:
            plt.plot(shp.ravel())
    plt.xlim([0, max(shapelet_sizes.keys()) - 1])
plt.tight_layout()
plt.show()
# The loss history is accessible via the `model_` that is a keras model
plt.figure()
plt.plot(numpy.arange(1, shp_clf.n_iter_ + 1), shp_clf.history_["loss"])
plt.title("Evolution of cross-entropy loss during training")
plt.xlabel("Epochs")
plt.show()
|
tslearn-teamREPO_NAMEtslearnPATH_START.@tslearn_extracted@tslearn-main@docs@examples@classification@plot_shapelets.py@.PATH_END.py
|
{
"filename": "astroquery_test.py",
"repo_name": "mshubat/galaxy_data_mines",
"repo_path": "galaxy_data_mines_extracted/galaxy_data_mines-master/galaxy_data_mines/tobeintegrated/astroquery_test.py",
"type": "Python"
}
|
# Scratch script exercising astroquery lookups. The triple-quoted blocks
# below are disabled experiments kept as string literals.
'''
from astropy.table import Table
from astroquery.ned import Ned
result_table = Ned.query_region("m83")
result_table.pprint(show_unit=True)
cols = result_table.columns
print(type(cols))
t = Table(result_table)
for col in cols:
    if not col == "RA(deg)":
        del(t[col])
result_table.show_in_browser(jsviewer=True)
for col in cols:
    if not col == "RA(deg)":
        del(result_table[col])
result_table.show_in_browser(jsviewer=True)
'''
from astroquery.simbad import Simbad
# Simbad.list_votable_fields()
# Simbad.get_field_description("otype")
# Active experiment: query SIMBAD around M3 with the object-type
# ("otype") column added to the returned VOTable fields.
customSimbad = Simbad()
# customSimbad.get_votable_fields()
customSimbad.add_votable_fields("otype")
result_table = customSimbad.query_region("m3")
result_table.pprint(show_unit=True)
result_table.show_in_browser(jsviewer=True)
'''
from astroquery.simbad import Simbad
from astropy import coordinates
import astropy.units as u
# works only for ICRS coordinates:
c = coordinates.SkyCoord("05h35m17.3s -05d23m28s", frame='icrs')
r = 5 * u.arcminute
result_table = Simbad.query_region(c, radius=r)
result_table.pprint(show_unit=True, max_width=80, max_lines=5)
result_table.show_in_browser()
'''
|
mshubatREPO_NAMEgalaxy_data_minesPATH_START.@galaxy_data_mines_extracted@galaxy_data_mines-master@galaxy_data_mines@tobeintegrated@astroquery_test.py@.PATH_END.py
|
{
"filename": "bispectrum.py",
"repo_name": "SBU-COSMOLIKE/CAMB-Monodromic",
"repo_path": "CAMB-Monodromic_extracted/CAMB-Monodromic-main/camb/bispectrum.py",
"type": "Python"
}
|
from ctypes import c_int, c_double, c_char
# Note currently there is no python interface for getting bispectrum results.
# Fixed buffer size for the file-name field below.
# NOTE(review): presumably this must match the Fortran-side string
# length — confirm against the CAMB Fortran sources.
Ini_max_string_len = 1024
max_bispectrum_deltas = 5
class TBispectrumParams:
    # ctypes field layout for the bispectrum parameter block.
    # Fields marked "logical" carry Fortran logicals as c_int (0/1).
    _fields_ = [
        ("do_lensing_bispectrum", c_int),  # logical
        ("do_primordial_bispectrum", c_int),  # logical
        ("nfields", c_int),
        ("Slice_Base_L", c_int),
        ("deltas", c_int * max_bispectrum_deltas),
        ("do_parity_odd", c_int),  # logical
        ("DoFisher", c_int),  # logical
        ("export_alpha_beta", c_int),  # logical
        ("FisherNoise", c_double),
        ("FisherNoisePol", c_double),
        ("FisherNoiseFwhmArcmin", c_double),
        ("FullOutputFile", c_char * Ini_max_string_len),
        ("SparseFullOutput", c_int),  # logical
    ]
|
SBU-COSMOLIKEREPO_NAMECAMB-MonodromicPATH_START.@CAMB-Monodromic_extracted@CAMB-Monodromic-main@camb@bispectrum.py@.PATH_END.py
|
{
"filename": "strain.py",
"repo_name": "gwastro/pycbc",
"repo_path": "pycbc_extracted/pycbc-master/pycbc/strain/strain.py",
"type": "Python"
}
|
#Copyright (C) 2013 Alex Nitz
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Generals
# Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
This modules contains functions reading, generating, and segmenting strain data
"""
import copy
import logging
import functools
import numpy
from scipy.signal import kaiserord
import pycbc.types
from pycbc.types import TimeSeries, zeros
from pycbc.types import Array, FrequencySeries
from pycbc.types import MultiDetOptionAppendAction, MultiDetOptionAction
from pycbc.types import MultiDetOptionActionSpecial
from pycbc.types import DictOptionAction, MultiDetDictOptionAction
from pycbc.types import required_opts, required_opts_multi_ifo
from pycbc.types import ensure_one_opt, ensure_one_opt_multi_ifo
from pycbc.types import copy_opts_for_single_ifo, complex_same_precision_as
from pycbc.inject import InjectionSet, SGBurstInjectionSet
from pycbc.filter import resample_to_delta_t, lowpass, highpass, make_frequency_series
from pycbc.filter.zpk import filter_zpk
from pycbc.waveform.spa_tmplt import spa_distance
import pycbc.psd
from pycbc.fft import FFT, IFFT
import pycbc.events
import pycbc.frame
import pycbc.filter
logger = logging.getLogger('pycbc.strain.strain')
def next_power_of_2(n):
    """Return the smallest power of two strictly greater than *n*.

    Parameters
    ----------
    n : int
        A positive integer.

    Returns
    -------
    m : int
        The least integer power of 2 that exceeds n.
    """
    # For n >= 1, n.bit_length() is the exponent of the next power of 2
    # above n (e.g. 4 -> bit_length 3 -> 8; 5 -> 3 -> 8).
    return 2 ** n.bit_length()
def detect_loud_glitches(strain, psd_duration=4., psd_stride=2.,
                         psd_avg_method='median', low_freq_cutoff=30.,
                         threshold=50., cluster_window=5., corrupt_time=4.,
                         high_freq_cutoff=None, output_intermediates=False):
    """Automatic identification of loud transients for gating purposes.
    This function first estimates the PSD of the input time series using the
    FindChirp Welch method. Then it whitens the time series using that
    estimate. Finally, it computes the magnitude of the whitened series,
    thresholds it and applies the FindChirp clustering over time to the
    surviving samples.
    Parameters
    ----------
    strain : TimeSeries
        Input strain time series to detect glitches over.
    psd_duration : {float, 4}
        Duration of the segments for PSD estimation in seconds.
    psd_stride : {float, 2}
        Separation between PSD estimation segments in seconds.
    psd_avg_method : {string, 'median'}
        Method for averaging PSD estimation segments.
    low_freq_cutoff : {float, 30}
        Minimum frequency to include in the whitened strain.
    threshold : {float, 50}
        Minimum magnitude of whitened strain for considering a transient to
        be present.
    cluster_window : {float, 5}
        Length of time window to cluster surviving samples over, in seconds.
    corrupt_time : {float, 4}
        Amount of time to be discarded at the beginning and end of the input
        time series.
    high_freq_cutoff : {float, None}
        Maximum frequency to include in the whitened strain. If given, the
        input series is downsampled accordingly. If omitted, the Nyquist
        frequency is used.
    output_intermediates : {bool, False}
        Save intermediate time series for debugging.
    Returns
    -------
    times : list
        GPS times of the clustered samples that exceed the threshold.
    """
    # Downsample if a maximum frequency was requested; otherwise work on a
    # copy so the caller's series is not modified by the tapering below.
    if high_freq_cutoff:
        strain = resample_to_delta_t(strain, 0.5 / high_freq_cutoff,
                                     method='ldas')
    else:
        strain = strain.copy()
    # taper strain: ramp the first and last corrupt_length samples to zero
    corrupt_length = int(corrupt_time * strain.sample_rate)
    w = numpy.arange(corrupt_length) / float(corrupt_length)
    strain[0:corrupt_length] *= pycbc.types.Array(w, dtype=strain.dtype)
    strain[(len(strain) - corrupt_length):] *= \
        pycbc.types.Array(w[::-1], dtype=strain.dtype)
    if output_intermediates:
        strain.save_to_wav('strain_conditioned.wav')
    # zero-pad strain to a power-of-2 length, centering the data in the pad
    strain_pad_length = next_power_of_2(len(strain))
    pad_start = int(strain_pad_length / 2 - len(strain) / 2)
    pad_end = pad_start + len(strain)
    pad_epoch = strain.start_time - pad_start / float(strain.sample_rate)
    strain_pad = pycbc.types.TimeSeries(
        pycbc.types.zeros(strain_pad_length, dtype=strain.dtype),
        delta_t=strain.delta_t, copy=False, epoch=pad_epoch)
    strain_pad[pad_start:pad_end] = strain[:]
    # estimate the PSD, excluding the corrupted ends of the series
    psd = pycbc.psd.welch(strain[corrupt_length:(len(strain)-corrupt_length)],
                          seg_len=int(psd_duration * strain.sample_rate),
                          seg_stride=int(psd_stride * strain.sample_rate),
                          avg_method=psd_avg_method,
                          require_exact_data_fit=False)
    psd = pycbc.psd.interpolate(psd, 1. / strain_pad.duration)
    psd = pycbc.psd.inverse_spectrum_truncation(
        psd, int(psd_duration * strain.sample_rate),
        low_frequency_cutoff=low_freq_cutoff,
        trunc_method='hann')
    # exclude frequencies outside the requested band from the whitening
    kmin = int(low_freq_cutoff / psd.delta_f)
    psd[0:kmin] = numpy.inf
    if high_freq_cutoff:
        kmax = int(high_freq_cutoff / psd.delta_f)
        psd[kmax:] = numpy.inf
    # whiten
    strain_tilde = strain_pad.to_frequencyseries()
    if high_freq_cutoff:
        norm = high_freq_cutoff - low_freq_cutoff
    else:
        norm = strain.sample_rate / 2. - low_freq_cutoff
    strain_tilde *= (psd * norm) ** (-0.5)
    strain_pad = strain_tilde.to_timeseries()
    if output_intermediates:
        strain_pad[pad_start:pad_end].save_to_wav('strain_whitened.wav')
    mag = abs(strain_pad[pad_start:pad_end])
    if output_intermediates:
        mag.save('strain_whitened_mag.npy')
    mag = mag.numpy()
    # remove strain corrupted by filters at the ends
    mag[0:corrupt_length] = 0
    mag[-1:-corrupt_length-1:-1] = 0
    # find peaks and their times
    indices = numpy.where(mag > threshold)[0]
    cluster_idx = pycbc.events.findchirp_cluster_over_window(
        indices, numpy.array(mag[indices]),
        int(cluster_window*strain.sample_rate))
    times = [idx * strain.delta_t + strain.start_time \
             for idx in indices[cluster_idx]]
    return times
def from_cli(opt, dyn_range_fac=1, precision='single',
             inj_filter_rejector=None):
    """Parses the CLI options related to strain data reading and conditioning.
    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (gps-start-time, gps-end-time, strain-high-pass,
        pad-data, sample-rate, (frame-cache or frame-files), channel-name,
        fake-strain, fake-strain-seed, fake-strain-from-file, gating_file).
    dyn_range_fac : {float, 1}, optional
        A large constant to reduce the dynamic range of the strain.
    precision : string
        Precision of the returned strain ('single' or 'double').
    inj_filter_rejector : InjFilterRejector instance; optional, default=None
        If given send the InjFilterRejector instance to the inject module so
        that it can store a reduced representation of injections if
        necessary.
    Returns
    -------
    strain : TimeSeries
        The time series containing the conditioned strain data.
    """
    # Record of all gating applied; attached to the returned strain at the end.
    gating_info = {}
    injector = InjectionSet.from_cli(opt)
    # Read real data from frame files/cache, datafind, or an HDF store.
    if opt.frame_cache or opt.frame_files or opt.frame_type or opt.hdf_store:
        if opt.frame_cache:
            frame_source = opt.frame_cache
        if opt.frame_files:
            frame_source = opt.frame_files
        logger.info("Reading Frames")
        if hasattr(opt, 'frame_sieve') and opt.frame_sieve:
            sieve = opt.frame_sieve
        else:
            sieve = None
        if opt.frame_type:
            strain = pycbc.frame.query_and_read_frame(
                opt.frame_type, opt.channel_name,
                start_time=opt.gps_start_time-opt.pad_data,
                end_time=opt.gps_end_time+opt.pad_data,
                sieve=sieve)
        elif opt.frame_files or opt.frame_cache:
            strain = pycbc.frame.read_frame(
                frame_source, opt.channel_name,
                start_time=opt.gps_start_time-opt.pad_data,
                end_time=opt.gps_end_time+opt.pad_data,
                sieve=sieve)
        elif opt.hdf_store:
            strain = pycbc.frame.read_store(opt.hdf_store, opt.channel_name,
                                            opt.gps_start_time - opt.pad_data,
                                            opt.gps_end_time + opt.pad_data)
    # Otherwise synthesize strain from a model PSD, an ASD file, or zeros.
    elif opt.fake_strain or opt.fake_strain_from_file:
        logger.info("Generating Fake Strain")
        duration = opt.gps_end_time - opt.gps_start_time
        duration += 2 * opt.pad_data
        pdf = 1.0 / opt.fake_strain_filter_duration
        fake_flow = opt.fake_strain_flow
        fake_rate = opt.fake_strain_sample_rate
        fake_extra_args = opt.fake_strain_extra_args
        plen = round(opt.sample_rate / pdf) // 2 + 1
        if opt.fake_strain_from_file:
            logger.info("Reading ASD from file")
            strain_psd = pycbc.psd.from_txt(opt.fake_strain_from_file,
                                            plen, pdf,
                                            fake_flow,
                                            is_asd_file=True)
        elif opt.fake_strain != 'zeroNoise':
            logger.info("Making PSD for strain")
            strain_psd = pycbc.psd.from_string(opt.fake_strain, plen, pdf,
                                               fake_flow, **fake_extra_args)
        if opt.fake_strain == 'zeroNoise':
            logger.info("Making zero-noise time series")
            strain = TimeSeries(pycbc.types.zeros(duration * fake_rate),
                                delta_t=1.0 / fake_rate,
                                epoch=opt.gps_start_time - opt.pad_data)
        else:
            logger.info("Making colored noise")
            from pycbc.noise.reproduceable import colored_noise
            strain = colored_noise(strain_psd,
                                   opt.gps_start_time - opt.pad_data,
                                   opt.gps_end_time + opt.pad_data,
                                   seed=opt.fake_strain_seed,
                                   sample_rate=fake_rate,
                                   low_frequency_cutoff=fake_flow,
                                   filter_duration=1.0/pdf)
        if not strain.sample_rate_close(fake_rate):
            err_msg = "Actual sample rate of generated data does not match "
            err_msg += "that expected. Possible causes of this:\n"
            err_msg += "The desired duration is not a multiple of delta_t. "
            err_msg += "e.g. If using LISA with delta_t = 15 the duration "
            err_msg += "must be a multiple of 15 seconds."
            raise ValueError(err_msg)
        # Injections into fake strain need a channel name to identify the ifo.
        if not opt.channel_name and (opt.injection_file \
                                     or opt.sgburst_injection_file):
            raise ValueError('Please provide channel names with the format '
                             'ifo:channel (e.g. H1:CALIB-STRAIN) to inject '
                             'simulated signals into fake strain')
    # Optional zero-pole-gain filter; the data is highpassed first.
    if opt.zpk_z and opt.zpk_p and opt.zpk_k:
        logger.info("Highpass Filtering")
        strain = highpass(strain, frequency=opt.strain_high_pass)
        logger.info("Applying zpk filter")
        z = numpy.array(opt.zpk_z)
        p = numpy.array(opt.zpk_p)
        k = float(opt.zpk_k)
        strain = filter_zpk(strain.astype(numpy.float64), z, p, k)
    if opt.normalize_strain:
        logger.info("Dividing strain by constant")
        # Plain constant rescaling by the user-supplied divisor.
        l = opt.normalize_strain
        strain = strain / l
    if opt.strain_high_pass:
        logger.info("Highpass Filtering")
        strain = highpass(strain, frequency=opt.strain_high_pass)
    if opt.sample_rate:
        logger.info("Resampling data")
        strain = resample_to_delta_t(strain,
                                     1. / opt.sample_rate,
                                     method='ldas')
    if injector is not None:
        logger.info("Applying injections")
        injections = \
            injector.apply(strain, opt.channel_name.split(':')[0],
                           distance_scale=opt.injection_scale_factor,
                           injection_sample_rate=opt.injection_sample_rate,
                           inj_filter_rejector=inj_filter_rejector)
    if opt.sgburst_injection_file:
        logger.info("Applying sine-Gaussian burst injections")
        injector = SGBurstInjectionSet(opt.sgburst_injection_file)
        injector.apply(strain, opt.channel_name.split(':')[0],
                       distance_scale=opt.injection_scale_factor)
    # Convert to the requested precision, applying the dynamic-range factor.
    if precision == 'single':
        logger.info("Converting to float32")
        strain = (strain * dyn_range_fac).astype(pycbc.types.float32)
    elif precision == "double":
        logger.info("Converting to float64")
        strain = (strain * dyn_range_fac).astype(pycbc.types.float64)
    else:
        raise ValueError("Unrecognized precision {}".format(precision))
    # Apply gates listed in a user-provided file.
    if opt.gating_file is not None:
        logger.info("Gating times contained in gating file")
        gate_params = numpy.loadtxt(opt.gating_file)
        if len(gate_params.shape) == 1:
            gate_params = [gate_params]
        for gate_time, gate_window, gate_taper in gate_params:
            strain = strain.gate(gate_time, window=gate_window,
                                 method=opt.gating_method,
                                 copy=False,
                                 taper_width=gate_taper)
        # Only record gates that overlap the analysed segment.
        gating_info['file'] = \
            [gp for gp in gate_params \
                if (gp[0] + gp[1] + gp[2] >= strain.start_time) \
                and (gp[0] - gp[1] - gp[2] <= strain.end_time)]
    # Iteratively detect and gate loud glitches in the whitened strain.
    if opt.autogating_threshold is not None:
        gating_info['auto'] = []
        for _ in range(opt.autogating_max_iterations):
            glitch_times = detect_loud_glitches(
                strain, threshold=opt.autogating_threshold,
                cluster_window=opt.autogating_cluster,
                low_freq_cutoff=opt.strain_high_pass,
                corrupt_time=opt.pad_data + opt.autogating_pad)
            gate_params = [[gt, opt.autogating_width, opt.autogating_taper]
                           for gt in glitch_times]
            gating_info['auto'] += gate_params
            for gate_time, gate_window, gate_taper in gate_params:
                strain = strain.gate(gate_time, window=gate_window,
                                     method=opt.gating_method,
                                     copy=False,
                                     taper_width=gate_taper)
            if len(glitch_times) > 0:
                logger.info('Autogating at %s',
                            ', '.join(['%.3f' % gt
                                       for gt in glitch_times]))
            else:
                # No further glitches found; stop iterating.
                break
    # Filter again now that injections and gating have been applied.
    if opt.strain_high_pass:
        logger.info("Highpass Filtering")
        strain = highpass(strain, frequency=opt.strain_high_pass)
    if opt.strain_low_pass:
        logger.info("Lowpass Filtering")
        strain = lowpass(strain, frequency=opt.strain_low_pass)
    # Subtract noise predicted from witness channels using the transfer
    # functions stored (one per witness channel) in the witness-tf-file.
    if hasattr(opt, 'witness_frame_type') and opt.witness_frame_type:
        stilde = strain.to_frequencyseries()
        from pycbc.io.hdf import HFile
        tf_file = HFile(opt.witness_tf_file)
        for key in tf_file:
            witness = pycbc.frame.query_and_read_frame(opt.witness_frame_type,
                                                       str(key),
                                                       start_time=strain.start_time,
                                                       end_time=strain.end_time)
            witness = (witness * dyn_range_fac).astype(strain.dtype)
            tf = pycbc.types.load_frequencyseries(opt.witness_tf_file,
                                                  group=key)
            tf = tf.astype(stilde.dtype)
            flen = int(opt.witness_filter_length * strain.sample_rate)
            tf = pycbc.psd.interpolate(tf, stilde.delta_f)
            tf_time = tf.to_timeseries()
            # Taper the transfer function in the time domain to limit its
            # effective filter length to witness-filter-length seconds.
            window = Array(numpy.hanning(flen * 2), dtype=strain.dtype)
            tf_time[0:flen] *= window[flen:]
            tf_time[len(tf_time)-flen:] *= window[0:flen]
            tf = tf_time.to_frequencyseries()
            kmax = min(len(tf), len(stilde) - 1)
            stilde[:kmax] -= tf[:kmax] * witness.to_frequencyseries()[:kmax]
        strain = stilde.to_timeseries()
    # Discard the padding added when the data was read or generated.
    if opt.pad_data:
        logger.info("Remove Padding")
        start = int(opt.pad_data * strain.sample_rate)
        end = int(len(strain) - strain.sample_rate * opt.pad_data)
        strain = strain[start:end]
    if opt.taper_data:
        logger.info("Tapering data")
        # Use auto-gating, a one-sided gate is a taper
        pd_taper_window = opt.taper_data
        gate_params = [(strain.start_time, 0., pd_taper_window)]
        gate_params.append((strain.end_time, 0., pd_taper_window))
        gate_data(strain, gate_params)
    if injector is not None:
        strain.injections = injections
    strain.gating_info = gating_info
    return strain
def from_cli_single_ifo(opt, ifo, inj_filter_rejector=None, **kwargs):
    """
    Get the strain for a single ifo when using the multi-detector CLI
    """
    # Reduce the multi-detector options down to this ifo, then delegate.
    return from_cli(copy_opts_for_single_ifo(opt, ifo),
                    inj_filter_rejector=inj_filter_rejector, **kwargs)
def from_cli_multi_ifos(opt, ifos, inj_filter_rejector_dict=None, **kwargs):
    """
    Get the strain for all ifos when using the multi-detector CLI
    """
    if inj_filter_rejector_dict is None:
        # No rejectors supplied: use None for every detector.
        inj_filter_rejector_dict = dict.fromkeys(ifos, None)
    return {ifo: from_cli_single_ifo(opt, ifo,
                                     inj_filter_rejector_dict[ifo], **kwargs)
            for ifo in ifos}
def insert_strain_option_group(parser, gps_times=True):
    """ Add strain-related options to the optparser object.
    Adds the options used to call the pycbc.strain.from_cli function to an
    optparser as an OptionGroup. This should be used if you
    want to use these options in your code.
    Parameters
    -----------
    parser : object
        OptionParser instance.
    gps_times : bool, optional
        Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
        is True.
    Returns
    -------
    data_reading_group : argparse argument group
        The argument group holding all of the strain-reading options.
    """
    data_reading_group = parser.add_argument_group("Options for obtaining h(t)",
                  "These options are used for generating h(t) either by "
                  "reading from a file or by generating it. This is only "
                  "needed if the PSD is to be estimated from the data, ie. "
                  " if the --psd-estimation option is given.")
    # Required options
    if gps_times:
        data_reading_group.add_argument("--gps-start-time",
                                help="The gps start time of the data "
                                     "(integer seconds)", type=int)
        data_reading_group.add_argument("--gps-end-time",
                                help="The gps end time of the data "
                                     "(integer seconds)", type=int)
    data_reading_group.add_argument("--strain-high-pass", type=float,
                                    help="High pass frequency")
    data_reading_group.add_argument("--strain-low-pass", type=float,
                                    help="Low pass frequency")
    data_reading_group.add_argument("--pad-data", default=8,
                                    help="Extra padding to remove highpass corruption "
                                         "(integer seconds, default 8)", type=int)
    data_reading_group.add_argument("--taper-data",
                                    help="Taper ends of data to zero using the supplied length as a "
                                         "window (integer seconds)", type=int, default=0)
    data_reading_group.add_argument("--sample-rate", type=float,
                                    help="The sample rate to use for h(t) generation (integer Hz)")
    data_reading_group.add_argument("--channel-name", type=str,
                                    help="The channel containing the gravitational strain data")
    # Read from cache file
    data_reading_group.add_argument("--frame-cache", type=str, nargs="+",
                                    help="Cache file containing the frame locations.")
    # Read from frame files
    data_reading_group.add_argument("--frame-files",
                                    type=str, nargs="+",
                                    help="list of frame files")
    # Read from hdf store file
    data_reading_group.add_argument("--hdf-store",
                                    type=str,
                                    help="Store of time series data in hdf format")
    # Use datafind to get frame files
    data_reading_group.add_argument("--frame-type",
                                    type=str,
                                    metavar="S:TYPE",
                                    help="(optional), replaces frame-files. Use datafind "
                                         "to get the needed frame file(s) of this type "
                                         "from site S.")
    # Filter frame files by URL
    data_reading_group.add_argument("--frame-sieve",
                                    type=str,
                                    help="(optional), Only use frame files where the "
                                         "URL matches the regular expression given.")
    # Generate gaussian noise with given psd
    data_reading_group.add_argument("--fake-strain",
                help="Name of model PSD for generating fake gaussian noise.",
                choices=pycbc.psd.get_psd_model_list() + ['zeroNoise'])
    data_reading_group.add_argument("--fake-strain-extra-args",
                                    nargs='+', action=DictOptionAction,
                                    metavar='PARAM:VALUE', default={}, type=float,
                                    help="(optional) Extra arguments passed to "
                                         "the PSD models.")
    data_reading_group.add_argument("--fake-strain-seed", type=int, default=0,
                help="Seed value for the generation of fake colored"
                     " gaussian noise")
    data_reading_group.add_argument("--fake-strain-from-file",
                help="File containing ASD for generating fake noise from it.")
    data_reading_group.add_argument("--fake-strain-flow",
                default=1.0, type=float,
                help="Low frequency cutoff of the fake strain")
    data_reading_group.add_argument("--fake-strain-filter-duration",
                default=128.0, type=float,
                help="Duration in seconds of the fake data coloring filter")
    data_reading_group.add_argument("--fake-strain-sample-rate",
                default=16384, type=float,
                help="Sample rate of the fake data generation")
    # Injection options
    data_reading_group.add_argument("--injection-file", type=str,
                      help="(optional) Injection file containing parameters"
                           " of CBC signals to be added to the strain")
    data_reading_group.add_argument("--sgburst-injection-file", type=str,
                      help="(optional) Injection file containing parameters"
                           "of sine-Gaussian burst signals to add to the strain")
    data_reading_group.add_argument("--injection-scale-factor", type=float,
                                    default=1,
                                    help="Divide injections by this factor "
                                         "before adding to the strain data")
    data_reading_group.add_argument("--injection-sample-rate", type=float,
                                    help="Sample rate to use for injections (integer Hz). "
                                         "Typically similar to the strain data sample rate."
                                         "If not provided, the strain sample rate will be "
                                         "used")
    data_reading_group.add_argument("--injection-f-ref", type=float,
                                    help="Reference frequency in Hz for creating CBC "
                                         "injections from an XML file")
    data_reading_group.add_argument("--injection-f-final", type=float,
                                    help="Override the f_final field of a CBC XML "
                                         "injection file (frequency in Hz)")
    # Gating options
    data_reading_group.add_argument("--gating-file", type=str,
                    help="(optional) Text file of gating segments to apply."
                         " Format of each line is (all values in seconds):"
                         " gps_time zeros_half_width pad_half_width")
    data_reading_group.add_argument('--autogating-threshold', type=float,
                                    metavar='SIGMA',
                                    help='If given, find and gate glitches '
                                         'producing a deviation larger than '
                                         'SIGMA in the whitened strain time '
                                         'series.')
    data_reading_group.add_argument('--autogating-max-iterations', type=int,
                                    metavar='SIGMA', default=1,
                                    help='If given, iteratively apply '
                                         'autogating')
    data_reading_group.add_argument('--autogating-cluster', type=float,
                                    metavar='SECONDS', default=5.,
                                    help='Length of clustering window for '
                                         'detecting glitches for autogating.')
    data_reading_group.add_argument('--autogating-width', type=float,
                                    metavar='SECONDS', default=0.25,
                                    help='Half-width of the gating window.')
    data_reading_group.add_argument('--autogating-taper', type=float,
                                    metavar='SECONDS', default=0.25,
                                    help='Taper the strain before and after '
                                         'each gating window over a duration '
                                         'of SECONDS.')
    data_reading_group.add_argument('--autogating-pad', type=float,
                                    metavar='SECONDS', default=16,
                                    help='Ignore the given length of whitened '
                                         'strain at the ends of a segment, to '
                                         'avoid filters ringing.')
    data_reading_group.add_argument('--gating-method', type=str,
                                    default='taper',
                                    help='Choose the method for gating. '
                                         'Default: `taper`',
                                    choices=['hard', 'taper', 'paint'])
    # Optional
    data_reading_group.add_argument("--normalize-strain", type=float,
                     help="(optional) Divide frame data by constant.")
    data_reading_group.add_argument("--zpk-z", type=float, nargs="+",
                     help="(optional) Zero-pole-gain (zpk) filter strain. "
                          "A list of zeros for transfer function")
    data_reading_group.add_argument("--zpk-p", type=float, nargs="+",
                     help="(optional) Zero-pole-gain (zpk) filter strain. "
                          "A list of poles for transfer function")
    data_reading_group.add_argument("--zpk-k", type=float,
                     help="(optional) Zero-pole-gain (zpk) filter strain. "
                          "Transfer function gain")
    # Options to apply to subtract noise from a witness channel and known
    # transfer function.
    data_reading_group.add_argument("--witness-frame-type", type=str,
                    help="(optional), frame type which will be use to query the"
                         " witness channel data.")
    data_reading_group.add_argument("--witness-tf-file", type=str,
                    help="an hdf file containing the transfer"
                         " functions and the associated channel names")
    data_reading_group.add_argument("--witness-filter-length", type=float,
                    help="filter length in seconds for the transfer function")
    return data_reading_group
# FIXME: This repeats almost all of the options above. Any nice way of reducing
# this?
def insert_strain_option_group_multi_ifo(parser, gps_times=True):
    """
    Adds the options used to call the pycbc.strain.from_cli function to an
    optparser as an OptionGroup. This should be used if you
    want to use these options in your code.
    Parameters
    -----------
    parser : object
        OptionParser instance.
    gps_times : bool, optional
        Include ``--gps-start-time`` and ``--gps-end-time`` options. Default
        is True.
    Returns
    -------
    data_reading_group_multi : argparse argument group
        The argument group holding all of the strain-reading options.
    """
    data_reading_group_multi = parser.add_argument_group("Options for obtaining"
                  " h(t)",
                  "These options are used for generating h(t) either by "
                  "reading from a file or by generating it. This is only "
                  "needed if the PSD is to be estimated from the data, ie. "
                  "if the --psd-estimation option is given. This group "
                  "supports reading from multiple ifos simultaneously.")
    # Required options
    if gps_times:
        data_reading_group_multi.add_argument(
            "--gps-start-time", nargs='+', action=MultiDetOptionAction,
            metavar='IFO:TIME', type=int,
            help="The gps start time of the data (integer seconds)")
        data_reading_group_multi.add_argument(
            "--gps-end-time", nargs='+', action=MultiDetOptionAction,
            metavar='IFO:TIME', type=int,
            help="The gps end time of the data (integer seconds)")
    data_reading_group_multi.add_argument("--strain-high-pass", nargs='+',
                                          action=MultiDetOptionAction,
                                          type=float, metavar='IFO:FREQUENCY',
                                          help="High pass frequency")
    data_reading_group_multi.add_argument("--strain-low-pass", nargs='+',
                                          action=MultiDetOptionAction,
                                          type=float, metavar='IFO:FREQUENCY',
                                          help="Low pass frequency")
    data_reading_group_multi.add_argument("--pad-data", nargs='+', default=8,
                                          action=MultiDetOptionAction,
                                          type=int, metavar='IFO:LENGTH',
                                          help="Extra padding to remove highpass corruption "
                                               "(integer seconds, default 8)")
    data_reading_group_multi.add_argument("--taper-data", nargs='+',
                                          action=MultiDetOptionAction,
                                          type=int, default=0, metavar='IFO:LENGTH',
                                          help="Taper ends of data to zero using the "
                                               "supplied length as a window (integer seconds)")
    data_reading_group_multi.add_argument("--sample-rate", type=float,
                                          nargs='+',
                                          action=MultiDetOptionAction, metavar='IFO:RATE',
                                          help="The sample rate to use for h(t) generation "
                                               " (integer Hz).")
    data_reading_group_multi.add_argument("--channel-name", type=str, nargs='+',
                                          action=MultiDetOptionActionSpecial,
                                          metavar='IFO:CHANNEL',
                                          help="The channel containing the gravitational "
                                               "strain data")
    # Read from cache file
    data_reading_group_multi.add_argument("--frame-cache", type=str, nargs="+",
                                          action=MultiDetOptionAppendAction,
                                          metavar='IFO:FRAME_CACHE',
                                          help="Cache file containing the frame locations.")
    # Read from frame files
    data_reading_group_multi.add_argument("--frame-files", type=str, nargs="+",
                                          action=MultiDetOptionAppendAction,
                                          metavar='IFO:FRAME_FILES',
                                          help="list of frame files")
    # Read from hdf store file
    data_reading_group_multi.add_argument("--hdf-store", type=str, nargs='+',
                                          action=MultiDetOptionAction,
                                          metavar='IFO:HDF_STORE_FILE',
                                          help="Store of time series data in hdf format")
    # Use datafind to get frame files
    data_reading_group_multi.add_argument("--frame-type", type=str, nargs="+",
                                          action=MultiDetOptionActionSpecial,
                                          metavar='IFO:FRAME_TYPE',
                                          help="(optional) Replaces frame-files. "
                                               "Use datafind to get the needed frame "
                                               "file(s) of this type.")
    # Filter frame files by URL
    data_reading_group_multi.add_argument("--frame-sieve", type=str, nargs="+",
                                          action=MultiDetOptionAction,
                                          metavar='IFO:FRAME_SIEVE',
                                          help="(optional), Only use frame files where the "
                                               "URL matches the regular expression given.")
    # Generate gaussian noise with given psd
    data_reading_group_multi.add_argument("--fake-strain", type=str, nargs="+",
                                          action=MultiDetOptionAction, metavar='IFO:CHOICE',
                                          help="Name of model PSD for generating fake "
                                               "gaussian noise. Choose from %s or zeroNoise" \
                                               %((', ').join(pycbc.psd.get_lalsim_psd_list()),) )
    data_reading_group_multi.add_argument("--fake-strain-extra-args",
                                          nargs='+', action=MultiDetDictOptionAction,
                                          metavar='DETECTOR:PARAM:VALUE', default={},
                                          type=float, help="(optional) Extra arguments "
                                          "passed to the PSD models.")
    data_reading_group_multi.add_argument("--fake-strain-seed", type=int,
                                          default=0, nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:SEED',
                                          help="Seed value for the generation of fake "
                                               "colored gaussian noise")
    data_reading_group_multi.add_argument("--fake-strain-from-file", nargs="+",
                                          action=MultiDetOptionAction, metavar='IFO:FILE',
                                          help="File containing ASD for generating fake "
                                               "noise from it.")
    data_reading_group_multi.add_argument("--fake-strain-flow",
                                          default=1.0, type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          help="Low frequency cutoff of the fake strain")
    data_reading_group_multi.add_argument("--fake-strain-filter-duration",
                                          default=128.0, type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          help="Duration in seconds of the fake data coloring filter")
    data_reading_group_multi.add_argument("--fake-strain-sample-rate",
                                          default=16384, type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          help="Sample rate of the fake data generation")
    # Injection options
    data_reading_group_multi.add_argument("--injection-file", type=str,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:FILE',
                                          help="(optional) Injection file containing parameters"
                                               "of CBC signals to be added to the strain")
    data_reading_group_multi.add_argument("--sgburst-injection-file", type=str,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:FILE',
                                          help="(optional) Injection file containing parameters"
                                               "of sine-Gaussian burst signals to add to the strain")
    data_reading_group_multi.add_argument("--injection-scale-factor",
                                          type=float, nargs="+", action=MultiDetOptionAction,
                                          metavar="IFO:VAL", default=1.,
                                          help="Divide injections by this factor "
                                               "before adding to the strain data")
    data_reading_group_multi.add_argument("--injection-sample-rate",
                                          type=float, nargs="+", action=MultiDetOptionAction,
                                          metavar="IFO:VAL",
                                          help="Sample rate to use for injections (integer Hz). "
                                               "Typically similar to the strain data sample rate."
                                               "If not provided, the strain sample rate will be "
                                               "used")
    data_reading_group_multi.add_argument("--injection-f-ref", type=float,
                                          action=MultiDetOptionAction, metavar='IFO:VALUE',
                                          help="Reference frequency in Hz for creating CBC "
                                               "injections from an XML file")
    data_reading_group_multi.add_argument('--injection-f-final', type=float,
                                          action=MultiDetOptionAction, metavar='IFO:VALUE',
                                          help="Override the f_final field of a CBC XML "
                                               "injection file (frequency in Hz)")
    # Gating options
    data_reading_group_multi.add_argument("--gating-file", nargs="+",
                                          action=MultiDetOptionAction,
                                          metavar='IFO:FILE',
                                          help='(optional) Text file of gating segments to apply.'
                                               ' Format of each line (units s) :'
                                               ' gps_time zeros_half_width pad_half_width')
    data_reading_group_multi.add_argument('--autogating-threshold', type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:SIGMA',
                                          help='If given, find and gate glitches producing a '
                                               'deviation larger than SIGMA in the whitened strain'
                                               ' time series')
    data_reading_group_multi.add_argument('--autogating-max-iterations', type=int,
                                          metavar='SIGMA', default=1,
                                          help='If given, iteratively apply '
                                               'autogating')
    data_reading_group_multi.add_argument('--autogating-cluster', type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:SECONDS', default=5.,
                                          help='Length of clustering window for '
                                               'detecting glitches for autogating.')
    data_reading_group_multi.add_argument('--autogating-width', type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:SECONDS', default=0.25,
                                          help='Half-width of the gating window.')
    data_reading_group_multi.add_argument('--autogating-taper', type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:SECONDS', default=0.25,
                                          help='Taper the strain before and after '
                                               'each gating window over a duration '
                                               'of SECONDS.')
    data_reading_group_multi.add_argument('--autogating-pad', type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:SECONDS', default=16,
                                          help='Ignore the given length of whitened '
                                               'strain at the ends of a segment, to '
                                               'avoid filters ringing.')
    data_reading_group_multi.add_argument('--gating-method', type=str,
                                          nargs='+', action=MultiDetOptionAction,
                                          default='taper',
                                          help='Choose the method for gating. '
                                               'Default: `taper`',
                                          choices=['hard', 'taper', 'paint'])
    # Optional
    data_reading_group_multi.add_argument("--normalize-strain", type=float,
                                          nargs="+", action=MultiDetOptionAction,
                                          metavar='IFO:VALUE',
                                          help="(optional) Divide frame data by constant.")
    data_reading_group_multi.add_argument("--zpk-z", type=float,
                                          nargs="+", action=MultiDetOptionAppendAction,
                                          metavar='IFO:VALUE',
                                          help="(optional) Zero-pole-gain (zpk) filter strain. "
                                               "A list of zeros for transfer function")
    data_reading_group_multi.add_argument("--zpk-p", type=float,
                                          nargs="+", action=MultiDetOptionAppendAction,
                                          metavar='IFO:VALUE',
                                          help="(optional) Zero-pole-gain (zpk) filter strain. "
                                               "A list of poles for transfer function")
    data_reading_group_multi.add_argument("--zpk-k", type=float,
                                          nargs="+", action=MultiDetOptionAppendAction,
                                          metavar='IFO:VALUE',
                                          help="(optional) Zero-pole-gain (zpk) filter strain. "
                                               "Transfer function gain")
    return data_reading_group_multi
# Groups of mutually-exclusive options: exactly one option from each group
# must be supplied to specify the source of the strain data.
ensure_one_opt_groups = []
ensure_one_opt_groups.append(['--frame-cache','--fake-strain',
                              '--fake-strain-from-file',
                              '--frame-files', '--frame-type',
                              '--hdf-store'])
# Options that must always be provided when reading/generating strain data.
required_opts_list = ['--gps-start-time', '--gps-end-time',
                      '--pad-data', '--sample-rate',
                      '--channel-name']
def verify_strain_options(opts, parser):
    """Sanity check provided strain arguments.

    Verifies that the strain-related CLI options are consistent and
    reasonable: exactly one strain source was chosen and all required
    options were supplied.

    Parameters
    ----------
    opts : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (gps-start-time, gps-end-time, strain-high-pass,
        pad-data, sample-rate, frame-cache, channel-name, fake-strain,
        fake-strain-seed).
    parser : object
        OptionParser instance.
    """
    for exclusive_group in ensure_one_opt_groups:
        ensure_one_opt(opts, parser, exclusive_group)
    required_opts(opts, parser, required_opts_list)
def verify_strain_options_multi_ifo(opts, parser, ifos):
    """Sanity check provided strain arguments.

    Verifies, for each requested detector, that the strain-related CLI
    options are consistent and reasonable: exactly one strain source was
    chosen and all required options were supplied.

    Parameters
    ----------
    opts : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (gps-start-time, gps-end-time, strain-high-pass,
        pad-data, sample-rate, frame-cache, channel-name, fake-strain,
        fake-strain-seed).
    parser : object
        OptionParser instance.
    ifos : list of strings
        List of ifos for which to verify options for
    """
    for det in ifos:
        for exclusive_group in ensure_one_opt_groups:
            ensure_one_opt_multi_ifo(opts, parser, det, exclusive_group)
        required_opts_multi_ifo(opts, parser, det, required_opts_list)
def gate_data(data, gate_params):
    """Apply a set of gating windows to a time series.

    Each gating window is defined by a central time, a given duration
    (centered on the given time) to zero out, and a given duration of smooth
    tapering on each side of the window. The window function used for
    tapering is a Tukey window.

    Parameters
    ----------
    data : TimeSeries
        The time series to be gated.
    gate_params : list
        List of parameters for the gating windows. Each element should be a
        list or tuple with 3 elements: the central time of the gating window,
        the half-duration of the portion to zero out, and the duration of the
        Tukey tapering on each side. All times in seconds. The total duration
        of the data affected by one gating window is thus twice the second
        parameter plus twice the third parameter.

    Returns
    -------
    data: TimeSeries
        The gated time series.
    """
    def _inverted_tukey(total_len, taper_len):
        # Ramp down to zero, stay at zero, then ramp back up (the complement
        # of a Tukey window).
        flat_len = total_len - 2 * taper_len
        if flat_len < 0:
            raise ValueError("No zeros left after applying padding.")
        ramp = 0.5 * (1. + numpy.cos(
            numpy.pi * numpy.arange(taper_len) / taper_len))
        return numpy.concatenate((ramp, numpy.zeros(flat_len), ramp[::-1]))

    rate = 1. / data.delta_t
    samples = data.data
    for center, zero_hw, taper_w in gate_params:
        seg_start = center - zero_hw - taper_w - data.start_time
        seg_end = center + zero_hw + taper_w - data.start_time
        if seg_start > data.duration or seg_end < 0.:
            # This gate lies entirely outside the time series.
            continue
        n_win = int(2 * rate * (zero_hw + taper_w))
        n_taper = int(rate * taper_w)
        win = _inverted_tukey(n_win, n_taper)
        offset = int(seg_start * rate)
        # Clip the window so that it never indexes outside the data.
        lo = max(0, -offset)
        hi = min(len(win), len(data) - offset)
        samples[lo + offset:hi + offset] *= win[lo:hi]
    return data
class StrainSegments(object):
    """ Class for managing manipulation of strain data for the purpose of
        matched filtering. This includes methods for segmenting and
        conditioning.
    """
    def __init__(self, strain, segment_length=None, segment_start_pad=0,
                 segment_end_pad=0, trigger_start=None, trigger_end=None,
                 filter_inj_only=False, injection_window=None,
                 allow_zero_padding=False):
        """ Determine how to chop up the strain data into smaller segments
            for analysis.

        Parameters
        ----------
        strain : TimeSeries
            The strain time series to segment.
        segment_length : int, optional
            Length of each segment in seconds. Defaults to the full strain
            duration if not given.
        segment_start_pad : int, optional
            Seconds at the start of each segment to exclude from analysis.
        segment_end_pad : int, optional
            Seconds at the end of each segment to exclude from analysis.
        trigger_start : int, optional
            GPS time from which to record triggers. Defaults to the earliest
            analysable time.
        trigger_end : int, optional
            GPS time up to which to record triggers. Defaults to the latest
            analysable time.
        filter_inj_only : bool, optional
            If True, only keep segments that contain an injection.
        injection_window : float, optional
            Stored for callers; not used directly in this constructor.
        allow_zero_padding : bool, optional
            If True, permit trigger times that require zero-padding the data.
        """
        self._fourier_segments = None
        self.strain = strain
        self.delta_t = strain.delta_t
        self.sample_rate = strain.sample_rate
        # Fall back to a single segment covering everything when no
        # explicit segment length is requested.
        if segment_length:
            seg_len = segment_length
        else:
            seg_len = strain.duration
        self.delta_f = 1.0 / seg_len
        self.time_len = int(seg_len * self.sample_rate)
        self.freq_len = self.time_len // 2 + 1
        seg_end_pad = segment_end_pad
        seg_start_pad = segment_start_pad
        # Validate (or default) the trigger start time against the earliest
        # analysable time, which depends on whether zero padding is allowed.
        if not trigger_start:
            trigger_start = int(strain.start_time) + segment_start_pad
        else:
            if not allow_zero_padding:
                min_start_time = int(strain.start_time) + segment_start_pad
            else:
                min_start_time = int(strain.start_time)
            if trigger_start < min_start_time:
                err_msg = "Trigger start time must be within analysable "
                err_msg += "window. Asked to start from %d " %(trigger_start)
                err_msg += "but can only analyse from %d." %(min_start_time)
                raise ValueError(err_msg)
        # Same validation for the trigger end time.
        if not trigger_end:
            trigger_end = int(strain.end_time) - segment_end_pad
        else:
            if not allow_zero_padding:
                max_end_time = int(strain.end_time) - segment_end_pad
            else:
                max_end_time = int(strain.end_time)
            if trigger_end > max_end_time:
                err_msg = "Trigger end time must be within analysable "
                err_msg += "window. Asked to end at %d " %(trigger_end)
                err_msg += "but can only analyse to %d." %(max_end_time)
                raise ValueError(err_msg)
        throwaway_size = seg_start_pad + seg_end_pad
        seg_width = seg_len - throwaway_size
        # The amount of time we can actually analyze given the
        # amount of padding that is needed
        analyzable = trigger_end - trigger_start
        data_start = (trigger_start - segment_start_pad) - \
                     int(strain.start_time)
        data_end = trigger_end + segment_end_pad - int(strain.start_time)
        data_dur = data_end - data_start
        data_start = data_start * strain.sample_rate
        data_end = data_end * strain.sample_rate
        #number of segments we need to analyze this data
        num_segs = int(numpy.ceil(float(analyzable) / float(seg_width)))
        # The offset we will use between segments
        seg_offset = int(numpy.ceil(analyzable / float(num_segs)))
        self.segment_slices = []
        self.analyze_slices = []
        # Determine how to chop up the strain into smaller segments
        for nseg in range(num_segs-1):
            # boundaries for time slices into the strain
            seg_start = int(data_start + (nseg*seg_offset) * strain.sample_rate)
            seg_end = int(seg_start + seg_len * strain.sample_rate)
            seg_slice = slice(seg_start, seg_end)
            self.segment_slices.append(seg_slice)
            # boundaries for the analyzable portion of the segment
            ana_start = int(seg_start_pad * strain.sample_rate)
            ana_end = int(ana_start + seg_offset * strain.sample_rate)
            ana_slice = slice(ana_start, ana_end)
            self.analyze_slices.append(ana_slice)
        # The last segment takes up any integer boundary slop
        seg_end = int(data_end)
        seg_start = int(seg_end - seg_len * strain.sample_rate)
        seg_slice = slice(seg_start, seg_end)
        self.segment_slices.append(seg_slice)
        remaining = (data_dur - ((num_segs - 1) * seg_offset + seg_start_pad))
        ana_start = int((seg_len - remaining) * strain.sample_rate)
        ana_end = int((seg_len - seg_end_pad) * strain.sample_rate)
        ana_slice = slice(ana_start, ana_end)
        self.analyze_slices.append(ana_slice)
        # Keep the unreduced slices around before trimming below.
        self.full_segment_slices = copy.deepcopy(self.segment_slices)
        #Remove segments that are outside trig start and end
        segment_slices_red = []
        analyze_slices_red = []
        trig_start_idx = (trigger_start - int(strain.start_time)) * strain.sample_rate
        trig_end_idx = (trigger_end - int(strain.start_time)) * strain.sample_rate
        if filter_inj_only and hasattr(strain, 'injections'):
            end_times = strain.injections.end_times()
            end_times = [time for time in end_times if float(time) < trigger_end and float(time) > trigger_start]
            inj_idx = [(float(time) - float(strain.start_time)) * strain.sample_rate for time in end_times]
        for seg, ana in zip(self.segment_slices, self.analyze_slices):
            start = ana.start
            stop = ana.stop
            # Cumulative indices of the analyze window within the full strain.
            cum_start = start + seg.start
            cum_end = stop + seg.start
            # adjust first segment
            if trig_start_idx > cum_start:
                start += (trig_start_idx - cum_start)
            # adjust last segment
            if trig_end_idx < cum_end:
                stop -= (cum_end - trig_end_idx)
            if filter_inj_only and hasattr(strain, 'injections'):
                # Only analyze segments with an injection end time within
                # a fixed 8 s window of the analyze span.
                analyze_this = False
                inj_window = strain.sample_rate * 8
                for inj_id in inj_idx:
                    if inj_id < (cum_end + inj_window) and \
                            inj_id > (cum_start - inj_window):
                        analyze_this = True
                if not analyze_this:
                    continue
            if start < stop:
                segment_slices_red.append(seg)
                analyze_slices_red.append(slice(start, stop))
        self.segment_slices = segment_slices_red
        self.analyze_slices = analyze_slices_red
    def fourier_segments(self):
        """ Return a list of the FFT'd segments.

        Return the list of FrequencySeries. Additional properties are
        added that describe the strain segment. The property 'analyze'
        is a slice corresponding to the portion of the time domain equivalent
        of the segment to analyze for triggers. The value 'cumulative_index'
        indexes from the beginning of the original strain series.
        """
        # Lazily computed and cached on first call.
        if not self._fourier_segments:
            self._fourier_segments = []
            for seg_slice, ana in zip(self.segment_slices, self.analyze_slices):
                if seg_slice.start >= 0 and seg_slice.stop <= len(self.strain):
                    freq_seg = make_frequency_series(self.strain[seg_slice])
                # Assume that we cannot have a case where we both zero-pad on
                # both sides
                elif seg_slice.start < 0:
                    strain_chunk = self.strain[:seg_slice.stop]
                    strain_chunk.prepend_zeros(-seg_slice.start)
                    freq_seg = make_frequency_series(strain_chunk)
                elif seg_slice.stop > len(self.strain):
                    strain_chunk = self.strain[seg_slice.start:]
                    strain_chunk.append_zeros(seg_slice.stop - len(self.strain))
                    freq_seg = make_frequency_series(strain_chunk)
                freq_seg.analyze = ana
                freq_seg.cumulative_index = seg_slice.start + ana.start
                freq_seg.seg_slice = seg_slice
                self._fourier_segments.append(freq_seg)
        return self._fourier_segments
    @classmethod
    def from_cli(cls, opt, strain):
        """Calculate the segmentation of the strain data for analysis from
        the command line options.
        """
        return cls(strain, segment_length=opt.segment_length,
                   segment_start_pad=opt.segment_start_pad,
                   segment_end_pad=opt.segment_end_pad,
                   trigger_start=opt.trig_start_time,
                   trigger_end=opt.trig_end_time,
                   filter_inj_only=opt.filter_inj_only,
                   injection_window=opt.injection_window,
                   allow_zero_padding=opt.allow_zero_padding)
    @classmethod
    def insert_segment_option_group(cls, parser):
        """Add the strain-segmentation command line options to parser."""
        segment_group = parser.add_argument_group(
                                  "Options for segmenting the strain",
                                  "These options are used to determine how to "
                                  "segment the strain into smaller chunks, "
                                  "and for determining the portion of each to "
                                  "analyze for triggers. ")
        segment_group.add_argument("--trig-start-time", type=int, default=0,
                    help="(optional) The gps time to start recording triggers")
        segment_group.add_argument("--trig-end-time", type=int, default=0,
                    help="(optional) The gps time to stop recording triggers")
        segment_group.add_argument("--segment-length", type=int,
                          help="The length of each strain segment in seconds.")
        segment_group.add_argument("--segment-start-pad", type=int,
                          help="The time in seconds to ignore of the "
                               "beginning of each segment in seconds. ")
        segment_group.add_argument("--segment-end-pad", type=int,
                          help="The time in seconds to ignore at the "
                               "end of each segment in seconds.")
        segment_group.add_argument("--allow-zero-padding", action='store_true',
                                   help="Allow for zero padding of data to "
                                        "analyze requested times, if needed.")
        # Injection optimization options
        segment_group.add_argument("--filter-inj-only", action='store_true',
                          help="Analyze only segments that contain an injection.")
        segment_group.add_argument("--injection-window", default=None,
                          type=float, help="""If using --filter-inj-only then
                          only search for injections within +/- injection
                          window of the injections's end time. This is useful
                          to speed up a coherent search or a search where we
                          initially filter at lower sample rate, and then
                          filter at full rate where needed. NOTE: Reverts to
                          full analysis if two injections are in the same
                          segment.""")
    @classmethod
    def from_cli_single_ifo(cls, opt, strain, ifo):
        """Calculate the segmentation of the strain data for analysis from
        the command line options.
        """
        return cls(strain, segment_length=opt.segment_length[ifo],
                   segment_start_pad=opt.segment_start_pad[ifo],
                   segment_end_pad=opt.segment_end_pad[ifo],
                   trigger_start=opt.trig_start_time[ifo],
                   trigger_end=opt.trig_end_time[ifo],
                   filter_inj_only=opt.filter_inj_only,
                   allow_zero_padding=opt.allow_zero_padding)
    @classmethod
    def from_cli_multi_ifos(cls, opt, strain_dict, ifos):
        """Calculate the segmentation of the strain data for analysis from
        the command line options. Returns a dict of StrainSegments keyed
        on ifo.
        """
        strain_segments = {}
        for ifo in ifos:
            strain_segments[ifo] = cls.from_cli_single_ifo(
                opt, strain_dict[ifo], ifo)
        return strain_segments
    @classmethod
    def insert_segment_option_group_multi_ifo(cls, parser):
        """Add the multi-detector strain-segmentation options to parser."""
        segment_group = parser.add_argument_group(
                                  "Options for segmenting the strain",
                                  "These options are used to determine how to "
                                  "segment the strain into smaller chunks, "
                                  "and for determining the portion of each to "
                                  "analyze for triggers. ")
        segment_group.add_argument("--trig-start-time", type=int, default=0,
                    nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
                    help="(optional) The gps time to start recording triggers")
        segment_group.add_argument("--trig-end-time", type=int, default=0,
                    nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
                    help="(optional) The gps time to stop recording triggers")
        segment_group.add_argument("--segment-length", type=int,
                    nargs='+', action=MultiDetOptionAction,
                    metavar='IFO:LENGTH',
                    help="The length of each strain segment in seconds.")
        segment_group.add_argument("--segment-start-pad", type=int,
                    nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
                    help="The time in seconds to ignore of the "
                         "beginning of each segment in seconds. ")
        segment_group.add_argument("--segment-end-pad", type=int,
                    nargs='+', action=MultiDetOptionAction, metavar='IFO:TIME',
                    help="The time in seconds to ignore at the "
                         "end of each segment in seconds.")
        segment_group.add_argument("--allow-zero-padding", action='store_true',
                    help="Allow for zero padding of data to analyze "
                         "requested times, if needed.")
        segment_group.add_argument("--filter-inj-only", action='store_true',
                                   help="Analyze only segments that contain "
                                        "an injection.")
    # Options that must always be supplied when segmenting strain.
    required_opts_list = ['--segment-length',
                          '--segment-start-pad',
                          '--segment-end-pad',
                          ]
    @classmethod
    def verify_segment_options(cls, opt, parser):
        """Check that all required segmentation options were supplied."""
        required_opts(opt, parser, cls.required_opts_list)
    @classmethod
    def verify_segment_options_multi_ifo(cls, opt, parser, ifos):
        """Check required segmentation options for every given ifo."""
        for ifo in ifos:
            required_opts_multi_ifo(opt, parser, ifo, cls.required_opts_list)
@functools.lru_cache(maxsize=500)
def create_memory_and_engine_for_class_based_fft(
    npoints_time,
    dtype,
    delta_t=1,
    ifft=False,
    uid=0
):
    """ Create memory and engine for class-based FFT/IFFT

    Currently only supports R2C FFT / C2R IFFTs, but this could be expanded
    if use-cases arise.

    Parameters
    ----------
    npoints_time : int
        Number of time samples of the real input vector (or real output vector
        if doing an IFFT).
    dtype : np.dtype
        The dtype for the real input vector (or real output vector if doing an
        IFFT). np.float32 or np.float64 I think in all cases.
    delta_t : float (default: 1)
        delta_t of the real vector. If not given this will be set to 1, and we
        will assume it is not needed in the returned TimeSeries/FrequencySeries
    ifft : boolean (default: False)
        By default will use the FFT class, set to true to use IFFT.
    uid : int (default: 0)
        Provide a unique identifier. This is used to provide a separate set
        of memory in the cache, for instance if calling this from different
        codes.

    Returns
    -------
    (invec, outvec, fft_class) : the input vector, output vector and the
        prepared FFT/IFFT engine operating between them.
    """
    # NOTE: uid is unused in the body; it only differentiates cache entries.
    npoints_freq = npoints_time // 2 + 1
    tmp_delta_f = 1.0 / (npoints_time * delta_t)

    # Real (time-domain) work vector.
    time_vec = TimeSeries(
        zeros(npoints_time, dtype=dtype),
        delta_t=delta_t,
        copy=False
    )
    # Complex (frequency-domain) work vector of matching precision.
    freq_vec = FrequencySeries(
        zeros(npoints_freq, dtype=complex_same_precision_as(time_vec)),
        delta_f=tmp_delta_f,
        copy=False
    )

    if ifft:
        return freq_vec, time_vec, IFFT(freq_vec, time_vec)
    return time_vec, freq_vec, FFT(time_vec, freq_vec)
def execute_cached_fft(invec_data, normalize_by_rate=True, ifft=False,
                       copy_output=True, uid=0):
    """ Execute an FFT (or IFFT) using cached memory and engines.

    Parameters
    -----------
    invec_data : Array
        Array which will be used as input when fft_class is executed.
    normalize_by_rate : boolean (optional, default:True)
        If True, then normalize by delta_t (for an FFT) or delta_f (for an
        IFFT).
    ifft : boolean (optional, default:False)
        If true assume this is an IFFT and multiply by delta_f not delta_t.
        Will do nothing if normalize_by_rate is False.
    copy_output : boolean (optional, default:True)
        If True we will copy the output into a new array. This avoids the issue
        that calling this function again might overwrite output. However, if
        you know that the output array will not be used before this function
        might be called again with the same length, then setting this to False
        will provide some increase in efficiency. The uid can also be used to
        help ensure that data doesn't get unintentionally overwritten!
    uid : int (default: 0)
        Provide a unique identifier. This is used to provide a separate set
        of memory in the cache, for instance if calling this from different
        codes.
    """
    from pycbc.types import real_same_precision_as

    # For an IFFT the input is a one-sided frequency series; invert
    # npoints_freq = npoints_time // 2 + 1 to recover the time length.
    npoints_time = (len(invec_data) - 1) * 2 if ifft else len(invec_data)

    try:
        delta_t = invec_data.delta_t
    except AttributeError:
        if normalize_by_rate:
            # delta_t is needed for normalization but is unavailable.
            raise
        # Don't need this
        delta_t = 1

    invec, outvec, fft_class = create_memory_and_engine_for_class_based_fft(
        npoints_time,
        real_same_precision_as(invec_data),
        delta_t=delta_t,
        ifft=ifft,
        uid=uid
    )

    # Copy the input into the cached buffer and run the engine.
    if invec_data is not None:
        invec._data[:] = invec_data._data[:]
    fft_class.execute()

    if normalize_by_rate:
        scale = invec._delta_f if ifft else invec._delta_t
        outvec._data *= scale

    if copy_output:
        outvec = outvec.copy()

    # Propagate the epoch when the input carries one.
    try:
        outvec._epoch = invec_data._epoch
    except AttributeError:
        pass

    return outvec
def execute_cached_ifft(*args, **kwargs):
    """ Execute a cached IFFT.

    Thin convenience wrapper around ``execute_cached_fft`` with
    ``ifft=True``; see that function for the full description.

    Parameters
    -----------
    invec_data : Array
        Array which will be used as input when fft_class is executed.
    normalize_by_rate : boolean (optional, default:True)
        If True, then normalize by delta_f.
    copy_output : boolean (optional, default:True)
        If True we will copy the output into a new array. This avoids the issue
        that calling this function again might overwrite output. However, if
        you know that the output array will not be used before this function
        might be called again with the same length, then setting this to False
        will provide some increase in efficiency. The uid can also be used to
        help ensure that data doesn't get unintentionally overwritten!
    uid : int (default: 0)
        Provide a unique identifier. This is used to provide a separate set
        of memory in the cache, for instance if calling this from different
        codes.
    """
    return execute_cached_fft(*args, ifft=True, **kwargs)
# If using caching we want output to be unique if called at different places
# (and if called from different modules/functions), these unique IDs achieve
# that. The numbers are not significant, only that they are unique.
STRAINBUFFER_UNIQUE_ID_1 = 236546845
STRAINBUFFER_UNIQUE_ID_2 = 778946541
STRAINBUFFER_UNIQUE_ID_3 = 665849947
class StrainBuffer(pycbc.frame.DataBuffer):
    def __init__(self, frame_src, channel_name, start_time,
                 max_buffer,
                 sample_rate,
                 low_frequency_cutoff=20,
                 highpass_frequency=15.0,
                 highpass_reduction=200.0,
                 highpass_bandwidth=5.0,
                 psd_samples=30,
                 psd_segment_length=4,
                 psd_inverse_length=3.5,
                 trim_padding=0.25,
                 autogating_threshold=None,
                 autogating_cluster=None,
                 autogating_pad=None,
                 autogating_width=None,
                 autogating_taper=None,
                 autogating_duration=None,
                 autogating_psd_segment_length=None,
                 autogating_psd_stride=None,
                 state_channel=None,
                 data_quality_channel=None,
                 idq_channel=None,
                 idq_state_channel=None,
                 idq_threshold=None,
                 dyn_range_fac=pycbc.DYN_RANGE_FAC,
                 psd_abort_difference=None,
                 psd_recalculate_difference=None,
                 force_update_cache=True,
                 increment_update_cache=None,
                 analyze_flags=None,
                 data_quality_flags=None,
                 dq_padding=0):
        """ Class to produce overwhitened strain incrementally

        Parameters
        ----------
        frame_src: str or list of strings
            Strings that indicate where to read from files from. This can be a
            list of frame files, a glob, etc.
        channel_name: str
            Name of the channel to read from the frame files
        start_time:
            Time to start reading from.
        max_buffer: int
            Length of the strain buffer in seconds.
        sample_rate: int, Optional
            Rate in Hz to sample the data.
        low_frequency_cutoff: {float, 20}, Optional
            The low frequency cutoff to use for inverse spectrum truncation
        highpass_frequency: {float, 15}, Optional
            The frequency to apply a highpass filter at before downsampling.
        highpass_reduction: {float, 200}, Optional
            The amount of reduction to apply to the low frequencies.
        highpass_bandwidth: {float, 5}, Optional
            The width of the transition region for the highpass filter.
        psd_samples: {int, 30}, Optional
            The number of samples to use for psd estimation
        psd_segment_length: {float, 4}, Optional
            The number of seconds in each psd sample.
        psd_inverse_length: {float, 3.5}, Optional
            The length in seconds for fourier transform of the inverse of the
            PSD to be truncated to.
        trim_padding: {float, 0.25}, Optional
            Amount of padding in seconds to give for truncating the
            overwhitened data stream.
        autogating_threshold: float, Optional
            Sigma deviation required to cause autogating of data.
            If None, no autogating is performed.
        autogating_cluster: float, Optional
            Seconds to cluster possible gating locations.
        autogating_pad: float, Optional
            Seconds of corrupted whitened strain to ignore when generating a gate.
        autogating_width: float, Optional
            Half-duration of the zeroed-out portion of autogates.
        autogating_taper: float, Optional
            Duration of taper on either side of the gating window in seconds.
        autogating_duration: float, Optional
            Amount of data in seconds to apply autogating on.
        autogating_psd_segment_length: float, Optional
            The length in seconds of each segment used to estimate the PSD with Welch's method.
        autogating_psd_stride: float, Optional
            The overlap in seconds between each segment used to estimate the PSD with Welch's method.
        state_channel: {str, None}, Optional
            Channel to use for state information about the strain
        data_quality_channel: {str, None}, Optional
            Channel to use for data quality information about the strain
        idq_channel: {str, None}, Optional
            Channel to use for idq timeseries
        idq_state_channel : {str, None}, Optional
            Channel containing information about usability of idq
        idq_threshold : float, Optional
            Threshold which triggers a veto if iDQ channel falls below this threshold
        dyn_range_fac: {float, pycbc.DYN_RANGE_FAC}, Optional
            Scale factor to apply to strain
        psd_abort_difference: {float, None}, Optional
            The relative change in the inspiral range from the previous PSD
            estimate to trigger the data to be considered invalid.
        psd_recalculate_difference: {float, None}, Optional
            the relative change in the inspiral range from the previous PSD
            to trigger a re-estimation of the PSD.
        force_update_cache: {boolean, True}, Optional
            Re-check the filesystem for frame files on every attempt to
            read more data.
        analyze_flags: list of strs
            The flags that must be on to mark the current data as valid for
            *any* use.
        data_quality_flags: list of strs
            The flags used to determine if to keep triggers.
        dq_padding: {float, 0}, optional
            Extra seconds to consider invalid before/after times with bad DQ.
        increment_update_cache: {str, None}, Optional
            Pattern to look for frame files in a GPS dependent directory. This
            is an alternate to the forced update of the frame cache, and
            attempts to predict the next frame file name without probing the
            filesystem.
        """
        super(StrainBuffer, self).__init__(frame_src, channel_name, start_time,
                                           max_buffer=max_buffer,
                                           force_update_cache=force_update_cache,
                                           increment_update_cache=increment_update_cache)
        self.low_frequency_cutoff = low_frequency_cutoff
        # Set up status buffers
        self.analyze_flags = analyze_flags
        self.data_quality_flags = data_quality_flags
        self.state = None
        self.dq = None
        self.idq = None
        self.dq_padding = dq_padding
        # State channel
        if state_channel is not None:
            valid_mask = pycbc.frame.flag_names_to_bitmask(self.analyze_flags)
            logger.info('State channel %s interpreted as bitmask %s = good',
                        state_channel, bin(valid_mask))
            self.state = pycbc.frame.StatusBuffer(
                frame_src,
                state_channel, start_time,
                max_buffer=max_buffer,
                valid_mask=valid_mask,
                force_update_cache=force_update_cache,
                increment_update_cache=increment_update_cache)
        # low latency dq channel
        if data_quality_channel is not None:
            sb_kwargs = dict(max_buffer=max_buffer,
                             force_update_cache=force_update_cache,
                             increment_update_cache=increment_update_cache)
            # A single 'veto_nonzero' flag means zero == good data,
            # otherwise the flag names are converted into a bitmask.
            if len(self.data_quality_flags) == 1 \
                    and self.data_quality_flags[0] == 'veto_nonzero':
                sb_kwargs['valid_on_zero'] = True
                logger.info('DQ channel %s interpreted as zero = good',
                            data_quality_channel)
            else:
                sb_kwargs['valid_mask'] = pycbc.frame.flag_names_to_bitmask(
                    self.data_quality_flags)
                logger.info(
                    'DQ channel %s interpreted as bitmask %s = good',
                    data_quality_channel,
                    bin(sb_kwargs['valid_mask'])
                )
            self.dq = pycbc.frame.StatusBuffer(frame_src, data_quality_channel,
                                               start_time, **sb_kwargs)
        if idq_channel is not None:
            if idq_state_channel is None:
                raise ValueError(
                    'Each detector with an iDQ channel requires an iDQ state channel as well')
            if idq_threshold is None:
                raise ValueError(
                    'If an iDQ channel is provided, a veto threshold must also be provided')
            self.idq = pycbc.frame.iDQBuffer(frame_src,
                                             idq_channel,
                                             idq_state_channel,
                                             idq_threshold,
                                             start_time,
                                             max_buffer=max_buffer,
                                             force_update_cache=force_update_cache,
                                             increment_update_cache=increment_update_cache)
        self.highpass_frequency = highpass_frequency
        self.highpass_reduction = highpass_reduction
        self.highpass_bandwidth = highpass_bandwidth
        self.autogating_threshold = autogating_threshold
        self.autogating_cluster = autogating_cluster
        self.autogating_pad = autogating_pad
        self.autogating_width = autogating_width
        self.autogating_taper = autogating_taper
        self.autogating_duration = autogating_duration
        self.autogating_psd_segment_length = autogating_psd_segment_length
        self.autogating_psd_stride = autogating_psd_stride
        self.gate_params = []
        self.sample_rate = sample_rate
        self.dyn_range_fac = dyn_range_fac
        self.psd_abort_difference = psd_abort_difference
        self.psd_recalculate_difference = psd_recalculate_difference
        self.psd_segment_length = psd_segment_length
        self.psd_samples = psd_samples
        self.psd_inverse_length = psd_inverse_length
        self.psd = None
        self.psds = {}
        # Ring buffer holding the conditioned (highpassed, scaled,
        # downsampled) strain at the analysis sample rate.
        strain_len = int(max_buffer * self.sample_rate)
        self.strain = TimeSeries(zeros(strain_len, dtype=numpy.float32),
                                 delta_t=1.0/self.sample_rate,
                                 epoch=start_time-max_buffer)
        # Determine the total number of corrupted samples for highpass
        # and PSD over whitening
        highpass_samples, self.beta = kaiserord(self.highpass_reduction,
            self.highpass_bandwidth / self.raw_buffer.sample_rate * 2 * numpy.pi)
        self.highpass_samples = int(highpass_samples / 2)
        resample_corruption = 10 # If using the ldas method
        self.factor = round(1.0 / self.raw_buffer.delta_t / self.sample_rate)
        self.corruption = self.highpass_samples // self.factor + resample_corruption
        self.psd_corruption = self.psd_inverse_length * self.sample_rate
        self.total_corruption = self.corruption + self.psd_corruption
        # Determine how much padding is needed after removing the parts
        # associated with PSD over whitening and highpass filtering
        self.trim_padding = int(trim_padding * self.sample_rate)
        if self.trim_padding > self.total_corruption:
            self.trim_padding = self.total_corruption
        self.psd_duration = (psd_samples - 1) // 2 * psd_segment_length
        self.reduced_pad = int(self.total_corruption - self.trim_padding)
        self.segments = {}
        # time to ignore output of frame (for initial buffering)
        self.add_hard_count()
        self.taper_immediate_strain = True
    @property
    def start_time(self):
        """ Return the start time of the current valid segment of data,
        i.e. the end time minus the most recent block size. """
        return self.end_time - self.blocksize
    @property
    def end_time(self):
        """ Return the end time of the current valid segment of data,
        excluding the corrupted samples at the tail of the buffer. """
        return float(self.strain.start_time + (len(self.strain) - self.total_corruption) / self.sample_rate)
    def add_hard_count(self):
        """ Reset the countdown timer, so that we don't analyze data long enough
        to generate a new PSD.

        The wait covers the corrupted samples plus the duration needed for
        PSD estimation; any existing PSD is discarded.
        """
        self.wait_duration = int(numpy.ceil(self.total_corruption / self.sample_rate + self.psd_duration))
        self.invalidate_psd()
    def invalidate_psd(self):
        """ Make the current PSD invalid. A new one will be generated when
        it is next required """
        self.psd = None
        # Also drop the per-delta_f interpolated PSD cache.
        self.psds = {}
    def recalculate_psd(self):
        """ Recalculate the psd

        Estimates a new Welch PSD from the tail of the strain buffer and
        attaches an SPA horizon distance to it. Returns False only when the
        new estimate differs from the previous one by more than
        ``psd_abort_difference`` (data considered invalid); True otherwise.
        """
        seg_len = int(self.sample_rate * self.psd_segment_length)
        e = len(self.strain)
        s = e - (self.psd_samples + 1) * seg_len // 2
        psd = pycbc.psd.welch(self.strain[s:e], seg_len=seg_len, seg_stride=seg_len//2)
        # Horizon distance for a canonical 1.4/1.4 binary, used as the
        # comparison metric between PSD estimates.
        psd.dist = spa_distance(psd, 1.4, 1.4, self.low_frequency_cutoff) * pycbc.DYN_RANGE_FAC
        # If the new psd is similar to the old one, don't replace it
        if self.psd and self.psd_recalculate_difference:
            if abs(self.psd.dist - psd.dist) / self.psd.dist < self.psd_recalculate_difference:
                logger.info("Skipping recalculation of %s PSD, %s-%s",
                            self.detector, self.psd.dist, psd.dist)
                return True
        # If the new psd is *really* different than the old one, return an error
        if self.psd and self.psd_abort_difference:
            if abs(self.psd.dist - psd.dist) / self.psd.dist > self.psd_abort_difference:
                logger.info("%s PSD is CRAZY, aborting!!!!, %s-%s",
                            self.detector, self.psd.dist, psd.dist)
                self.psd = psd
                self.psds = {}
                return False
        # If the new estimate replaces the current one, invalidate the
        # interpolated PSDs
        self.psd = psd
        self.psds = {}
        logger.info("Recalculating %s PSD, %s", self.detector, psd.dist)
        return True
def check_psd_dist(self, min_dist, max_dist):
"""Check that the horizon distance of a detector is within a required
range. If so, return True, otherwise log a warning and return False.
"""
if self.psd is None:
# ignore check
return True
# Note that the distance can in principle be inf or nan, e.g. if h(t)
# is identically zero. The check must fail in those cases. Be careful
# with how the logic works out when comparing inf's or nan's!
good = self.psd.dist >= min_dist and self.psd.dist <= max_dist
if not good:
logger.info(
"%s PSD dist %s outside acceptable range [%s, %s]",
self.detector,
self.psd.dist,
min_dist,
max_dist
)
return good
    def overwhitened_data(self, delta_f):
        """ Return overwhitened data

        Parameters
        ----------
        delta_f: float
            The sample step to generate overwhitened frequency domain data for

        Returns
        -------
        htilde: FrequencySeries
            Overwhitened strain data
        """
        # we haven't already computed htilde for this delta_f
        if delta_f not in self.segments:
            buffer_length = int(1.0 / delta_f)
            e = len(self.strain)
            # Take enough extra samples to absorb the reduced padding that
            # is trimmed away again below.
            s = int(e - buffer_length * self.sample_rate - self.reduced_pad * 2)
            # FFT the contents of self.strain[s:e] into fseries
            fseries = execute_cached_fft(self.strain[s:e],
                                         copy_output=False,
                                         uid=STRAINBUFFER_UNIQUE_ID_1)
            fseries._epoch = self.strain._epoch + s*self.strain.delta_t
            # we haven't calculated a resample psd for this delta_f
            if delta_f not in self.psds:
                # psdt: PSD at the fine (pre-trim) resolution used for the
                # actual division; psd: PSD at the requested delta_f, kept
                # for downstream consumers.
                psdt = pycbc.psd.interpolate(self.psd, fseries.delta_f)
                psdt = pycbc.psd.inverse_spectrum_truncation(psdt,
                                       int(self.sample_rate * self.psd_inverse_length),
                                       low_frequency_cutoff=self.low_frequency_cutoff)
                psdt._delta_f = fseries.delta_f
                psd = pycbc.psd.interpolate(self.psd, delta_f)
                psd = pycbc.psd.inverse_spectrum_truncation(psd,
                                       int(self.sample_rate * self.psd_inverse_length),
                                       low_frequency_cutoff=self.low_frequency_cutoff)
                psd.psdt = psdt
                self.psds[delta_f] = psd
            psd = self.psds[delta_f]
            # Overwhiten: divide by the (truncated inverse) PSD.
            fseries /= psd.psdt
            # trim ends of strain
            if self.reduced_pad != 0:
                # IFFT the contents of fseries into overwhite
                overwhite = execute_cached_ifft(fseries,
                                                copy_output=False,
                                                uid=STRAINBUFFER_UNIQUE_ID_2)
                overwhite2 = overwhite[self.reduced_pad:len(overwhite)-self.reduced_pad]
                taper_window = self.trim_padding / 2.0 / overwhite.sample_rate
                # Taper both ends of the trimmed strain to avoid edge
                # artifacts from the truncation.
                gate_params = [(overwhite2.start_time, 0., taper_window),
                               (overwhite2.end_time, 0., taper_window)]
                gate_data(overwhite2, gate_params)
                # FFT the contents of overwhite2 into fseries_trimmed
                fseries_trimmed = execute_cached_fft(
                    overwhite2,
                    copy_output=True,
                    uid=STRAINBUFFER_UNIQUE_ID_3
                )
                fseries_trimmed.start_time = fseries.start_time + self.reduced_pad * self.strain.delta_t
            else:
                fseries_trimmed = fseries
            fseries_trimmed.psd = psd
            self.segments[delta_f] = fseries_trimmed
        stilde = self.segments[delta_f]
        return stilde
def near_hwinj(self):
"""Check that the current set of triggers could be influenced by
a hardware injection.
"""
if not self.state:
return False
if not self.state.is_extent_valid(self.start_time, self.blocksize, pycbc.frame.NO_HWINJ):
return True
return False
def null_advance_strain(self, blocksize):
""" Advance and insert zeros
Parameters
----------
blocksize: int
The number of seconds to attempt to read from the channel
"""
sample_step = int(blocksize * self.sample_rate)
csize = sample_step + self.corruption * 2
self.strain.roll(-sample_step)
# We should roll this off at some point too...
self.strain[len(self.strain) - csize + self.corruption:] = 0
self.strain.start_time += blocksize
# The next time we need strain will need to be tapered
self.taper_immediate_strain = True
def advance(self, blocksize, timeout=10):
    """Advanced buffer blocksize seconds.
    Add blocksize seconds more to the buffer, push blocksize seconds
    from the beginning.

    Parameters
    ----------
    blocksize: int
        The number of seconds to attempt to read from the channel
    timeout: int
        Seconds to wait for the frame data before giving up.

    Returns
    -------
    status: boolean
        Returns True if this block is analyzable.
    """
    ts = super(StrainBuffer, self).attempt_advance(blocksize, timeout=timeout)
    self.blocksize = blocksize
    self.gate_params = []
    # We have given up so there is no time series
    if ts is None:
        logger.info("%s frame is late, giving up", self.detector)
        self.null_advance_strain(blocksize)
        # Keep the auxiliary channels advancing in lockstep with strain.
        if self.state:
            self.state.null_advance(blocksize)
        if self.dq:
            self.dq.null_advance(blocksize)
        if self.idq:
            self.idq.null_advance(blocksize)
        return False
    # We collected some data so we are closer to being able to analyze data
    self.wait_duration -= blocksize
    # If the data we got was invalid, reset the counter on how much to collect
    # This behavior corresponds to how we handle CAT1 vetoes
    if self.state and self.state.advance(blocksize) is False:
        self.add_hard_count()
        self.null_advance_strain(blocksize)
        if self.dq:
            self.dq.null_advance(blocksize)
        if self.idq:
            self.idq.null_advance(blocksize)
        logger.info("%s time has invalid data, resetting buffer",
                    self.detector)
        return False
    # Also advance the dq vector and idq timeseries in lockstep
    if self.dq:
        self.dq.advance(blocksize)
    if self.idq:
        self.idq.advance(blocksize)
    # Invalidate cached conditioned segments; they are rebuilt on demand.
    self.segments = {}
    # only condition with the needed raw data so we can continuously add
    # to the existing result
    # Precondition
    sample_step = int(blocksize * self.sample_rate)
    csize = sample_step + self.corruption * 2
    start = len(self.raw_buffer) - csize * self.factor
    strain = self.raw_buffer[start:]
    # High-pass, scale to the dynamic range, and downsample the new data.
    strain = pycbc.filter.highpass_fir(strain, self.highpass_frequency,
                                       self.highpass_samples,
                                       beta=self.beta)
    strain = (strain * self.dyn_range_fac).astype(numpy.float32)
    strain = pycbc.filter.resample_to_delta_t(strain,
                                              1.0/self.sample_rate, method='ldas')
    # remove corruption at beginning
    strain = strain[self.corruption:]
    # taper beginning if needed
    if self.taper_immediate_strain:
        logger.info("Tapering start of %s strain block", self.detector)
        strain = gate_data(
            strain, [(strain.start_time, 0., self.autogating_taper)])
        self.taper_immediate_strain = False
    # Stitch into continuous stream
    self.strain.roll(-sample_step)
    self.strain[len(self.strain) - csize + self.corruption:] = strain[:]
    self.strain.start_time += blocksize
    # apply gating if needed
    if self.autogating_threshold is not None:
        # Only search the most recent autogating_duration seconds,
        # excluding the corrupted tail of the buffer.
        autogating_duration_length = self.autogating_duration * self.sample_rate
        autogating_start_sample = int(len(self.strain) - autogating_duration_length)
        glitch_times = detect_loud_glitches(
            self.strain[autogating_start_sample:-self.corruption],
            psd_duration=self.autogating_psd_segment_length, psd_stride=self.autogating_psd_stride,
            threshold=self.autogating_threshold,
            cluster_window=self.autogating_cluster,
            low_freq_cutoff=self.highpass_frequency,
            corrupt_time=self.autogating_pad)
        if len(glitch_times) > 0:
            logger.info('Autogating %s at %s', self.detector,
                        ', '.join(['%.3f' % gt for gt in glitch_times]))
            self.gate_params = \
                [(gt, self.autogating_width, self.autogating_taper)
                 for gt in glitch_times]
            self.strain = gate_data(self.strain, self.gate_params)
    # Once enough data has accumulated, compute the first PSD estimate.
    if self.psd is None and self.wait_duration <=0:
        self.recalculate_psd()
    return self.wait_duration <= 0
@classmethod
def from_cli(cls, ifo, args):
    """Initialize a StrainBuffer object (data reader) for a particular
    detector.

    Parameters
    ----------
    ifo: str
        Detector name (e.g. 'H1'); used to index the per-detector entries
        of the parsed command-line options.
    args: argparse.Namespace
        Parsed command-line options.

    Returns
    -------
    StrainBuffer
        A data reader configured for the requested detector.
    """
    # The state channel and analyzable flags are optional; both must be
    # provided for this detector for either to be used.
    state_channel = analyze_flags = None
    if args.state_channel and ifo in args.state_channel \
            and args.analyze_flags and ifo in args.analyze_flags:
        state_channel = ':'.join([ifo, args.state_channel[ifo]])
        analyze_flags = args.analyze_flags[ifo].split(',')
    # Same pairing requirement for the data-quality channel and flags.
    dq_channel = dq_flags = None
    if args.data_quality_channel and ifo in args.data_quality_channel \
            and args.data_quality_flags and ifo in args.data_quality_flags:
        dq_channel = ':'.join([ifo, args.data_quality_channel[ifo]])
        dq_flags = args.data_quality_flags[ifo].split(',')
    idq_channel = None
    if args.idq_channel and ifo in args.idq_channel:
        idq_channel = ':'.join([ifo, args.idq_channel[ifo]])
    idq_state_channel = None
    if args.idq_state_channel and ifo in args.idq_state_channel:
        idq_state_channel = ':'.join([ifo, args.idq_state_channel[ifo]])
    # Either discover frame files from the frame type, or use the
    # explicitly-given frame source.
    if args.frame_type:
        frame_src = pycbc.frame.frame_paths(
            args.frame_type[ifo],
            args.start_time,
            args.end_time,
            site=ifo[0]
        )
    else:
        frame_src = [args.frame_src[ifo]]
    strain_channel = ':'.join([ifo, args.channel_name[ifo]])
    # BUGFIX: --increment-update-cache is optional; indexing a None value
    # by detector would raise a TypeError. Guard it like the other
    # optional per-detector options above.
    increment_update_cache = (args.increment_update_cache[ifo]
                              if args.increment_update_cache else None)
    return cls(
        frame_src,
        strain_channel,
        args.start_time,
        max_buffer=args.max_length,
        state_channel=state_channel,
        data_quality_channel=dq_channel,
        idq_channel=idq_channel,
        idq_state_channel=idq_state_channel,
        idq_threshold=args.idq_threshold,
        sample_rate=args.sample_rate,
        low_frequency_cutoff=args.low_frequency_cutoff,
        highpass_frequency=args.highpass_frequency,
        highpass_reduction=args.highpass_reduction,
        highpass_bandwidth=args.highpass_bandwidth,
        psd_samples=args.psd_samples,
        trim_padding=args.trim_padding,
        psd_segment_length=args.psd_segment_length,
        psd_inverse_length=args.psd_inverse_length,
        autogating_threshold=args.autogating_threshold,
        autogating_cluster=args.autogating_cluster,
        autogating_pad=args.autogating_pad,
        autogating_width=args.autogating_width,
        autogating_taper=args.autogating_taper,
        autogating_duration=args.autogating_duration,
        autogating_psd_segment_length=args.autogating_psd_segment_length,
        autogating_psd_stride=args.autogating_psd_stride,
        psd_abort_difference=args.psd_abort_difference,
        psd_recalculate_difference=args.psd_recalculate_difference,
        force_update_cache=args.force_update_cache,
        increment_update_cache=increment_update_cache,
        analyze_flags=analyze_flags,
        data_quality_flags=dq_flags,
        dq_padding=args.data_quality_padding
    )
|
gwastroREPO_NAMEpycbcPATH_START.@pycbc_extracted@pycbc-master@pycbc@strain@strain.py@.PATH_END.py
|
{
"filename": "_xpad.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/parcoords/line/colorbar/_xpad.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XpadValidator(_plotly_utils.basevalidators.NumberValidator):
    """Number validator for the ``xpad`` attribute of
    ``parcoords.line.colorbar`` (non-negative padding, in pixels).
    """

    def __init__(
        self, plotly_name="xpad", parent_name="parcoords.line.colorbar", **kwargs
    ):
        # Pop the defaults first so explicit caller overrides win,
        # then forward everything else untouched.
        params = dict(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "style"),
        )
        params.update(kwargs)
        super(XpadValidator, self).__init__(**params)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@parcoords@line@colorbar@_xpad.py@.PATH_END.py
|
{
"filename": "subversion.py",
"repo_name": "davidharvey1986/pyRRG",
"repo_path": "pyRRG_extracted/pyRRG-master/unittests/bugFixPyRRG/lib/python3.7/site-packages/pip/_internal/vcs/subversion.py",
"type": "Python"
}
|
# The following comment should be removed at some point in the future.
# mypy: disallow-untyped-defs=False
from __future__ import absolute_import
import logging
import os
import re
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
display_path,
is_console_interactive,
rmtree,
split_auth_from_netloc,
)
from pip._internal.utils.subprocess import make_command
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
from pip._internal.vcs.versioncontrol import VersionControl, vcs
# Regexes for extracting the repository URL and revision numbers from
# ``svn info --xml`` output and from pre-1.7 ``.svn/entries`` files.
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
# These imports are only needed for type checking; guarding them avoids
# runtime import cost and potential import cycles.
if MYPY_CHECK_RUNNING:
    from typing import Optional, Tuple
    from pip._internal.utils.subprocess import CommandArgs
    from pip._internal.utils.misc import HiddenText
    from pip._internal.vcs.versioncontrol import AuthInfo, RevOptions
# Module-level logger, following the standard convention.
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
    """Support for the Subversion ('svn') version control system."""
    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')
    @classmethod
    def should_add_vcs_url_prefix(cls, remote_url):
        # svn remote URLs are always spelled with the 'svn+' prefix.
        return True
    @staticmethod
    def get_base_rev_args(rev):
        # svn selects a revision with '-r REV'.
        return ['-r', rev]
    @classmethod
    def get_revision(cls, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0
        for base, dirs, files in os.walk(location):
            if cls.dirname not in dirs:
                dirs[:] = []
                continue  # no sense walking uncontrolled subdirs
            dirs.remove(cls.dirname)
            entries_fn = os.path.join(base, cls.dirname, 'entries')
            if not os.path.exists(entries_fn):
                # FIXME: should we warn?
                continue
            dirurl, localrev = cls._get_svn_url_rev(base)
            if base == location:
                base = dirurl + '/'   # save the root url
            elif not dirurl or not dirurl.startswith(base):
                dirs[:] = []
                continue  # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision
    @classmethod
    def get_netloc_and_auth(cls, netloc, scheme):
        """
        This override allows the auth information to be passed to svn via the
        --username and --password options instead of via the URL.
        """
        if scheme == 'ssh':
            # The --username and --password options can't be used for
            # svn+ssh URLs, so keep the auth information in the URL.
            return super(Subversion, cls).get_netloc_and_auth(netloc, scheme)
        return split_auth_from_netloc(netloc)
    @classmethod
    def get_url_rev_and_auth(cls, url):
        # type: (str) -> Tuple[str, Optional[str], AuthInfo]
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev, user_pass = super(Subversion, cls).get_url_rev_and_auth(url)
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev, user_pass
    @staticmethod
    def make_rev_args(username, password):
        # type: (Optional[str], Optional[HiddenText]) -> CommandArgs
        # Build the optional credential arguments for svn invocations.
        extra_args = []  # type: CommandArgs
        if username:
            extra_args += ['--username', username]
        if password:
            extra_args += ['--password', password]
        return extra_args
    @classmethod
    def get_remote_url(cls, location):
        """Return the remote repository URL for a checked-out location,
        or None if no setup.py can be located above it.
        """
        # In cases where the source is in a subdirectory, not alongside
        # setup.py we have to look up in the location until we find a real
        # setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without
                # finding setup.py
                logger.warning(
                    "Could not find setup.py for directory %s (tried all "
                    "parent directories)",
                    orig_location,
                )
                return None
        return cls._get_svn_url_rev(location)[0]
    @classmethod
    def _get_svn_url_rev(cls, location):
        """Return (url, rev) for a local svn checkout, handling both the
        pre-1.7 ``.svn/entries`` layout and ``svn info --xml`` output.
        """
        from pip._internal.exceptions import InstallationError
        entries_path = os.path.join(location, cls.dirname, 'entries')
        if os.path.exists(entries_path):
            with open(entries_path) as f:
                data = f.read()
        else:  # subversion >= 1.7 does not have the 'entries' file
            data = ''
        # Entries-file format versions 8/9/10: plain-text records separated
        # by form feeds.
        if (data.startswith('8') or
                data.startswith('9') or
                data.startswith('10')):
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0]  # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            # Older XML-based entries format.
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError(
                    'Badly formatted data: {data!r}'.format(**locals()))
            url = match.group(1)    # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                # Note that using get_remote_call_options is not necessary here
                # because `svn info` is being run against a local directory.
                # We don't need to worry about making sure interactive mode
                # is being used to prompt for passwords, because passwords
                # are only potentially needed for remote server requests.
                xml = cls.run_command(
                    ['info', '--xml', location],
                    show_stdout=False,
                )
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [
                    int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
                ]
            except InstallationError:
                url, revs = None, []
        if revs:
            rev = max(revs)
        else:
            rev = 0
        return url, rev
    @classmethod
    def is_commit_id_equal(cls, dest, name):
        """Always assume the versions don't match"""
        return False
    def __init__(self, use_interactive=None):
        # type: (bool) -> None
        if use_interactive is None:
            use_interactive = is_console_interactive()
        self.use_interactive = use_interactive
        # This member is used to cache the fetched version of the current
        # ``svn`` client.
        # Special value definitions:
        #   None: Not evaluated yet.
        #   Empty tuple: Could not parse version.
        self._vcs_version = None  # type: Optional[Tuple[int, ...]]
        super(Subversion, self).__init__()
    def call_vcs_version(self):
        # type: () -> Tuple[int, ...]
        """Query the version of the currently installed Subversion client.

        :return: A tuple containing the parts of the version information or
            ``()`` if the version returned from ``svn`` could not be parsed.
        :raises: BadCommand: If ``svn`` is not installed.
        """
        # Example versions:
        #   svn, version 1.10.3 (r1842928)
        #      compiled Feb 25 2019, 14:20:39 on x86_64-apple-darwin17.0.0
        #   svn, version 1.7.14 (r1542130)
        #      compiled Mar 28 2018, 08:49:13 on x86_64-pc-linux-gnu
        version_prefix = 'svn, version '
        version = self.run_command(['--version'], show_stdout=False)
        if not version.startswith(version_prefix):
            return ()
        version = version[len(version_prefix):].split()[0]
        version_list = version.split('.')
        try:
            parsed_version = tuple(map(int, version_list))
        except ValueError:
            return ()
        return parsed_version
    def get_vcs_version(self):
        # type: () -> Tuple[int, ...]
        """Return the version of the currently installed Subversion client.

        If the version of the Subversion client has already been queried,
        a cached value will be used.

        :return: A tuple containing the parts of the version information or
            ``()`` if the version returned from ``svn`` could not be parsed.
        :raises: BadCommand: If ``svn`` is not installed.
        """
        if self._vcs_version is not None:
            # Use cached version, if available.
            # If parsing the version failed previously (empty tuple),
            # do not attempt to parse it again.
            return self._vcs_version
        vcs_version = self.call_vcs_version()
        self._vcs_version = vcs_version
        return vcs_version
    def get_remote_call_options(self):
        # type: () -> CommandArgs
        """Return options to be used on calls to Subversion that contact the server.

        These options are applicable for the following ``svn`` subcommands used
        in this class.

            - checkout
            - export
            - switch
            - update

        :return: A list of command line arguments to pass to ``svn``.
        """
        if not self.use_interactive:
            # --non-interactive switch is available since Subversion 0.14.4.
            # Subversion < 1.8 runs in interactive mode by default.
            return ['--non-interactive']
        svn_version = self.get_vcs_version()
        # By default, Subversion >= 1.8 runs in non-interactive mode if
        # stdin is not a TTY. Since that is how pip invokes SVN, in
        # call_subprocess(), pip must pass --force-interactive to ensure
        # the user can be prompted for a password, if required.
        # SVN added the --force-interactive option in SVN 1.8. Since
        # e.g. RHEL/CentOS 7, which is supported until 2024, ships with
        # SVN 1.7, pip should continue to support SVN 1.7. Therefore, pip
        # can't safely add the option if the SVN version is < 1.8 (or unknown).
        if svn_version >= (1, 8):
            return ['--force-interactive']
        return []
    def export(self, location, url):
        # type: (str, HiddenText) -> None
        """Export the svn repository at the url to the destination location"""
        url, rev_options = self.get_url_rev_options(url)
        logger.info('Exporting svn repository %s to %s', url, location)
        with indent_log():
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing
                # directory --force fixes this, but was only added in svn 1.5
                rmtree(location)
            cmd_args = make_command(
                'export', self.get_remote_call_options(),
                rev_options.to_args(), url, location,
            )
            self.run_command(cmd_args, show_stdout=False)
    def fetch_new(self, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> None
        # Check out a fresh copy of the repository at ``dest``.
        rev_display = rev_options.to_display()
        logger.info(
            'Checking out %s%s to %s',
            url,
            rev_display,
            display_path(dest),
        )
        cmd_args = make_command(
            'checkout', '-q', self.get_remote_call_options(),
            rev_options.to_args(), url, dest,
        )
        self.run_command(cmd_args)
    def switch(self, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> None
        # Point an existing checkout at a different URL.
        cmd_args = make_command(
            'switch', self.get_remote_call_options(), rev_options.to_args(),
            url, dest,
        )
        self.run_command(cmd_args)
    def update(self, dest, url, rev_options):
        # type: (str, HiddenText, RevOptions) -> None
        # Update an existing checkout to the requested revision.
        cmd_args = make_command(
            'update', self.get_remote_call_options(), rev_options.to_args(),
            dest,
        )
        self.run_command(cmd_args)
vcs.register(Subversion)
|
davidharvey1986REPO_NAMEpyRRGPATH_START.@pyRRG_extracted@pyRRG-master@unittests@bugFixPyRRG@lib@python3.7@site-packages@pip@_internal@vcs@subversion.py@.PATH_END.py
|
{
"filename": "driver.py",
"repo_name": "igrins/plp",
"repo_path": "plp_extracted/plp-master/igrins/pipeline/driver.py",
"type": "Python"
}
|
""" Pipeline Driver """
from ..igrins_libs.resource_manager import get_igrins_resource_manager
from ..igrins_libs.igrins_config import IGRINSConfig
from ..igrins_libs.obs_set import ObsSet
def get_obsset(obsdate, recipe_name, band,
               obsids, frametypes,
               groupname=None, recipe_entry=None,
               config_file=None, saved_context_name=None,
               basename_postfix="", runner_config=None):
    """Build an ObsSet for one (obsdate, band) observation set.

    Parameters
    ----------
    obsdate : observation date identifier
    recipe_name : name of the pipeline recipe to run
    band : spectrograph band
    obsids, frametypes : lists describing the frames in the set
    groupname : optional group name
    recipe_entry : dict of original recipe entry
    config_file : path to an IGRINS config file, or an IGRINSConfig instance
    saved_context_name : optional path to a pickled resource manager;
        when given, the resource manager is loaded from it instead of
        being created fresh
    basename_postfix : suffix appended to output basenames
    runner_config : optional runner configuration
    """
    # from igrins import get_obsset
    # caldb = get_caldb(config_file, obsdate, ensure_dir=True)
    if isinstance(config_file, IGRINSConfig):
        config = config_file
    else:
        config = IGRINSConfig(config_file)
    if saved_context_name is not None:
        # BUGFIX: ``cPickle`` exists only on Python 2; fall back to the
        # Python 3 ``pickle`` module, and close the file deterministically
        # instead of leaking the handle.
        try:
            import cPickle as pickle  # Python 2
        except ImportError:
            import pickle  # Python 3
        with open(saved_context_name, "rb") as f:
            resource_manager = pickle.load(f)
    else:
        resource_manager = get_igrins_resource_manager(config, (obsdate, band))
    obsset = ObsSet(resource_manager, recipe_name, obsids, frametypes,
                    groupname=groupname, recipe_entry=recipe_entry,
                    basename_postfix=basename_postfix,
                    runner_config=runner_config)
    return obsset
def get_obsset_from_context(obsset_desc, resource_manager):
    """Reconstruct an ObsSet from its descriptor dict and an existing
    resource manager (e.g. one restored from a saved context)."""
    return ObsSet(
        resource_manager,
        obsset_desc["recipe_name"],
        obsset_desc["obsids"],
        obsset_desc["frametypes"],
        groupname=obsset_desc["groupname"],
        basename_postfix=obsset_desc["basename_postfix"],
    )
|
igrinsREPO_NAMEplpPATH_START.@plp_extracted@plp-master@igrins@pipeline@driver.py@.PATH_END.py
|
{
"filename": "VegaPlots2Datasets.ipynb",
"repo_name": "andreicuceu/vega",
"repo_path": "vega_extracted/vega-master/examples/VegaPlots2Datasets.ipynb",
"type": "Jupyter Notebook"
}
|
```python
from vega import VegaInterface
```
```python
vega1 = VegaInterface('../desi-4.0-4/10_mocks/main-auto_10mpc.ini')
vega2 = VegaInterface('../desi-4.12-4/10_mocks_bao/main-auto_10mpc.ini')
```
INFO: reading input Pk /global/u1/a/acuceu/envs/picca/vega/vega/models/PlanckDR12/PlanckDR12.fits
Reading data file /global/cfs/cdirs/desi/users/acuceu/picca_on_mocks_v9.0/global/desi-4.0-4/baseline/correlations/cf_lya_lya-exp.fits.gz
Reading distortion matrix file /global/cfs/cdirs/desi/users/acuceu/picca_on_mocks_v9.0/v9.0.0/desi-4.0-4/baseline/correlations/dmat_lya_lya_0_10.fits.gz
/global/homes/a/acuceu/.conda/envs/picca/lib/python3.10/site-packages/mcfit/mcfit.py:176: UserWarning: input must be log-spaced
warnings.warn("input must be log-spaced")
INFO: reading input Pk /global/u1/a/acuceu/envs/picca/vega/vega/models/PlanckDR12/PlanckDR12.fits
Reading data file /global/cfs/cdirs/desi/users/acuceu/picca_on_mocks_v9.0/global/desi-4.12-4/baseline/correlations/cf_lya_lya-exp.fits.gz
Reading distortion matrix file /global/cfs/cdirs/desi/users/acuceu/picca_on_mocks_v9.0/v9.0.0/desi-4.12-4/baseline/correlations/dmat_lya_lya_0_10.fits.gz
```python
model1 = vega1.compute_model()
model2 = vega2.compute_model()
```
/global/homes/a/acuceu/.conda/envs/picca/lib/python3.10/site-packages/mcfit/mcfit.py:176: UserWarning: input must be log-spaced
warnings.warn("input must be log-spaced")
```python
vega1.plots.plot_4wedges(models=[model1], data_label='No contaminants', data_color='C0', model_colors=['C0']);
vega2.plots.plot_4wedges(models=[model2], data_label='With contaminants', data_color='C1', model_colors=['C1'],
fig=vega1.plots.fig);
vega1.plots.postprocess_fig(vega1.plots.fig,
ylim=[[-1.2, 0.2], [-0.5, 0.15], [-0.25, 0.2], [0, 0.4]])
```
/global/u1/a/acuceu/envs/picca/vega/vega/plots/plot.py:627: UserWarning: The figure layout has changed to tight
plt.tight_layout()

```python
vega1.plots.plot_1wedge(models=[model1], data_label='No contaminants', data_color='C0', model_colors=['C0']);
vega2.plots.plot_1wedge(models=[model2], data_label='With contaminants', data_color='C1', model_colors=['C1'],
fig=vega1.plots.fig);
vega1.plots.postprocess_fig(vega1.plots.fig)
```

```python
vega1.plots.plot_2wedges(models=[model1], data_label='No contaminants', data_color='C0', model_colors=['C0']);
vega2.plots.plot_2wedges(models=[model2], data_label='With contaminants', data_color='C1', model_colors=['C1'],
fig=vega1.plots.fig);
vega1.plots.postprocess_fig(vega1.plots.fig)
```

```python
```
|
andreicuceuREPO_NAMEvegaPATH_START.@vega_extracted@vega-master@examples@VegaPlots2Datasets.ipynb@.PATH_END.py
|
{
"filename": "test_spline_filter.py",
"repo_name": "dask/dask-image",
"repo_path": "dask-image_extracted/dask-image-main/tests/test_dask_image/test_ndinterp/test_spline_filter.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from packaging import version
import dask
import dask.array as da
import numpy as np
import pytest
import scipy
import scipy.ndimage
import dask_image.ndinterp
# mode lists for the case with prefilter = False
# (boundary modes that the dask-image spline filters accept / reject)
_supported_modes = ['constant', 'nearest', 'reflect', 'mirror']
_unsupported_modes = ['wrap']
# additional modes are present in SciPy >= 1.6.0
if version.parse(scipy.__version__) >= version.parse('1.6.0'):
    _supported_modes += ['grid-constant', 'grid-mirror', 'grid-wrap']
def validate_spline_filter(n=2,
                           axis_size=64,
                           interp_order=3,
                           interp_mode='constant',
                           chunksize=32,
                           output=np.float64,
                           random_seed=0,
                           use_cupy=False,
                           axis=None,
                           input_as_non_dask_array=False,
                           depth=None):
    """
    Compare the outputs of `scipy.ndimage.spline_transform`
    and `dask_image.ndinterp.spline_transform`. If axis is not None, then
    `spline_transform1d` is tested instead.

    Parameters
    ----------
    n : number of dimensions of the random test image
    axis_size : length of each image axis
    interp_order : spline order passed to both filters
    interp_mode : boundary mode passed to both filters
    chunksize : dask chunk size along every axis
    output : requested output dtype
    random_seed : seed for the random test image
    use_cupy : if True, run the dask-image side on CuPy arrays
    axis : if not None, test the 1d filter along this axis
    input_as_non_dask_array : if True, pass a plain array to dask-image
    depth : overlap depth forwarded to dask-image (None = default)
    """
    if (
        np.dtype(output) != np.float64
        and version.parse(scipy.__version__) < version.parse('1.4.0')
    ):
        pytest.skip("bug in output dtype handling in SciPy < 1.4")
    # define test image
    np.random.seed(random_seed)
    image = np.random.random([axis_size] * n)
    if version.parse(dask.__version__) < version.parse("2020.1.0"):
        # older dask will fail if any chunks have size smaller than depth
        _depth = dask_image.ndinterp._get_default_depth(interp_order)
        rem = axis_size % chunksize
        if chunksize < _depth or (rem != 0 and rem < _depth):
            pytest.skip("older dask doesn't automatically rechunk")
    if input_as_non_dask_array:
        if use_cupy:
            import cupy as cp
            image_da = cp.asarray(image)
        else:
            image_da = image
    else:
        # transform into dask array
        image_da = da.from_array(image, chunks=[chunksize] * n)
        if use_cupy:
            import cupy as cp
            image_da = image_da.map_blocks(cp.asarray)
    # pick the nd or the 1d variant of the scipy/dask-image filter pair
    if axis is not None:
        scipy_func = scipy.ndimage.spline_filter1d
        dask_image_func = dask_image.ndinterp.spline_filter1d
        kwargs = {'axis': axis}
    else:
        scipy_func = scipy.ndimage.spline_filter
        dask_image_func = dask_image.ndinterp.spline_filter
        kwargs = {}
    # transform with scipy
    image_t_scipy = scipy_func(
        image,
        output=output,
        order=interp_order,
        mode=interp_mode,
        **kwargs)
    # transform with dask-image
    image_t_dask = dask_image_func(
        image_da,
        output=output,
        order=interp_order,
        mode=interp_mode,
        depth=depth,
        **kwargs)
    image_t_dask_computed = image_t_dask.compute()
    # results must agree in dtype and, to tolerance, in value
    rtol = atol = 1e-6
    out_dtype = np.dtype(output)
    assert image_t_scipy.dtype == image_t_dask_computed.dtype == out_dtype
    assert np.allclose(image_t_scipy, image_t_dask_computed,
                       rtol=rtol, atol=atol)
@pytest.mark.parametrize("n", [1, 2, 3])
@pytest.mark.parametrize("axis_size", [64])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _supported_modes)
@pytest.mark.parametrize("chunksize", [32, 15])
def test_spline_filter_general(
    n,
    axis_size,
    interp_order,
    interp_mode,
    chunksize,
):
    """nd spline_filter must match SciPy across the parameter grid."""
    settings = {
        "n": n,
        "axis_size": axis_size,
        "interp_order": interp_order,
        "interp_mode": interp_mode,
        "chunksize": chunksize,
        "axis": None,
    }
    validate_spline_filter(**settings)
@pytest.mark.cupy
@pytest.mark.parametrize("n", [2])
@pytest.mark.parametrize("axis_size", [32])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _supported_modes[::2])
@pytest.mark.parametrize("chunksize", [16])
@pytest.mark.parametrize("axis", [None, -1])
@pytest.mark.parametrize("input_as_non_dask_array", [False, True])
def test_spline_filter_cupy(
    n,
    axis_size,
    interp_order,
    interp_mode,
    chunksize,
    axis,
    input_as_non_dask_array,
):
    """GPU variant: dask-image on CuPy arrays must still match SciPy."""
    pytest.importorskip("cupy", minversion="9.0.0")
    settings = {
        "n": n,
        "axis_size": axis_size,
        "interp_order": interp_order,
        "interp_mode": interp_mode,
        "chunksize": chunksize,
        "axis": axis,
        "input_as_non_dask_array": input_as_non_dask_array,
        "use_cupy": True,
    }
    validate_spline_filter(**settings)
@pytest.mark.parametrize("n", [1, 2, 3])
@pytest.mark.parametrize("axis_size", [48, 27])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _supported_modes)
@pytest.mark.parametrize("chunksize", [33])
@pytest.mark.parametrize("axis", [0, 1, -1])
def test_spline_filter1d_general(
    n,
    axis_size,
    interp_order,
    interp_mode,
    chunksize,
    axis,
):
    """1d spline_filter1d must match SciPy for every tested axis."""
    if n < 2 and axis == 1:
        pytest.skip("skip axis=1 for 1d signals")
    settings = {
        "n": n,
        "axis_size": axis_size,
        "interp_order": interp_order,
        "interp_mode": interp_mode,
        "chunksize": chunksize,
        "axis": axis,
    }
    validate_spline_filter(**settings)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_non_dask_array_input(axis):
    """A plain (non-dask) input array must be accepted and handled."""
    validate_spline_filter(input_as_non_dask_array=True, axis=axis)
@pytest.mark.parametrize("depth", [None, 24])
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_non_default_depth(depth, axis):
    """An explicit overlap depth must still reproduce the SciPy result."""
    validate_spline_filter(depth=depth, axis=axis)
@pytest.mark.parametrize("depth", [(16, 32), [18]])
def test_spline_filter1d_invalid_depth(depth):
    """A sequence-valued depth is rejected by the 1d filter."""
    with pytest.raises(ValueError):
        validate_spline_filter(axis=-1, depth=depth)
@pytest.mark.parametrize("axis_size", [32])
@pytest.mark.parametrize("interp_order", range(2, 6))
@pytest.mark.parametrize("interp_mode", _unsupported_modes)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_unsupported_modes(
    axis_size,
    interp_order,
    interp_mode,
    axis,
):
    """Boundary modes not implemented by dask-image must raise."""
    with pytest.raises(NotImplementedError):
        validate_spline_filter(axis_size=axis_size,
                               interp_order=interp_order,
                               interp_mode=interp_mode,
                               axis=axis)
@pytest.mark.parametrize(
"output", [np.float64, np.float32, "float32", np.dtype(np.float32)]
)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_output_dtype(output, axis):
validate_spline_filter(
axis_size=32,
interp_order=3,
output=output,
axis=axis,
)
@pytest.mark.parametrize("axis", [None, -1])
def test_spline_filter_array_output_unsupported(axis):
    """Passing a preallocated ndarray as ``output`` is not supported."""
    ndim = 2
    size = 32
    preallocated = np.empty((size,) * ndim)
    with pytest.raises(TypeError):
        validate_spline_filter(
            n=ndim,
            axis_size=size,
            interp_order=3,
            output=preallocated,
            axis=axis,
        )
|
daskREPO_NAMEdask-imagePATH_START.@dask-image_extracted@dask-image-main@tests@test_dask_image@test_ndinterp@test_spline_filter.py@.PATH_END.py
|
{
"filename": "tvguide.py",
"repo_name": "tessgi/tvguide",
"repo_path": "tvguide_extracted/tvguide-master/tvguide/tvguide.py",
"type": "Python"
}
|
from __future__ import absolute_import
import numpy as np
from ._tvguide import tvguidef
import argparse, sys
from astropy.coordinates import SkyCoord
from astropy import units as u
from . import Highlight
from . import logger
class TessPointing(object):
    """Observability helper for a single sky position.

    Wraps the compiled ``tvguidef`` routine to determine on which TESS
    camera (if any) a target falls during Cycle 1, for a given starting
    spacecraft roll offset ``dstart`` (degrees).
    """
    def __init__(self, ra_deg, dec_deg, dstart=0):
        # Sky position in decimal degrees (J2000) and the assumed
        # starting offset of the first sector.
        self.ra_deg = ra_deg
        self.dec_deg = dec_deg
        self.dstart = dstart
    def is_observable(self):
        """
        is the target observable in Cycle 1?
        returns
        0, not observable, in ecliptic,
        1, not observable, in Cycle 2
        2, observable
        """
        # convert to ecliptic
        gc = SkyCoord(ra=self.ra_deg * u.degree, dec=self.dec_deg * u.degree,
                      frame='icrs')
        lat = gc.barycentrictrueecliptic.lat.value
        # |lat| < 6 deg lies in the ecliptic band not scanned in Cycle 1/2
        if (lat > -6) & (lat < 6):
            return 0
        elif (lat >= 6):
            # northern ecliptic hemisphere: Cycle 2
            return 1
        else:
            return 2
    def get_13cameras(self):
        """
        returns an array of thirteen integers
        the value of each integer ranges from 0-4
        with 0 meaning not observable, and the integer
        referring to the camera if the target is observable
        13 values, one for each sector in Cycle 1
        """
        return tvguidef(self.ra_deg, self.dec_deg, self.dstart)
    def get_camera(self, fallback=False):
        """
        which camera is the star on?
        """
        cams = self.get_13cameras()
        # keep only sectors where the target actually falls on a camera
        cams = cams[cams > 0]
        if np.shape(cams)[0] > 0:
            return int(np.max(cams))
        else:
            # not seen at the nominal dstart; optionally scan all roll
            # offsets before giving up
            if fallback:
                return self.get_camera_loop()
            else:
                return 0
    def get_camera_loop(self):
        """
        which camera is the star on?
        loop over starting points
        """
        step_arr = np.arange(0, 360, 0.5)
        outarr = np.zeros_like(step_arr)
        # remember the configured dstart so it can be restored afterwards
        dstart_orig = np.copy(self.dstart)
        for i, dstart in enumerate(np.arange(0, 360, 0.5)):
            self.dstart = dstart
            cams = self.get_13cameras()
            outarr[i] = np.max(cams)
        self.dstart = dstart_orig
        if np.max(outarr) > 0:
            return int(np.max(outarr))
        else:
            return 0
    def get_numcampaigns(self):
        """
        returns an integer of how many sectors a target is observable
        """
        return np.nonzero(self.get_13cameras())[0].shape[0]
    def get_maxminmedave(self):
        """
        get the max, min and average number of campaigns that a target
        is observable by TESS in Cycle 1

        Returns a tuple (max, min, median, mean) over all tested
        0.5-degree starting offsets.
        """
        step_arr = np.arange(0, 360, 0.5)
        outarr = np.zeros_like(step_arr)
        dstart_orig = np.copy(self.dstart)
        # scan every 0.5-degree starting offset, counting observable sectors
        for i, dstart in enumerate(np.arange(0, 360, 0.5)):
            self.dstart = dstart
            outarr[i] = self.get_numcampaigns()
        self.dstart = dstart_orig
        return (int(np.max(outarr)), int(np.min(outarr)),
                int(np.median(outarr)), np.mean(outarr))
def parse_file(infile, exit_on_error=True):
    """Parse a comma-separated file with columns "ra,dec,magnitude".

    Returns two arrays: RA and Dec in decimal degrees.
    """
    try:
        table = np.genfromtxt(
            infile,
            usecols=[0, 1],
            delimiter=',',
            comments='#',
            dtype="f8"
        )
        # a one-row file parses to shape (2,); promote it to (1, 2)
        ra_col, dec_col = np.atleast_2d(table).T
    except IOError as e:
        logger.error("There seems to be a problem with the input file, "
                     "the format should be: RA_degrees (J2000), Dec_degrees (J2000). "
                     "There should be no header, columns should be "
                     "separated by a comma")
        if exit_on_error:
            sys.exit(1)
        else:
            raise e
    return ra_col, dec_col
def tvguide(args=None):
    """
    exposes tvguide to the command line

    Parses an (ra, dec) pair from the command line and reports whether
    the position is observable by TESS.
    """
    if args is None:
        cli = argparse.ArgumentParser(
            description="Determine whether targets are observable using TESS.")
        cli.add_argument('ra', nargs=1, type=float,
                         help="Right Ascension in decimal degrees (J2000).")
        cli.add_argument('dec', nargs=1, type=float,
                         help="Declination in decimal degrees (J2000).")
        args = cli.parse_args(args)
    return check_observable(args.ra[0], args.dec[0])
def tvguide_csv(args=None):
    """
    exposes tvguide-csv to the command line

    Reads a CSV of "ra, dec" positions and writes a companion
    '<input>-tvguide.csv' file containing ra, dec, and the minimum and
    maximum number of sectors each target may be observed.
    """
    if args is None:
        parser = argparse.ArgumentParser(
            description="Determine whether targets in a csv are observable using TESS.")
        parser.add_argument('input_filename', type=str,
                            help="Path to a comma-separated table containing "
                                 "columns 'ra, dec' (decimal degrees) "
                                 "or 'TIC number'.")
        args = parser.parse_args(args)
        # BUGFIX: the original called undefined ``vals(args)``, which
        # raised NameError; ``vars`` is the builtin that converts the
        # Namespace to a dict.
        args = vars(args)
    input_fn = args['input_filename']
    output_fn = input_fn + '-tvguide.csv'
    # First, try assuming the file has the classic "ra, dec" format
    try:
        ra, dec = parse_file(input_fn, exit_on_error=False)
        minC = np.zeros_like(ra, dtype=int)
        maxC = np.zeros_like(ra, dtype=int)
        for idx in range(len(ra)):
            tobj = TessPointing(ra[idx], dec[idx])
            # get_maxminmedave scans 720 roll offsets; call it once per
            # target instead of twice
            stats = tobj.get_maxminmedave()
            minC[idx] = stats[1]
            maxC[idx] = stats[0]
        output = np.array([ra, dec, minC, maxC])
        print("Writing {0}".format(output_fn))
        np.savetxt(output_fn, output.T, delimiter=', ',
                   fmt=['%10.10f', '%10.10f', '%i', '%i'])
    # If this fails, assume the file has a single "name" column
    except ValueError:
        # this will eventually take a tic id
        raise NotImplementedError
# def tvguide_fromtic(args=None):
# pass
def check_observable(ra, dec, silent=False):
    """Determine whether targets are observable using TESS.

    Wrapper for tvguide.tvguide for use in Python scripts.

    Parameters
    ----------
    ra, dec : float
        Coordinates in decimal degrees (J2000).
    silent : bool, optional
        Suppress console output (default False).

    Returns
    -------
    Observability code 0 (not observable in Cycle 1 or 2) or 1 (not in
    Cycle 1, maybe Cycle 2). For an observable target (code 2) with
    ``silent=True``, a tuple of (maximum, minimum, median, average
    sectors, camera) is returned instead; otherwise the code 2 itself.

    Examples
    --------
    from tvguide import check_observable
    check_observable(234.56, -78.9)
    """
    tessObj = TessPointing(ra, dec)
    # Query the observability code once instead of once per branch.
    observable = tessObj.is_observable()
    if observable == 0 and not silent:
        # Trailing space added inside the first literal: the original
        # message ran "TESS" and "during" together.
        print(Highlight.RED + "Sorry, the target is not observable by TESS "
              "during Cycle 1 or 2." + Highlight.END)
    elif observable == 1 and not silent:
        print(Highlight.RED + "Sorry, the target is not observable by TESS"
              " during Cycle 1.\nBut may be observable in Cycle 2" +
              Highlight.END)
    elif observable == 2:
        whichCamera = tessObj.get_camera(fallback=True)
        outlst = tessObj.get_maxminmedave() + (whichCamera,)
        if silent:
            return outlst
        print(Highlight.GREEN +
              "Success! The target may be observable by TESS during Cycle 1." +
              Highlight.END)
        if whichCamera != 0:
            print(Highlight.GREEN +
                  "Looks like it may fall into Camera {}.".format(
                      whichCamera) + Highlight.END)
        elif whichCamera == 0:
            print(Highlight.GREEN +
                  "Looks like it may fall into gap between cameras," +
                  Highlight.END)
            # Trailing space added: the original printed "finalpointing".
            print(Highlight.GREEN +
                  "but you should still propose this target because the final "
                  "pointing is not finalized." +
                  Highlight.END)
        print(Highlight.GREEN +
              "Each sector is 27.4 days." +
              " We can observe this source for:" +
              Highlight.END)
        print(Highlight.GREEN + "   maximum: {0} sectors".format(
            outlst[0]) + Highlight.END)
        print(Highlight.GREEN + "   minimum: {0} sectors".format(
            outlst[1]) + Highlight.END)
        print(Highlight.GREEN + "   median: {0} sectors".format(
            outlst[2]) + Highlight.END)
        print(Highlight.GREEN + "   average: {0:0.2f} sectors".format(
            outlst[3]) + Highlight.END)
    # Return the observability code on every path: the original returned
    # it only in the code-2 branch, so codes 0/1 yielded None despite the
    # documented contract.
    return observable
def check_many(ra, dec, output_fn=''):
    """
    Determine whether many targets are observable with TESS.

    Wrapper for tvguide.tvguide_csv for use in Python scripts.

    Parameters
    ----------
    ra, dec : array-like
        Coordinates in decimal degrees (J2000).
    output_fn : str, optional
        If non-empty (e.g. 'example.csv'), write the result table to this
        CSV file instead of returning it.

    Returns
    -------
    numpy.ndarray or None
        Rows of [ra, dec, min sectors, max sectors] when no output file
        is requested; otherwise None after writing the file.
    """
    minC = np.zeros_like(ra, dtype=int)
    maxC = np.zeros_like(ra, dtype=int)
    for idx in range(len(ra)):
        tobj = TessPointing(ra[idx], dec[idx])
        # Compute the (max, min, med, ave) statistics once per target
        # instead of twice as in the original.
        stats = tobj.get_maxminmedave()
        maxC[idx] = stats[0]
        minC[idx] = stats[1]
    output = np.array([ra, dec, minC, maxC])
    if output_fn:
        print("Writing {0}".format(output_fn))
        np.savetxt(output_fn, output.T, delimiter=', ',
                   fmt=['%10.10f', '%10.10f', '%i', '%i'])
    else:
        return output.T
# Script entry point is intentionally a no-op; the real entry points are
# the tvguide()/tvguide_csv() console-script wrappers above.
if __name__ == '__main__':
    pass
|
tessgiREPO_NAMEtvguidePATH_START.@tvguide_extracted@tvguide-master@tvguide@tvguide.py@.PATH_END.py
|
{
"filename": "_sf_error.py",
"repo_name": "scipy/scipy",
"repo_path": "scipy_extracted/scipy-main/scipy/special/_sf_error.py",
"type": "Python"
}
|
"""Warnings and Exceptions that can be raised by special functions."""
import warnings
class SpecialFunctionWarning(Warning):
    """Warning category emitted by special functions."""

warnings.simplefilter("always", category=SpecialFunctionWarning)
class SpecialFunctionError(Exception):
    """Exception raised by special functions on hard failures."""
|
scipyREPO_NAMEscipyPATH_START.@scipy_extracted@scipy-main@scipy@special@_sf_error.py@.PATH_END.py
|
{
"filename": "fluxes.py",
"repo_name": "moeyensj/atm",
"repo_path": "atm_extracted/atm-master/atm/plotting/fluxes.py",
"type": "Python"
}
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
from sys import platform
# Force the TkAgg backend on macOS before pyplot is imported; presumably
# this works around default-backend problems on that platform -- TODO confirm.
if platform == 'darwin':
    import matplotlib
    matplotlib.use("TkAgg")
import numpy as np
import matplotlib.pyplot as plt
from ..config import Config
__all__ = ["plotObservations",
"plotSED"]
def plotObservations(obs, data,
                     plotMedian=False,
                     ax=None,
                     figKwargs=None,
                     columnMapping=Config.columnMapping):
    """
    Plot observations for a single asteroid. Can also plot the median flux
    and median errors on flux if desired.

    Parameters
    ----------
    obs : `~atm.obs.Observatory`
        Observatory object containing filter bandpass information.
    data : `~pandas.DataFrame`
        DataFrame containing the relevant data to plot. The user should define
        the columnMapping dictionary which maps internally used variables to the
        variables used in the user's DataFrame.
    plotMedian : bool, optional
        Instead of plotting all fluxes and flux uncertainties plot the median per band.
        [Default = False]
    ax : {None, `~matplotlib.Axes.ax`}, optional
        Plot on the passed matplotlib axis. If ax is None then create a new figure.
        [Default = None]
    figKwargs : dict, optional
        Keyword arguments to pass to the figure API; None is interpreted as
        {"dpi": 200}. (A None default avoids sharing one mutable dict
        across all calls.)
        [Default = None]
    columnMapping : dict, optional
        This dictionary should define the column names of the user's data relative to the
        internally used names.
        [Default = `~atm.Config.columnMapping`]

    Returns
    -------
    `~matplotlib.Figure.figure`, `~matplotlib.Axes.ax`
        Returns matplotlib figure and axes object if the user passed no
        pre-existing axes, otherwise None.
    """
    if figKwargs is None:
        figKwargs = {"dpi": 200}
    m_to_mum = 1e6  # simple conversion from m to micron
    fig = None
    if ax is None:
        fig, ax = plt.subplots(1, 1, **figKwargs)
    for i, f in enumerate(obs.filterNames):
        if plotMedian is True:
            # One point per band: median flux with median uncertainty.
            ax.errorbar(obs.filterEffectiveLambdas[i] * m_to_mum,
                        1/m_to_mum*np.median(data[columnMapping["flux_si"][i]].values),
                        yerr=1/m_to_mum*np.median(data[columnMapping["fluxErr_si"][i]].values),
                        fmt='o',
                        c="k",
                        ms=2,
                        capsize=1,
                        elinewidth=1)
        else:
            # Every observation in this band at the filter's effective wavelength.
            ax.errorbar(obs.filterEffectiveLambdas[i] * m_to_mum * np.ones(len(data)),
                        1/m_to_mum*data[columnMapping["flux_si"][i]].values,
                        yerr=1/m_to_mum*data[columnMapping["fluxErr_si"][i]].values,
                        fmt='o',
                        c="k",
                        ms=2,
                        capsize=1,
                        elinewidth=1)
    ax.set_yscale("log")
    ax.set_xscale("log")
    ax.set_ylabel(r"Flux ($F_\lambda$) [$W m^{-2} \mu m^{-1}$]")
    ax.set_xlabel(r"Wavelength ($\lambda$) [$\mu m$]")
    if fig is not None:
        return fig, ax
    else:
        return
def plotSED(sed,
            ax=None,
            figKwargs=None,
            plotKwargs=None):
    """
    Plot an asteroid's thermal spectral energy distribution. This function
    takes the output DataFrame from `~atm.function.calcFluxLambdaSED` and
    plots the flux as a function of wavelength.

    Parameters
    ----------
    sed : `~pandas.DataFrame`
        DataFrame containing a columns of fluxes and wavelengths.
    ax : {None, `~matplotlib.Axes.ax`}, optional
        Plot on the passed matplotlib axis. If ax is None then create a new figure.
        [Default = None]
    figKwargs : dict, optional
        Keyword arguments to pass to the figure API; None is interpreted as
        {"dpi": 200}. (A None default avoids sharing one mutable dict
        across all calls.)
        [Default = None]
    plotKwargs : dict, optional
        Keyword arguments to pass to the ax.plot API; None is interpreted
        as {"label": "SED", "ls": ":", "c": "k", "lw": 1}.
        [Default = None]

    Returns
    -------
    `~matplotlib.Figure.figure`, `~matplotlib.Axes.ax`
        Returns matplotlib figure and axes object if the user passed no
        pre-existing axes, otherwise None.

    See Also
    --------
    `~atm.function.calcFluxLambdaSED` : Calculates a thermal model SED for a given wavelength
        range and step.
    """
    if figKwargs is None:
        figKwargs = {"dpi": 200}
    if plotKwargs is None:
        plotKwargs = {"label": "SED",
                      "ls": ":",
                      "c": "k",
                      "lw": 1}
    m_to_mum = 1e6  # simple conversion from m to micron
    fig = None
    if ax is None:
        fig, ax = plt.subplots(1, 1, **figKwargs)
    ax.plot(sed["lambda"] * m_to_mum, 1/m_to_mum*sed["flux"], **plotKwargs)
    ax.set_yscale("log")
    ax.set_xscale("log")
    ax.set_ylabel(r"Flux ($F_\lambda$) [$W m^{-2} \mu m^{-1}$]")
    ax.set_xlabel(r"Wavelength ($\lambda$) [$\mu m$]")
    if fig is not None:
        return fig, ax
    else:
        return
|
moeyensjREPO_NAMEatmPATH_START.@atm_extracted@atm-master@atm@plotting@fluxes.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatterternary/hoverlabel/font/__init__.py",
"type": "Python"
}
|
import sys
# Auto-generated validator package. Python < 3.7 lacks module-level
# __getattr__ (PEP 562), so the validator submodules are imported eagerly
# there; newer Pythons load them lazily on first attribute access.
if sys.version_info < (3, 7):
    from ._sizesrc import SizesrcValidator
    from ._size import SizeValidator
    from ._familysrc import FamilysrcValidator
    from ._family import FamilyValidator
    from ._colorsrc import ColorsrcValidator
    from ._color import ColorValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._sizesrc.SizesrcValidator",
            "._size.SizeValidator",
            "._familysrc.FamilysrcValidator",
            "._family.FamilyValidator",
            "._colorsrc.ColorsrcValidator",
            "._color.ColorValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatterternary@hoverlabel@font@__init__.py@.PATH_END.py
|
{
"filename": "Models.py",
"repo_name": "htjb/maxsmooth",
"repo_path": "maxsmooth_extracted/maxsmooth-master/maxsmooth/Models.py",
"type": "Python"
}
|
import numpy as np
from scipy.special import legendre
class Models_class(object):
def __init__(self, params, x, y, N, pivot_point, model_type, new_basis):
self.x = x
self.y = y
self.N = N
self.params = params
self.pivot_point = pivot_point
self.model_type = model_type
self.model = new_basis['model']
self.args = new_basis['args']
self.y_sum = self.fit()
def fit(self):
if self.model is None:
if self.model_type == 'normalised_polynomial':
y_sum = self.y[self.pivot_point]*np.sum([
self.params[i]*(self.x/self.x[self.pivot_point])**i
for i in range(self.N)], axis=0)
if self.model_type == 'polynomial':
y_sum = np.sum(
[self.params[i]*(self.x)**i for i in range(self.N)],
axis=0)
if self.model_type == 'loglog_polynomial':
y_sum = 10**(np.sum([
self.params[i]*np.log10(self.x)**i
for i in range(self.N)],
axis=0))
if self.model_type == 'exponential':
y_sum = self.y[self.pivot_point]*np.sum([
self.params[i] *
np.exp(-i*self.x/self.x[self.pivot_point])
for i in range(self.N)],
axis=0)
if self.model_type == 'log_polynomial':
y_sum = np.sum([
self.params[i] *
np.log10(self.x/self.x[self.pivot_point])**i
for i in range(self.N)],
axis=0)
if self.model_type == 'difference_polynomial':
y_sum = np.sum([
self.params[i]*(self.x-self.x[self.pivot_point])**i
for i in range(self.N)], axis=0)
if self.model_type == 'legendre':
interval = np.linspace(-0.999, 0.999, len(self.x))
lps = []
for n in range(self.N):
P = legendre(n)
lps.append(P(interval))
lps = np.array(lps)
y_sum = np.sum([
self.params[i] * lps[i] for i in range(self.N)], axis=0)
if self.model is not None:
if self.args is None:
y_sum = self.model(
self.x, self.y, self.pivot_point, self.N, self.params)
if self.args is not None:
y_sum = self.model(
self.x, self.y, self.pivot_point, self.N, self.params,
*self.args)
return y_sum
|
htjbREPO_NAMEmaxsmoothPATH_START.@maxsmooth_extracted@maxsmooth-master@maxsmooth@Models.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter3d/line/colorbar/title/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING
# Auto-generated validator package. Eager imports keep Python < 3.7 and
# static type checkers happy; otherwise submodules load lazily (PEP 562).
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._text import TextValidator
    from ._side import SideValidator
    from ._font import FontValidator
else:
    from _plotly_utils.importers import relative_import
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._text.TextValidator", "._side.SideValidator", "._font.FontValidator"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter3d@line@colorbar@title@__init__.py@.PATH_END.py
|
{
"filename": "position.py",
"repo_name": "astroufsc/chimera",
"repo_path": "chimera_extracted/chimera-master/src/chimera/util/position.py",
"type": "Python"
}
|
# to allow use outsise chimera
try:
from chimera.util.coord import Coord, CoordUtil
except ImportError:
from .coord import Coord, CoordUtil
try:
from chimera.util.enum import Enum
except ImportError:
from .enum import Enum
import ephem
__all__ = ["Position"]
Epoch = Enum("J2000", "B1950", "NOW")
System = Enum("CELESTIAL", "GALACTIC", "ECLIPTIC", "TOPOCENTRIC")
class PositionOutsideLimitsError(Exception):
    """Raised when a coordinate value falls outside its allowed range."""
class Position(object):
    """Position represents a coordinate pair in a reference frame.

    There are five factories available, that can be used to create
    Position in different frames.

    Each factory accepts a pair of parameters. The parameters format
    is tied to the choosen frame. The intent is to accept the most
    common form of coordinate pair in each frame. For example, ra/dec
    coordinates are mostly given in 'hms dms' format, so this is the
    default used when strings are passed, however, if integers/floats
    are given, these will be interpreted as 'dd dd' (which is also
    common).

    However, the common sense isn't common to everyone and to allow
    these different point of views, every factory acceps a Coord
    object as well, which can be created from a bunch of different
    representations. See L{Coord} for more information on the
    available representations.

    Examples:

    >>> # assumes ra in hms and declination in dms
    >>> p = Position.fromRaDec('10:00:00', '20:20:20', epoch=Epoch.J2000)
    >>> p = Position.fromRaDec(10, 20) # assume ra in hours and dec in decimal degress
    >>> # don't want assumptions? ok, give me a real Coord
    >>> # ok, If you want ra in radians and dec in hours (very strange), use:
    >>> p = Position.fromRaDec(Coord.fromR(pi), Coord.fromH(10))

    No matter which representation is given, the value will be checked
    for range validity.

    The following ranges are imposed::

      +----------------+------------------------------+
      | Right Ascension| 0-24 hours or 0-360 degrees  |
      | Declination    | -90 - +90 or 0-180 degrees   |
      | Latitude       | -90 - +90 or 0-180 degrees   |
      | Longitude      | -180 - +180 or 0-360 degrees |
      | Azimuth        | -180 - +180 or 0-360 degrees |
      | Altitude       | -90 - +90 or 0-180 degrees   |
      +----------------+------------------------------+

    Position offers a wide range of getters. You can get the
    coordinate itself, a Coord instance, or a primitive (int/float)
    converted from those instances. Also, to allow explicity intention
    and code documentation, Position also offers getter with a short
    name of the respective coordinate pais, like ra for Right
    Ascension and so on. These getter returns a Coord object wich can
    be used to get another representations or conversions.

    >>> p = Position.fromRaDec('10 00 00', '20 00 00')
    >>> print('position ra:', p.ra.HMS)

    @group Factories: from*
    @group Tuple getters: dd, rad, D, H, R, AS
    @group Coordinate getters: ra, dec, az, alt, long, lat
    """

    @staticmethod
    def fromRaDec(ra, dec, epoch=Epoch.J2000):
        """Create a CELESTIAL Position from right ascension/declination.

        Strings are parsed as HMS (ra) / DMS (dec); bare numbers as hours
        (ra) / degrees (dec); Coord instances are converted in place.
        Raises ValueError for unparsable or out-of-range input.
        """
        try:
            if isinstance(ra, str):
                ra = Coord.fromHMS(ra)
            elif isinstance(ra, Coord):
                ra = ra.toHMS()
            else:
                try:
                    # bare numbers are taken to be hours
                    ra = Coord.fromH(float(ra))
                    ra = ra.toHMS()
                except ValueError:
                    raise ValueError(
                        "Invalid RA coordinate type %s. Expected numbers, strings or Coords."
                        % str(type(ra))
                    )
            Position._checkRange(float(ra), 0, 360)
        except ValueError:
            raise ValueError("Invalid RA coordinate %s" % str(ra))
        except PositionOutsideLimitsError:
            raise ValueError(
                "Invalid RA range %s. Must be between 0-24 hours or 0-360 deg."
                % str(ra)
            )
        try:
            if isinstance(dec, str):
                dec = Coord.fromDMS(dec)
            elif isinstance(dec, Coord):
                dec = dec.toDMS()
            else:
                try:
                    # bare numbers are taken to be degrees
                    dec = Coord.fromD(float(dec))
                    dec = dec.toDMS()
                except ValueError:
                    raise ValueError(
                        "Invalid DEC coordinate type %s. Expected numbers, strings or Coords."
                        % str(type(dec))
                    )
            Position._checkRange(float(dec), -90, 360)
        except ValueError:
            raise ValueError("Invalid DEC coordinate %s" % str(dec))
        except PositionOutsideLimitsError:
            raise ValueError(
                "Invalid DEC range %s. Must be between 0-360 deg or -90 - +90 deg."
                % str(dec)
            )
        return Position((ra, dec), system=System.CELESTIAL, epoch=epoch)

    @staticmethod
    def fromAltAz(alt, az):
        """Create a TOPOCENTRIC Position from altitude/azimuth (DMS or Coord)."""
        try:
            if not isinstance(az, Coord):
                az = Coord.fromDMS(az)
            else:
                az = az.toDMS()
            Position._checkRange(float(az), -180, 360)
        except ValueError:
            raise ValueError("Invalid AZ coordinate %s" % str(az))
        except PositionOutsideLimitsError:
            raise ValueError(
                "Invalid AZ range %s. Must be between 0-360 deg or -180 - +180 deg."
                % str(az)
            )
        try:
            if not isinstance(alt, Coord):
                alt = Coord.fromDMS(alt)
            else:
                alt = alt.toDMS()
            Position._checkRange(float(alt), -90, 180)
        except ValueError:
            raise ValueError("Invalid ALT coordinate %s" % str(alt))
        except PositionOutsideLimitsError:
            raise ValueError(
                "Invalid ALT range %s. Must be between 0-180 deg or -90 - +90 deg."
                % str(alt)
            )
        return Position((alt, az), system=System.TOPOCENTRIC)

    @staticmethod
    def fromLongLat(long, lat):
        """Create a TOPOCENTRIC Position from longitude/latitude."""
        # Bug fix: the original passed the builtin `int` instead of the
        # `long` argument (an apparent Python 2->3 rename casualty).
        return Position(Position._genericLongLat(long, lat), system=System.TOPOCENTRIC)

    @staticmethod
    def fromGalactic(long, lat):
        """Create a GALACTIC Position from longitude/latitude."""
        return Position(Position._genericLongLat(long, lat), system=System.GALACTIC)

    @staticmethod
    def fromEcliptic(long, lat):
        """Create an ECLIPTIC Position from longitude/latitude."""
        return Position(Position._genericLongLat(long, lat), system=System.ECLIPTIC)

    @staticmethod
    def _genericLongLat(long, lat):
        """Validate a (longitude, latitude) pair and return it as DMS Coords.

        Bug fix: the body referenced the builtin `int` everywhere the
        `long` parameter was meant, which made the long/lat factories
        unusable.
        """
        try:
            if not isinstance(long, Coord):
                long = Coord.fromDMS(long)
            else:
                long = long.toDMS()
            Position._checkRange(float(long), -180, 360)
        except ValueError:
            raise ValueError("Invalid LONGITUDE coordinate %s" % str(long))
        except PositionOutsideLimitsError:
            raise ValueError(
                "Invalid LONGITUDE range %s. Must be between 0-360 deg or -180 - +180 deg."
                % str(long)
            )
        try:
            if not isinstance(lat, Coord):
                lat = Coord.fromDMS(lat)
            else:
                lat = lat.toDMS()
            Position._checkRange(float(lat), -90, 180)
        except ValueError:
            raise ValueError("Invalid LATITUDE coordinate %s" % str(lat))
        except PositionOutsideLimitsError:
            raise ValueError(
                "Invalid LATITUDE range %s. Must be between 0-180 deg or -90 - +90 deg."
                % str(lat)
            )
        return (long, lat)

    @staticmethod
    def _checkRange(value, lower, upper):
        """Raise PositionOutsideLimitsError unless lower <= value <= upper."""
        # handle -0 problem.
        if abs(value) == 0.0:
            value = abs(value)
        if not (lower <= value <= upper):
            raise PositionOutsideLimitsError(
                "Value must between %s and %s." % (lower, upper)
            )
        return True

    def __init__(self, coords, epoch=Epoch.J2000, system=System.CELESTIAL):
        self._coords = coords
        self.system = System.fromStr(str(system).upper())
        self.epoch = Epoch.fromStr(str(epoch).upper())

    #
    # serialization (enums are stored as strings)
    #
    def __getstate__(self):
        return {
            "_coords": self._coords,
            "system": str(self.system),
            "epoch": str(self.epoch),
        }

    def __setstate__(self, state):
        self._coords = state["_coords"]
        self.system = System.fromStr(state["system"])
        self.epoch = Epoch.fromStr(state["epoch"])

    def __str__(self):
        """
        @rtype: string
        """
        return "%s %s" % tuple(str(c) for c in self.coords)

    def epochString(self):
        """Return the epoch as a human-readable string (e.g. 'J2000.0')."""
        if self.epoch == Epoch.J2000:
            return "J2000.0"
        elif self.epoch == Epoch.B1950:
            return "B1950.0"
        else:
            # current epoch expressed as a fractional Julian year
            return "J%.2f" % (2000.0 + (ephem.julian_date() - 2451545.0) / 365.25)

    def __eq__(self, other):
        if isinstance(other, Position):
            return self._coords == other._coords
        return False

    def __ne__(self, other):
        # Bug fix: this was misspelled __neq__, a name Python never calls.
        return not (self == other)

    # keep the misspelled name in case external code invoked it directly
    __neq__ = __ne__

    # -* conversions -*

    # Coord conversion
    coords = property(lambda self: self._coords)

    def __iter__(self):
        return self.coords.__iter__()

    # Named getters for the coordinate pair; ra/alt/long all map to the
    # first slot and dec/az/lat to the second, depending on the system.
    ra = property(lambda self: self._coords[0])
    dec = property(lambda self: self._coords[1])

    alt = property(lambda self: self._coords[0])
    az = property(lambda self: self._coords[1])

    long = property(lambda self: self._coords[0])
    lat = property(lambda self: self._coords[1])

    # primitive conversion (degrees, radians, arcseconds, hours)
    D = property(lambda self: tuple((c.D for c in self.coords)))
    R = property(lambda self: tuple((c.R for c in self.coords)))
    AS = property(lambda self: tuple((c.AS for c in self.coords)))
    H = property(lambda self: tuple((c.H for c in self.coords)))

    def dd(self):
        """Both coordinates as decimal degrees."""
        return self.D

    def rad(self):
        """Both coordinates as radians."""
        return self.R

    def toEphem(self):
        """Return an ephem.Equatorial equivalent of this position."""
        if str(self.epoch).lower() == str(Epoch.J2000).lower():
            epoch = ephem.J2000
        elif str(self.epoch).lower() == str(Epoch.B1950).lower():
            epoch = ephem.B1950
        else:
            epoch = ephem.now()
        return ephem.Equatorial(self.ra.R, self.dec.R, epoch=epoch)

    def toEpoch(self, epoch=Epoch.J2000):
        """
        Returns a new Coordinate with the specified Epoch
        """
        # If coordinate epoch is already the right one, do nothing
        if str(epoch).lower() == str(self.epoch).lower():
            return self
        # Else, do the coordinate conversion...
        if str(epoch).lower() == str(Epoch.J2000).lower():
            eph_epoch = ephem.J2000
        elif str(epoch).lower() == str(Epoch.B1950).lower():
            eph_epoch = ephem.B1950
        elif str(epoch).lower() == str(Epoch.NOW).lower():
            eph_epoch = ephem.now()
        else:
            # previously fell through to an UnboundLocalError
            raise ValueError("Invalid epoch %s" % str(epoch))
        coords = ephem.Equatorial(self.toEphem(), epoch=eph_epoch)
        return Position.fromRaDec(
            Coord.fromR(coords.ra), Coord.fromR(coords.dec), epoch=epoch
        )

    #
    # great circle distance
    #
    def angsep(self, other):
        """
        Calculate the Great Circle Distance from other.

        @param other: position to calculate distance from.
        @type other: L{Position}

        @returns: The distance from this point to L{other}.
        @rtype: L{Coord} in degress (convertable, as this is a Coord).
        """
        return Coord.fromR(CoordUtil.gcdist(self.R, other.R)).toD()

    def within(self, other, eps=Coord.fromAS(60)):
        """
        Returns wether L{other} is up to L{eps} units from this
        points. (using great circle distance).

        @param other: Same as in angsep.
        @type other: L{Position}.

        @param eps: Limit distance.
        @type eps: L{Coord}.

        @returns: Wether L{other} is within {eps} units from this point.
        @rtype: bool
        """
        return self.angsep(other) <= eps

    # raDecToAltAz and altAzToRaDec adopted from sidereal.py
    # http://www.nmt.edu/tcc/help/lang/python/examples/sidereal/ims/
    @staticmethod
    def raDecToAltAz(raDec, latitude, lst):
        """Convert an ra/dec Position to alt/az for the given latitude and LST."""
        decR = CoordUtil.coordToR(raDec.dec)
        latR = CoordUtil.coordToR(latitude)
        ha = CoordUtil.raToHa(raDec.ra, lst)
        haR = CoordUtil.coordToR(ha)
        altR, azR = CoordUtil.coordRotate(decR, latR, haR)
        return Position.fromAltAz(
            Coord.fromR(CoordUtil.makeValid180to180(altR)),
            Coord.fromR(CoordUtil.makeValid0to360(azR)),
        )

    @staticmethod
    def altAzToRaDec(altAz, latitude, lst):
        """Convert an alt/az Position to ra/dec for the given latitude and LST."""
        altR = CoordUtil.coordToR(altAz.alt)
        latR = CoordUtil.coordToR(latitude)
        azR = CoordUtil.coordToR(altAz.az)
        decR, haR = CoordUtil.coordRotate(altR, latR, azR)
        ra = CoordUtil.haToRa(haR, lst)
        return Position.fromRaDec(
            CoordUtil.makeValid0to360(ra), CoordUtil.makeValid180to180(decR)
        )
|
astroufscREPO_NAMEchimeraPATH_START.@chimera_extracted@chimera-master@src@chimera@util@position.py@.PATH_END.py
|
{
"filename": "published_cats.py",
"repo_name": "mshubat/galaxy_data_mines",
"repo_path": "galaxy_data_mines_extracted/galaxy_data_mines-master/galaxy_data_mines/tobeintegrated/published_cats.py",
"type": "Python"
}
|
from astropy.table import Table, Column, hstack, vstack
from astropy.coordinates import SkyCoord, match_coordinates_sky
from astropy import units as u
import numpy as np
import os
# published_cats:
# tools for manipulating published catalogs of m83 objects, for comparison
# with Chandar et al catalog
# - reformat and combine NED and SIMBAD data tables
# - add in tables of data not included in NED or SIMBAD
# - match to Chandar catalog to produce a list of best-matches with object types
#
# usage:
# published_cats.ns_combine('ned-20160629.fits','simbad-20160629.fits','M83_NScomb.fits','M83_NSall.fits')
# published_cats.add_tables('M83_NSall.fits',['williams15_rsg.fits','kim12_wr.fits'],'M83_final.fits')
# published_cats.catalog_match('M83_final.fits', 'hlsp_wfc3ers_hst_wfc3_m83_cat_all_v2-corrected.txt','M83_ers_pubcat.txt')
# (old column name, new column name) pairs used by process_unmatch to
# normalize the NED and SIMBAD tables to a common output schema.
ned_rename = [('Name_N', 'Name'), ('RA(deg)', 'RA'), ('DEC(deg)', 'Dec'), ('Type_N', 'Type')]
sim_rename = [('Name_S', 'Name'), ('RA_d', 'RA'), ('DEC_d', 'Dec'), ('Type_S', 'Type')]
# ---------------- ns_combine AND helper functions ---------------- #
def ns_combine(ned_name, simbad_name, ns_combine, final_tab, match_tol=1.0): # match_tol in arcsec
    """Combine NED and SIMBAD object tables into one catalog.

    Reads the two input FITS tables, positionally matches them within
    ``match_tol`` arcsec, writes the matched rows to the file named by the
    ``ns_combine`` parameter (note: this parameter shadows the function
    name) and writes the full stacked catalog (matches plus unmatched
    rows from each source) to ``final_tab``. Prints match diagnostics and
    returns None.
    """
    ned_in = Table.read(ned_name)
    simbad_in = Table.read(simbad_name)
    # prelim processing
    ned_proc = reformat_cat(ned_in, old_name='Object Name', new_name='Name_N', old_type='Type', new_type='Type_N',
                            keepcols=['Object Name', 'RA(deg)', 'DEC(deg)', 'Type'])
    sim_proc = reformat_cat(simbad_in, old_name='MAIN_ID', new_name='Name_S',
                            old_type='OTYPE', new_type='Type_S')
    # construct coordinates needed for matching # ***** MATT - created SkyCoord's w correct unit columns
    ned_coo = SkyCoord(ra=ned_proc['RA(deg)'], dec=ned_proc['DEC(deg)'])
    sim_coo = SkyCoord(ra=sim_proc['RA_d'], dec=sim_proc['DEC_d'])
    # do the matching # ***** MATT - Returns indices of matched col's for ned+sim tables
    matched_ned, matched_sim, ned_only, sim_only = symmetric_match_sky_coords(
        ned_coo, sim_coo, match_tol*u.arcsec)
    print("Matched NED column:")
    print(ned_proc[matched_ned])
    print("Matched SIMBAD column:")
    print(sim_proc[matched_sim])
    print("Unmatched NED:")
    print(ned_proc[ned_only])
    print("Unmatched SIMBAD:")
    print(sim_proc[sim_only])
    # generate the matched table
    matchtab = hstack([ned_proc[matched_ned], sim_proc[matched_sim]], join_type='outer')
    # mark the really good matches
    matchtab2 = process_match(matchtab)
    matchtab2.write(ns_combine, format='fits')
    # rename some columns
    nedcat = process_unmatch(ned_proc[ned_only], src='N', rename_cols=ned_rename)
    simcat = process_unmatch(sim_proc[sim_only], src='S', rename_cols=sim_rename)
    keeplist = ['Name_N', 'RA(deg)', 'DEC(deg)', 'Type_N']
    matchtab3 = process_unmatch(Table(matchtab2[keeplist]), src='NS', rename_cols=ned_rename)
    # add on the unmatched objects
    finaltab = vstack([matchtab3, nedcat, simcat], join_type='outer')
    # save the result
    finaltab.write(final_tab, format='fits')
    return
# ---------------- ns_combine's helpers ---------------- #
# Substring replacements applied to object names (strip blanks and
# host-galaxy prefixes) before matching.
ns_replace_names = [(" ", ""), ("MESSIER083:", ""), ("NGC5236:", ""), ("M83-", ""), ("NGC5236", "")]
# Object-type translations intended to reconcile NED and SIMBAD type codes.
ns_replace_types = [('*Cl', 'Cl*'), ('PofG', 'Galaxy'), ('X', 'XrayS'), ('Radio', 'RadioS')]
# Catalog entries naming the galaxy/group itself rather than objects in it.
ns_remove_ids = ['NAMENGC5236Group', 'M83', 'MESSIER083', 'NGC5236GROUP']
# Column names that may hold RA/Dec values in either catalog.
ra_dec_cols = ['RA(deg)', 'DEC(deg)', 'RA_d', 'DEC_d']
def reformat_cat(in_tab, old_name, new_name, old_type, new_type, replace_names=ns_replace_names, replace_types=ns_replace_types, remove_id=ns_remove_ids, keepcols=None):
    '''reformat NED or SIMBAD catalog to make more intercompatible

    Renames the name/type columns, attaches degree units to any RA/Dec
    columns, and drops rows whose name appears in ``remove_id``.
    ``replace_names``/``replace_types`` are currently unused (their
    substitutions were disabled; type translation is handled elsewhere).
    Returns the modified table.
    '''
    # just keep selected columns
    if keepcols is not None:
        in_tab = in_tab[keepcols]
    # change units for RA/Dec
    for col in ra_dec_cols:
        if col in in_tab.colnames:
            in_tab[col].unit = u.degree
    # change ID for name & type columns
    in_tab.rename_column(old_name, new_name)
    in_tab.rename_column(old_type, new_type)
    # NOTE(review): the original name/type replacement loops were disabled
    # (left as commented-out code); type reconciliation is expected to be
    # handled by a dictionary elsewhere -- confirm before re-enabling.
    # delete rows whose names are in remove_id
    remove_idx = [i for i in range(len(in_tab)) if in_tab[i][new_name] in remove_id]
    in_tab.remove_rows(remove_idx)
    # all done
    return(in_tab)
def symmetric_match_sky_coords(coord1, coord2, tolerance):
    '''produce the symmetric match of coord1 to coord2

    output:
    index1_matched: index into coord1 for matched objects
    index2_matched: index into coord2 for matches of objects in index1_matched
    index1_unmatch: indices for unmatched objects in coord1
    index2_unmatch: indices for unmatched objects in coord2
    '''
    # closest_2to1[i]: index into coord2 of the closest match to coord1[i]
    closest_2to1, sep2d_2to1, _ = match_coordinates_sky(coord1, coord2)
    # closest_1to2[j]: index into coord1 of the closest match to coord2[j]
    closest_1to2, sep2d_1to2, _ = match_coordinates_sky(coord2, coord1)
    index1_matched = []
    index2_matched = []
    index1_unmatched = []
    for i in range(len(coord1)):
        j = closest_2to1[i]
        # Symmetric criterion: i's best match is j, j's best match is i,
        # and they lie within the tolerance. (The original indexed the
        # coord2-length arrays sep2d_1to2/closest_1to2 with a coord1
        # index, which was wrong and raised IndexError for
        # unequal-length inputs.)
        if sep2d_2to1[i] < tolerance and closest_1to2[j] == i:
            index1_matched.append(i)
            index2_matched.append(j)
        else:
            index1_unmatched.append(i)
    matched2 = set(int(j) for j in index2_matched)  # O(1) membership tests
    index2_unmatched = [j for j in range(len(coord2)) if j not in matched2]
    return(index1_matched, index2_matched, index1_unmatched, index2_unmatched)
def process_match(matched_tab_in):
    '''Flag secure NED/SIMBAD matches (same name or same type) in a
    'Secure' boolean column and return the table.'''
    same_name = matched_tab_in['Name_S'] == matched_tab_in['Name_N']
    same_type = matched_tab_in['Type_S'] == matched_tab_in['Type_N']
    secure = np.logical_or(same_name, same_type)
    matched_tab_in.add_column(Column(secure, name='Secure'))
    return(matched_tab_in)
def process_unmatch(tab_in, src, rename_cols):
    '''Rename columns per (old, new) pairs and tag every row with a
    'Source' code; returns the modified table.'''
    for old_col, new_col in rename_cols:
        tab_in.rename_column(old_col, new_col)
    tab_in.add_column(Column(name='Source', length=len(tab_in), dtype='S2'))
    tab_in['Source'] = src
    return(tab_in)
# ------------------------- Add tables ------------------------- #
def add_tables(basetab_file, tab_file_list, outfile, jt='outer'):
    """Vertically stack a base table with additional tables and write the
    result to ``outfile``."""
    tables = [Table.read(basetab_file)]
    tables.extend(Table.read(fn) for fn in tab_file_list)
    stacked = vstack(tables, join_type=jt)
    stacked.write(outfile)
    return
# ------------------------ match up objects ------------------------ #
def catalog_match(pubcat_file, erscat_file, match_out_file, match_tol=1.0):
    """One-way match of the published catalog against the ERS catalog.

    For each published-catalog row, finds the closest ERS source and
    keeps pairs separated by less than ``match_tol`` arcsec; writes the
    matched table (ERS id/ra/dec columns plus the published columns) to
    ``match_out_file``. Note this is NOT the symmetric match used in
    ns_combine. Returns None.
    """
    pubcat = Table.read(pubcat_file, format='fits')
    erscat = Table.read(erscat_file, format='ascii.commented_header')
    # construct coordinates needed for matching
    pub_coo = SkyCoord(ra=pubcat['RA'], dec=pubcat['Dec'])
    ers_coo = SkyCoord(ra=erscat['ra']*u.degree, dec=erscat['dec']*u.degree)
    # do the matching
    # closest_2to1, sep2d_2to1, sep3d = match_coordinates_sky(coord1, coord2) # location in coord2 for closest match to each coord1. len = len(coord1)
    # location in coord2 for closest match to each coord1. len = len(coord1)
    closest, sep2d, sep3d = match_coordinates_sky(pub_coo, ers_coo)
    matched = sep2d < match_tol*u.arcsec
    # matched_ers, matched_pub, ers_only, pub_only = symmetric_match_sky_coords(ers_coo, pub_coo, match_tol*u.arcsec)
    # generate the matched table
    keeplist = ['id_', 'ra', 'dec']
    tmpcat = Table(erscat[keeplist])
    matchtab = hstack([tmpcat[closest][matched], pubcat[matched]], join_type='outer')
    # write the matched catalog to a file
    matchtab.write(match_out_file, format='ascii.commented_header')
    return
# BELOW HERE IS OLD STUFF, not used
# --------------------------------------------------------
# Object-type whitelists used (by the unused process_tab below) to
# partition catalog entries into objects within M83, background objects,
# and generic observational detections, per NED / SIMBAD type codes.
within_galaxy_types_ned = ['*Cl', 'HII', 'PofG', 'Neb', 'SN', 'SNR', 'V*', 'WR*']
background_types_ned = ['G', 'GClstr', 'GGroup', 'QSO']
obs_types_ned = ['IrS', 'RadioS', 'UvES', 'UvS', 'VisS', 'XrayS']
within_galaxy_types_simbad = ['**', 'Assoc*', 'Candidate_SN*', 'Candidate_WR*', 'Cepheid', 'Cl*', 'HII', 'ISM',
                              'LMXB', 'MolCld', 'Nova', 'PN', 'PartofG', 'SN', 'SNR', 'SNR?', 'Star', 'ULX', 'V*', 'WR*', 'semi-regV*']
background_types_simbad = ['BLLac', 'ClG', 'EmG', 'Galaxy',
                           'GinCl', 'GroupG', 'Possible_G', 'StarburstG']
obs_types_simbad = ['EmObj', 'IR', 'Radio', 'Radio(sub-mm)', 'UV', 'X']
def name_match(simbad_name, ned_name):
matched = np.zeros(len(simbad_name), dtype='bool')
matched[np.char.replace(simbad_name, " ", "") == np.char.replace(ned_name, " ", "")] = True
# TODO: account for cases where one name has leading zero in an ID (eg [WLC2001]03) and other doesn't
return(matched)
# not using this
def process_tab(tab_in, tab_out, type_col, select_list=within_galaxy_types_ned, rfmt_fn=None):
    '''select specific object types from a table

    Reads ``tab_in``, keeps only rows whose (whitespace-stripped) Type is
    in ``select_list``, optionally applies ``rfmt_fn`` to the result, and
    writes it to ``tab_out`` (replacing any existing file) when that is
    not None. Returns the filtered table.
    '''
    tab = Table.read(tab_in)
    if type_col != 'Type':
        tab.rename_column(type_col, 'Type')
    tab['Type'] = np.char.strip(tab['Type'])  # remove whitespace -- helps with matching
    tg = tab.group_by('Type')
    mask = np.in1d(tg.groups.keys['Type'], select_list)  # create mask for only wanted types
    wanted_types = tg.groups[mask]
    # do some reformatting (identity comparison replaces the `!= None` test)
    if rfmt_fn is not None:
        wanted_types = rfmt_fn(wanted_types)
    if tab_out is not None:  # write to file, replacing any existing one
        if os.path.exists(tab_out):
            os.unlink(tab_out)
        wanted_types.write(tab_out, format='fits')
    return(wanted_types)
|
mshubatREPO_NAMEgalaxy_data_minesPATH_START.@galaxy_data_mines_extracted@galaxy_data_mines-master@galaxy_data_mines@tobeintegrated@published_cats.py@.PATH_END.py
|
{
"filename": "analysis.py",
"repo_name": "ChrisBoettner/plato",
"repo_path": "plato_extracted/plato-main/plato/utils/analysis.py",
"type": "Python"
}
|
from typing import Optional
import numpy as np
import pandas as pd
from plato.instrument.detection import DetectionModel
def compute_detectable_fraction(
    targets: pd.DataFrame,
    detection_model: DetectionModel,
    detection_threshold: float,
    axis: Optional[int | tuple[int]] = None,
) -> np.ndarray:
    """
    Calculate the fraction of detectable targets for a given sample.

    Parameters
    ----------
    targets : pd.DataFrame
        The target dataframe; must contain all columns required by
        detection_model.detection_efficiency (see DetectionModel).
    detection_model : DetectionModel
        The detection model instance.
    detection_threshold : float
        Minimum detection efficiency for a target to count as detectable.
    axis : Optional[int | tuple[int]], optional
        Axis along which to average. None (the default) averages over
        all axes.

    Returns
    -------
    np.ndarray
        The fraction of detectable targets.
    """
    is_detectable = (
        detection_model.detection_efficiency(targets) > detection_threshold
    )
    return np.mean(is_detectable, axis=axis)
def iterate_detectable_fraction(
    properties: pd.DataFrame,
    target_stars: pd.DataFrame,
    detection_model: DetectionModel,
    detection_threshold: float,
    reshaping_bins: Optional[int | tuple[int, int]] = None,
) -> np.ndarray:
    """
    Compute the detectable fraction for varying sets of properties.

    For each row of ``properties``, its columns are assigned to every star
    in ``target_stars`` and the detectable fraction of that augmented sample
    is computed. Useful when ``properties`` and ``target_stars`` are both
    long, so their cross product would not fit in a single dataframe.

    Parameters
    ----------
    properties : pd.DataFrame
        Properties to iterate over; each row yields one fraction.
    target_stars : pd.DataFrame
        Target stars. Together with ``properties`` it must supply every
        column required by detection_model.detection_efficiency
        (see DetectionModel).
    detection_model : DetectionModel
        The detection model instance.
    detection_threshold : float
        Minimum detection efficiency for a target to count as detectable.
    reshaping_bins : Optional[int | tuple[int, int]], optional
        If given, the result is reshaped to this shape; a single int n
        means an (n, n) square. By default, None.

    Returns
    -------
    np.ndarray
        The detectable fraction for each set of properties.
    """
    fractions: list = []
    for _, prop_row in properties.iterrows():
        augmented = target_stars.assign(
            **{name: prop_row[name] for name in properties.columns}
        )
        fractions.append(
            compute_detectable_fraction(
                augmented,
                detection_model=detection_model,
                detection_threshold=detection_threshold,
            )
        )
    if isinstance(reshaping_bins, int):
        reshaping_bins = (reshaping_bins, reshaping_bins)
    result = np.array(fractions)
    if reshaping_bins is not None:
        assert isinstance(reshaping_bins, tuple)
        result = result.reshape(reshaping_bins)
    return result
|
ChrisBoettnerREPO_NAMEplatoPATH_START.@plato_extracted@plato-main@plato@utils@analysis.py@.PATH_END.py
|
{
"filename": "ComputeColorDistribution.py",
"repo_name": "LSSTDESC/descqa",
"repo_path": "descqa_extracted/descqa-master/v1/descqa/ComputeColorDistribution.py",
"type": "Python"
}
|
from __future__ import division, print_function
import numpy as np
from astropy.io import fits
from astropy.table import Table
import os
import kcorrect
from astropy.cosmology import FlatLambdaCDM
def load_SDSS(filename, colors, SDSS_kcorrection_z):
    """
    Compute the CDF of SDSS colors, k-corrected to a common redshift.

    Parameters
    ----------
    filename : str, required
        path to the input SDSS catalog readable by kcorrect
        (ascii, no header; first column redshift, then ugriz maggies)
    colors : list of string, required
        list of colors to be tested
        e.g. ['u-g','g-r','r-i','i-z']
    SDSS_kcorrection_z : float, required
        redshift to which all magnitudes are k-corrected

    Returns
    -------
    vsummary : list of tuple
        one (n_galaxies, bin_centers, pdf, cdf) tuple per requested color
    mrmax : float
        r-band absolute-magnitude cut applied to the sample
    """
    translate = {'u': 'M_u', 'g': 'M_g', 'r': 'M_r', 'i': 'M_i', 'z': 'M_z'}
    data_dir = os.path.dirname(filename)
    kcorrect_magnitudes_path = os.path.join(
        data_dir,
        'sdss_k_corrected_magnitudes_z_0.06_0.09_z_{:.3f}.fits'.format(SDSS_kcorrection_z))
    if not os.path.exists(kcorrect_magnitudes_path):
        kcorrect_maggies_path = os.path.join(
            data_dir,
            'sdss_k_corrected_maggies_z_0.06_0.09_z_{:.3f}.dat'.format(SDSS_kcorrection_z))
        # Load kcorrect templates and filters, then reconstruct the maggies
        # at the common redshift.
        kcorrect.load_templates()
        kcorrect.load_filters()
        kcorrect.reconstruct_maggies_from_file(
            filename, redshift=SDSS_kcorrection_z, outfile=kcorrect_maggies_path)
        # ---------- Convert k-corrected maggies to magnitudes ----------
        # Bug fix: kcorrect_maggies_path already contains data_dir; joining it
        # with data_dir again (as the original did) yielded a doubled path
        # like 'data_dir/data_dir/file' for relative inputs.
        cat = Table.read(
            kcorrect_maggies_path, format='ascii.no_header',
            names=('redshift', 'maggies_u', 'maggies_g', 'maggies_r', 'maggies_i', 'maggies_z'))
        cat0 = Table.read(filename, format='ascii.no_header')
        redshifts = cat0['col1']
        # maggies -> apparent magnitudes
        u = -2.5*np.log10(cat['maggies_u'])
        g = -2.5*np.log10(cat['maggies_g'])
        r = -2.5*np.log10(cat['maggies_r'])
        i = -2.5*np.log10(cat['maggies_i'])
        z = -2.5*np.log10(cat['maggies_z'])
        cat1 = Table()
        cat1['redshift'] = redshifts
        cat1['u'] = u
        cat1['g'] = g
        cat1['r'] = r
        cat1['i'] = i
        cat1['z'] = z
        # Cache the magnitudes so later calls can skip the kcorrect step.
        cat1.write(kcorrect_magnitudes_path)
        cat = cat1.copy()
    else:
        cat = Table.read(kcorrect_magnitudes_path)
    # distance modulus (fixed cosmology, as in the original)
    ##########################################
    cosmo = FlatLambdaCDM(H0=70.2, Om0=0.275)
    ##########################################
    dm = np.array(cosmo.distmod(cat['redshift']))
    cat['M_u'] = cat['u'] - dm
    cat['M_g'] = cat['g'] - dm
    cat['M_r'] = cat['r'] - dm
    cat['M_i'] = cat['i'] - dm
    cat['M_z'] = cat['z'] - dm
    # Calculate the absolute magnitude cut: the 85th percentile of M_r in a
    # thin slice near the top of the redshift range.
    mask = (cat['redshift'] > 0.089) & (cat['redshift'] < 0.090)
    mr = cat['M_r'][mask]
    mr_sort = np.sort(mr)
    mrmax = mr_sort[int(len(mr)*0.85)]
    # Apply the r-band absolute magnitude cut.
    mask = (cat['M_r'] < mrmax)
    cat = cat[mask]
    vsummary = []
    # Histogram with small bins (for calculating the CDF)
    for color in colors:
        band1 = translate[color[0]]
        band2 = translate[color[2]]
        bins = np.linspace(-1, 4, 2000)
        hist, bin_edges = np.histogram((cat[band1]-cat[band2]), bins=bins)
        hist = hist/np.sum(hist)
        binctr = (bin_edges[1:] + bin_edges[:-1])/2.
        # Convert PDF to CDF with a cumulative sum (replaces the manual loop).
        cdf = np.cumsum(hist)
        vsummary.append((len(cat), binctr, hist, cdf))
    return vsummary, mrmax
|
LSSTDESCREPO_NAMEdescqaPATH_START.@descqa_extracted@descqa-master@v1@descqa@ComputeColorDistribution.py@.PATH_END.py
|
{
"filename": "_fill.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/volume/slices/z/_fill.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FillValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `volume.slices.z.fill` property: a number in [0, 1]."""

    def __init__(self, plotly_name="fill", parent_name="volume.slices.z", **kwargs):
        # Pull the overridable defaults out of kwargs before forwarding.
        defaults = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "max": kwargs.pop("max", 1),
            "min": kwargs.pop("min", 0),
        }
        super(FillValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@volume@slices@z@_fill.py@.PATH_END.py
|
{
"filename": "_borderwidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scatter3d/marker/colorbar/_borderwidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the `scatter3d.marker.colorbar.borderwidth` property (number >= 0)."""

    def __init__(
        self,
        plotly_name="borderwidth",
        parent_name="scatter3d.marker.colorbar",
        **kwargs,
    ):
        # Pull the overridable defaults out of kwargs before forwarding.
        defaults = {
            "edit_type": kwargs.pop("edit_type", "calc"),
            "min": kwargs.pop("min", 0),
        }
        super(BorderwidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scatter3d@marker@colorbar@_borderwidth.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/marker/colorbar/title/font/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `scatter.marker.colorbar.title.font.color` property."""

    def __init__(
        self,
        plotly_name="color",
        parent_name="scatter.marker.colorbar.title.font",
        **kwargs,
    ):
        # Pull the overridable default out of kwargs before forwarding.
        defaults = {"edit_type": kwargs.pop("edit_type", "colorbars")}
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **defaults,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@marker@colorbar@title@font@_color.py@.PATH_END.py
|
{
"filename": "amazon-redshift.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/doc/unconverted/python/amazon-redshift.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
notebook_metadata_filter: all
text_representation:
extension: .md
format_name: markdown
format_version: '1.1'
jupytext_version: 1.1.1
kernelspec:
display_name: Python 2
language: python
name: python2
plotly:
description: A tutorial showing how to plot Amazon AWS Redshift data with Plotly.
display_as: databases
has_thumbnail: false
language: python
layout: base
name: Plot Data From Amazon Redshift
order: 3
page_type: example_index
permalink: python/amazon-redshift/
redirect_from: ipython-notebooks/amazon-redshift/
thumbnail: /images/static-image
---
#### New to Plotly?
Plotly's Python library is free and open source! [Get started](https://plot.ly/python/getting-started/) by downloading the client and [reading the primer](https://plot.ly/python/getting-started/).
<br>You can set up Plotly to work in [online](https://plot.ly/python/getting-started/#initialization-for-online-plotting) or [offline](https://plot.ly/python/getting-started/#initialization-for-offline-plotting) mode, or in [jupyter notebooks](https://plot.ly/python/getting-started/#start-plotting-online).
<br>We also have a quick-reference [cheatsheet](https://images.plot.ly/plotly-documentation/images/python_cheat_sheet.pdf) (new!) to help you get started!
#### Version Check
Plotly's python package is updated frequently. Run `pip install plotly --upgrade` to use the latest version.
```python
import plotly
plotly.__version__
```
This notebook will go over one of the easiest ways to graph data from your [Amazon Redshift data warehouse](http://aws.amazon.com/redshift/) using [Plotly's public platform](https://plot.ly/) for publishing beautiful, interactive graphs from Python to the web.
[Plotly's Enterprise platform](https://plot.ly/product/enterprise/) allows for an easy way for your company to build and share graphs without the data leaving your servers.
#### Imports
In this notebook we'll be using [Amazon's Sample Redshift Data](http://docs.aws.amazon.com/redshift/latest/gsg/rs-gsg-create-sample-db.html) for this notebook. Although we won't be connecting through a JDBC/ODBC connection we'll be using the [psycopg2 package](http://initd.org/psycopg/docs/index.html) with [SQLAlchemy](http://www.sqlalchemy.org/) and [pandas](http://pandas.pydata.org/) to make it simple to query and analyze our data.
```python
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.tools as tls
import pandas as pd
import os
import requests
requests.packages.urllib3.disable_warnings() # this squashes insecure SSL warnings - DO NOT DO THIS ON PRODUCTION!
```
#### Connect to Redshift
You'll need your [Redshift Endpoint URL](http://docs.aws.amazon.com/redshift/latest/gsg/rs-gsg-connect-to-cluster.html) in order to access your Redshift instance. I've obscured mine below but yours will be in a format similar to `datawarehouse.some_chars_here.region_name.redshift.amazonaws.com`. Connecting to Redshift is made extremely simple once you've set your cluster configuration. This configuration needs to include the username, password, port, host and database name. I've opted to store mine as environmental variables on my machine.
```python
redshift_endpoint = os.getenv("REDSHIFT_ENDPOINT")
redshift_user = os.getenv("REDSHIFT_USER")
redshift_pass = os.getenv("REDSHIFT_PASS")
port = 5439
dbname = 'dev'
```
As I mentioned there are numerous ways to connect to a Redshift database and I've included two below. We can use either the SQLAlchemy package or we can use the psycopg2 package for a more direct access.
Both will allow us to execute SQL queries and get results however the SQLAlchemy engine makes it a bit easier to directly return our data as a dataframe using pandas. Plotly has a tight integration with pandas as well, making it extremely easy to make interactive graphs to share with your company.
#### SQLAlchemy
```python
from sqlalchemy import create_engine
engine_string = "postgresql+psycopg2://%s:%s@%s:%d/%s" \
% (redshift_user, redshift_pass, redshift_endpoint, port, dbname)
engine = create_engine(engine_string)
```
#### Psycopg2
```python
import psycopg2
conn = psycopg2.connect(
host="datawarehouse.cm4z2iunjfsc.us-west-2.redshift.amazonaws.com",
user=redshift_user,
port=port,
password=redshift_pass,
dbname=dbname)
cur = conn.cursor() # create a cursor for executing queries
```
#### Load Data
This next section goes over loading in the sample data from Amazon's sample database. This is strictly for the purposes of the tutorial so feel free to skim this section if you're going to be working with your own data.
-----------------START DATA LOADING-----------------
```python
cur.execute("""drop table users;
drop table venue;
drop table category;
drop table date;
drop table event;
drop table listing;
drop table sales;""")
conn.commit()
```
```python
aws_key = os.getenv("AWS_ACCESS_KEY_ID") # needed to access S3 Sample Data
aws_secret = os.getenv("AWS_SECRET_ACCESS_KEY")
base_copy_string = """copy %s from 's3://awssampledbuswest2/tickit/%s.txt'
credentials 'aws_access_key_id=%s;aws_secret_access_key=%s'
delimiter '%s';""" # the base COPY string that we'll be using
#easily generate each table that we'll need to COPY data from
tables = ["users", "venue", "category", "date", "event", "listing"]
data_files = ["allusers_pipe", "venue_pipe", "category_pipe", "date2008_pipe", "allevents_pipe", "listings_pipe"]
delimiters = ["|", "|", "|", "|", "|", "|", "|"]
#the generated COPY statements we'll be using to load data;
copy_statements = []
for tab, f, delim in zip(tables, data_files, delimiters):
copy_statements.append(base_copy_string % (tab, f, aws_key, aws_secret, delim))
# add in Sales data, delimited by '\t'
copy_statements.append("""copy sales from 's3://awssampledbuswest2/tickit/sales_tab.txt'
credentials 'aws_access_key_id=%s;aws_secret_access_key=%s'
delimiter '\t' timeformat 'MM/DD/YYYY HH:MI:SS';""" % (aws_key, aws_secret))
```
```python
# Create Table Statements
cur.execute("""
create table users(
userid integer not null distkey sortkey,
username char(8),
firstname varchar(30),
lastname varchar(30),
city varchar(30),
state char(2),
email varchar(100),
phone char(14),
likesports boolean,
liketheatre boolean,
likeconcerts boolean,
likejazz boolean,
likeclassical boolean,
likeopera boolean,
likerock boolean,
likevegas boolean,
likebroadway boolean,
likemusicals boolean);
create table venue(
venueid smallint not null distkey sortkey,
venuename varchar(100),
venuecity varchar(30),
venuestate char(2),
venueseats integer);
create table category(
catid smallint not null distkey sortkey,
catgroup varchar(10),
catname varchar(10),
catdesc varchar(50));
create table date(
dateid smallint not null distkey sortkey,
caldate date not null,
day character(3) not null,
week smallint not null,
month character(5) not null,
qtr character(5) not null,
year smallint not null,
holiday boolean default('N'));
create table event(
eventid integer not null distkey,
venueid smallint not null,
catid smallint not null,
dateid smallint not null sortkey,
eventname varchar(200),
starttime timestamp);
create table listing(
listid integer not null distkey,
sellerid integer not null,
eventid integer not null,
dateid smallint not null sortkey,
numtickets smallint not null,
priceperticket decimal(8,2),
totalprice decimal(8,2),
listtime timestamp);
create table sales(
salesid integer not null,
listid integer not null distkey,
sellerid integer not null,
buyerid integer not null,
eventid integer not null,
dateid smallint not null sortkey,
qtysold smallint not null,
pricepaid decimal(8,2),
commission decimal(8,2),
saletime timestamp);""")
```
```python
for copy_statement in copy_statements: # execute each COPY statement
cur.execute(copy_statement)
conn.commit()
```
```python
for table in tables + ["sales"]:
cur.execute("select count(*) from %s;" % (table,))
print(cur.fetchone())
conn.commit() # make sure data went through and commit our statements permanently.
```
-----------------END DATA LOADING-----------------
Now that we've loaded some data into our Redshift cluster, we can start running queries against it.
We're going to start off by exploring and presenting some of our user's tastes and habits. Pandas makes it easy to query our data base and get back a dataframe in return. In this query, I'm simply getting the preferences of our users. What kinds of events do they like?
```python
df = pd.read_sql_query("""
SELECT sum(likesports::int) as sports, sum(liketheatre::int) as theatre,
sum(likeconcerts::int) as concerts, sum(likejazz::int) as jazz,
sum(likeclassical::int) as classical, sum(likeopera::int) as opera,
sum(likerock::int) as rock, sum(likevegas::int) as vegas,
sum(likebroadway::int) as broadway, sum(likemusicals::int) as musical,
state
FROM users
GROUP BY state
ORDER BY state asc;
""", engine)
```
Now that I've gotten a DataFrame back, let's make a quick heatmap using plotly.
```python
data = [
go.Heatmap(
z = df.drop('state', axis=1).values,
x = df.drop('state', axis=1).columns,
y = df.state,
colorscale = 'Hot'
)
]
layout = go.Layout(title="State and Music Tastes", yaxis=dict(autotick=False, dtick=1))
py.iplot(go.Figure(data=data, layout=layout), filename='redshift/state and music taste heatmap', height=1000)
```
*the above graph is interactive, click and drag to zoom, double click to return to initial layout, shift click to pan*
This graph is simple to produce and even more simple to explore. The interactivity makes it great for those that aren't completely familiar with heatmaps.
Looking at this particular one we can easily get a sense of popularity. We can see here that sports events don't seem to be particularly popular among our users and that certain states have much higher preferences (and possibly users) than others.
A common next step might be to create some box plots of these user preferences.
```python
layout = go.Layout(title="Declared User Preference Box Plots",
yaxis=dict())
data = []
for pref in df.drop('state', axis=1).columns:
# for every preference type, make a box plot
data.append(go.Box(y=df[pref], name=pref))
py.iplot(go.Figure(data=data, layout=layout), filename='redshift/user preference box plots')
```
*the above graph is interactive, click and drag to zoom, double click to return to initial layout, shift click to pan*
It seems to be that sports are just a bit more compressed than the rest. This may be because there's simply fewer people interested in sports or our company doesn't have many sporting events.
Now that we've explored a little bit about some of our customers we've stumbled upon this sports anomaly. Are we listing fewer sports events? Do we sell approximately the same number of all event types and our users just aren't drawn to sports events?
We've got to understand a bit more and to do so we'll be plotting a simple bar graph of our event information.
```python
df = pd.read_sql_query("""
SELECT sum(event.catid) as category_sum, catname as category_name
FROM event, category
where event.catid = category.catid
GROUP BY category.catname
""", engine)
```
```python
layout = go.Layout(title="Event Categories Sum", yaxis=dict(title="Sum"))
data = [go.Bar(x=df.category_name, y=df.category_sum)]
py.iplot(go.Figure(data=data, layout=layout))
```
It's a good thing we started exploring this data because we've got to rush to management and report the discrepancy between our users' preferences and the kinds of events that we're hosting! Luckily, sharing plotly's graphs is extremely easy using the `play with this data` link at the bottom right.
However for our report, let's dive a bit deeper into the events that we're listing and when we're listing them. Maybe we're trending upwards with certain event types?
```python
df = pd.read_sql_query("""
SELECT sum(sales.qtysold) as quantity_sold, date.caldate
FROM sales, date
WHERE sales.dateid = date.dateid
GROUP BY date.caldate
ORDER BY date.caldate asc;
""", engine)
```
```python
layout = go.Layout(title="Event Sales Per Day", yaxis=dict(title="Sales Quantity"))
data = [go.Scatter(x=df.caldate, y=df.quantity_sold)]
py.iplot(go.Figure(data=data, layout=layout))
```
Overall it seems inconclusive except that our events seem to be seasonal. This aggregate graph doesn't show too much so it's likely worth exploring a bit more about each category.
```python
df = pd.read_sql_query("""
SELECT sum(sales.qtysold) as quantity_sold, date.caldate, category.catname as category_name
FROM sales, date, event, category
WHERE sales.dateid = date.dateid
AND sales.eventid = event.eventid
AND event.catid = category.catid
GROUP BY date.caldate, category_name
ORDER BY date.caldate asc;
""", engine)
```
It's always great to try and better understand which graph type conveys your message the best. Sometimes subplots do the best and other times it's best to put them all on one graph. Plotly makes it easy to do either one!
```python
data = []
for count, (name, g) in enumerate(df.groupby("category_name")):
data.append(go.Scatter(
name=name,
x=g.caldate,
y=g.quantity_sold,
xaxis='x' + str(count + 1),
yaxis='y' + str(count + 1)
))
fig = tls.make_subplots(rows=2,cols=2)
fig['layout'].update(title="Event Sales Per Day By Category")
fig['data'] += data
py.iplot(fig)
```
The above subplots seem to tell an interesting story although it's important to note that with subplots the axes are not always aligned. So let's try plotting all of them together, with lines for each category.
```python
data = []
for name, g in df.groupby("category_name"):
data.append(go.Scatter(
name=name,
x=g.caldate,
y=g.quantity_sold
))
fig = go.Figure()
fig['layout'].update(title="Event Sales Per Day By Category")
fig['data'] += data
py.iplot(fig, filename='redshift/Event Sales Per Day by Category')
```
This looks much better and explains the story perfectly. It seems that all of our events are fairly regular through the year except for a spike in musicals and plays around March. This might be of interest to the sales team, so I'm going to mark up this graph and share it with some of the relevant sales representatives in my company.
The rest of my team can edit the graph with me in a web app. Collaborating does not require coding, emailing, or downloading software. I can even fit a function to the data in the web app.
```python
from IPython.display import Image
```
```python
Image(url="http://i.imgur.com/nUVihzx.png")
```
```python
tls.embed("https://plot.ly/~bill_chambers/195")
```
Plotly makes it easier for data analysts and data scientists to share data in meaningful ways. By marking up drawings and embedding comments on the graph, I can make sure that I'm sharing everything within a context. Rather than having to send a static image, I can share an interactive plot a coworker can explore and understand as well. Plotly makes it easy for companies to make sure that information is conveyed in the right context.
#### References
Learn more about:
- [Amazon Redshift Data Warehouse](http://aws.amazon.com/redshift/)
- [Chart Studio Enterprise - Plotly Hosted on your servers](https://plot.ly/product/enterprise/)
- [Subplots in Plotly](https://plot.ly/python/subplots/)
- [Creating a plot of best fit](https://plot.ly/online-graphing/tutorials/create-a-line-of-best-fit-online/)
```python
from IPython.display import display, HTML
display(HTML('<link href="//fonts.googleapis.com/css?family=Open+Sans:600,400,300,200|Inconsolata|Ubuntu+Mono:400,700" rel="stylesheet" type="text/css" />'))
display(HTML('<link rel="stylesheet" type="text/css" href="http://help.plot.ly/documentation/all_static/css/ipython-notebook-custom.css">'))
! pip install git+https://github.com/plotly/publisher.git --upgrade
import publisher
publisher.publish(
'redshift.ipynb', 'python/amazon-redshift/', 'Plot Data From Amazon Redshift',
'A tutorial showing how to plot Amazon AWS Redshift data with Plotly.',
title = 'Plot Data from Amazon Redshift | plotly',
has_thumbnail='false', redirect_from='ipython-notebooks/amazon-redshift/',
language='python', page_type='example_index',
display_as='databases', order=3,
ipynb= '~notebook_demo/1')
```
```python
```
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@doc@unconverted@python@amazon-redshift.md@.PATH_END.py
|
{
"filename": "xla_custom_call_ops_test.py",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/compiler/tests/xla_custom_call_ops_test.py",
"type": "Python"
}
|
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for XLA custom call op wrapper."""
from tensorflow.compiler.tests import xla_test
from tensorflow.compiler.tf2xla.python import xla
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import random_ops
from tensorflow.python.platform import test
class XlaCustomCallOpTest(xla_test.XLATestCase):
  """Tests for the TF wrappers around the XLA CustomCall instruction."""

  def testXlaCustomCallOp(self):
    with ops.device('device:{}:0'.format(self.device)):

      def call_fn(x, y):
        return xla.custom_call(
            args=(x, y),
            target_name='my_call',
            dtype=dtypes.int32,
            shape=(3, 4, 5),
            backend_config='my_backend_config')

      jitted = def_function.function(call_fn, jit_compile=True)
      lhs = random_ops.random_normal([1, 2, 3], dtype=dtypes.float32)
      rhs = random_ops.random_normal([], dtype=dtypes.float32)
      hlo_text = jitted.experimental_get_compiler_ir(lhs, rhs)(stage='hlo')
      # The emitted HLO must carry the requested output type/shape, the
      # call target and the backend config.
      for expected in ('s32[3,4,5]{2,1,0} custom-call(f32[1,2,3]{2,1,0}',
                       'custom_call_target="my_call"',
                       'backend_config="my_backend_config"'):
        self.assertIn(expected, hlo_text)

  def testXlaCustomCallOpDoesntExist(self):
    with ops.device('device:{}:0'.format(self.device)):

      def call_fn():
        return xla.custom_call(
            args=(1, 2),
            target_name='my_non_existing_call_target',
            dtype=dtypes.int32,
            shape=(),
            backend_config='my_backend_config',
        )

      # Compiling against an unregistered call target must raise.
      with self.assertRaises(errors_impl.OpError):
        jitted = def_function.function(call_fn, jit_compile=True)
        jitted()

  def testXlaCustomCallV2Op(self):
    with ops.device('device:{}:0'.format(self.device)):

      def call_fn(x, y):
        return xla.custom_call_v2(
            'my_call',
            (x, y),
            (
                tensor_spec.TensorSpec((2, 3), dtypes.int32),
                tensor_spec.TensorSpec((5,), dtypes.float32),
            ),
            has_side_effect=True,
            backend_config='my_backend_config',
        )

      jitted = def_function.function(call_fn, jit_compile=True)
      lhs = random_ops.random_normal([7, 11], dtype=dtypes.float32)
      rhs = random_ops.random_normal([13, 17, 19], dtype=dtypes.float32)
      hlo_text = jitted.experimental_get_compiler_ir(lhs, rhs)(stage='hlo')
      self.assertContainsInOrder([
          '= (s32[2,3]{1,0}, f32[5]{0}) custom-call(',
          'f32[7,11]{1,0}',
          'f32[13,17,19]{2,1,0}',
          'custom_call_target="my_call"',
          'custom_call_has_side_effect=true',
          'api_version=API_VERSION_STATUS_RETURNING_UNIFIED',
          'backend_config="my_backend_config"',
      ], hlo_text)
if __name__ == '__main__':
  # Eager execution is enabled before running so the XLA test harness can
  # set up its devices.
  ops.enable_eager_execution()
  test.main()
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@compiler@tests@xla_custom_call_ops_test.py@.PATH_END.py
|
{
"filename": "mod.md",
"repo_name": "misharash/class_public",
"repo_path": "class_public_extracted/class_public-master/doc/input/mod.md",
"type": "Markdown"
}
|
Updating the manual
===================
Author: D. C. Hooper (hooper@physik.rwth-aachen.de)
This pdf manual and accompanying web version have been generated using the `doxygen` software (http://www.doxygen.org). This software directly reads the code and extracts the necessary comments to form the manual, meaning it is very easy to generate newer versions of the manual as desired.
### For CLASS developers: ###
To maintain the usefulness of the manual, a new version should be generated after any major upgrade to `CLASS`. To keep track of how up-to-date the manual is the title page also displays the last modification date. The manual is generated automatically from the code, excepted a few chapters written manually in the files
README.md
doc/input/chap2.md
doc/input/chap3.md
doc/input/mod.md
external_Pk/README.md
You can update these files, or add new ones that should be declared in the `INPUT=` field of `doc/input/doxyconf`.
Generating a new version of this manual is straightforward. First, you need to install the `doxygen` software, which can be done by following the instructions on the software's webpage. The location where you install this software is irrelevant; it doesn't need to be in the same folder as `CLASS`. For Mac OSX, homebrew users can install the software with `brew install doxygen --with-graphviz`.
Once installed, navigate to the class/doc/input directory and run the first script
` . make1.sh`
This will generate a new version of the html manual and the necessary files to make the pdf version. Unfortunately, `doxygen` does not yet offer the option to automatically order the output chapters in the pdf version of the manual. Hence, before compiling the pdf, this must be done manually. To do this you need to find the `refman.tex` file in class/doc/manual/latex. With this file you can modify the title page, headers, footers, and chapter ordering for the final pdf. Usually we just do two things: manually add the line
\vspace*{1cm}
{\large Last updated \today}\\
after
{\Large C\+L\+A\+SS M\+A\+N\+U\+AL }\\
and move manually the chapters `"The external Pk mode"` and `"Updating the manual"` to the end, after the automatically generated part. Once you have this file with your desired configuration, navigate back to the class/doc/input directory, and run the second script
` . make2.sh`
You should now be able to find the finished pdf in `class/doc/manual/CLASS_MANUAL.pdf`. Finally you can commit the changes to git, but not all the content of `doc/` is necessary: only `doc/README`, `doc/input/` and `doc/manual/CLASS_MANUAL.pdf`. Since version 2.8, we are not committing anymore `doc/manual/html/` because it was too big (and complicating the version history): users only get the PDF manual from git.
As a final comment, doxygen uses two main configuration files: `doxyconf` and `doxygen.sty`, both located in class/doc/input. Changes to these files can dramatically impact the outcome, so any modifications to these files should be done with great care.
|
misharashREPO_NAMEclass_publicPATH_START.@class_public_extracted@class_public-master@doc@input@mod.md@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "lsst-uk/lasair-lsst",
"repo_path": "lasair-lsst_extracted/lasair-lsst-main/deploy/roles/grafana/README.md",
"type": "Markdown"
}
|
Role Name
=========
A brief description of the role goes here.
Requirements
------------
Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required.
Role Variables
--------------
A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well.
Dependencies
------------
A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
Example Playbook
----------------
Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too:
- hosts: servers
roles:
- { role: username.rolename, x: 42 }
License
-------
BSD
Author Information
------------------
An optional section for the role authors to include contact information, or a website (HTML is not allowed).
|
lsst-ukREPO_NAMElasair-lsstPATH_START.@lasair-lsst_extracted@lasair-lsst-main@deploy@roles@grafana@README.md@.PATH_END.py
|
{
"filename": "Miscellaneous.ipynb",
"repo_name": "moble/GWFrames",
"repo_path": "GWFrames_extracted/GWFrames-master/Tests/Miscellaneous.ipynb",
"type": "Jupyter Notebook"
}
|
# Pointwise operations
```
import GWFrames
T = [0., 1., 2.]
LM = [[l,m] for l in range(0,9) for m in range(-l,l+1)]
Data = zeros((len(LM), len(T)), dtype=complex)
Data[0,:] = sqrt(4*pi)+0.0j
# The Waveform objects A and B will represent uniform values of 3.0 and 5.0 over the sphere.
# Note that the factor of sqrt(4*pi) is necessary due to the normalization of the spherical harmonics.
A = GWFrames.Waveform(T, LM, 3.0*Data)
B = GWFrames.Waveform(T, LM, 5.0*Data)
A.SetSpinWeight(0)
B.SetSpinWeight(0)
# C should represent a value of 8.0 over the sphere, which is equivalent to
# the (l,m)=(0,0) mode having the value 8.0*sqrt(4*pi).
C = A+B
print C.Data(0,0)/sqrt(4*pi), "should be (8+0j)"
# D should represent a value of -2.0 over the sphere, which is equivalent to
# the (l,m)=(0,0) mode having the value -2.0*sqrt(4*pi).
D = A-B
print D.Data(0,0)/sqrt(4*pi), "should be (-2+0j)"
# D should represent a value of 15.0 over the sphere, which is equivalent to
# the (l,m)=(0,0) mode having the value 15.0*sqrt(4*pi).
D = A*B
print D.Data(0,0)/sqrt(4*pi), "should be (15+0j)"
# E should represent a value of 0.6 over the sphere, which is equivalent to
# the (l,m)=(0,0) mode having the value 0.6*sqrt(4*pi).
E = A/B
print E.Data(0,0)/sqrt(4*pi), "should be (0.6+0j)"
```
(8+0j) should be (8+0j)
(-2+0j) should be (-2+0j)
(15+0j)
should be (15+0j)
(0.6+0j) should be (0.6+0j)
# $\eth$ operators
```
import GWFrames
T = [0., 1., 2.]
LM = [[l,m] for l in range(0,9) for m in range(-l,l+1)]
Data = zeros((len(LM), len(T)), dtype=complex)
Data[0,:] = 2+3j # (l,m)=(0,0)
Data[1,:] = 5+7j # (l,m)=(1,-1)
Data[4,:] = 11+13j # (l,m)=(2,-2)
Data[9,:] = 17+19j # (l,m)=(3,-3)
A = GWFrames.Waveform(T, LM, Data)
A.SetSpinWeight(1)
# Note that I gave A (l,m)=(0,0) data, even though it has spin s=1. That data will be removed any time it goes through edth.
B = A.GHPEdth()
C = B.IntegrateGHPEdth()
D = B.GHPEdthBar()
print A.Data()[:10,:]
print B.Data()[:10,:]
print("Nonzero terms above should be {0} and {1}".format(Data[4,0]*sqrt((2-1)*(2+1+1)/2.), Data[9,0]*sqrt((3-1)*(3+1+1)/2.)))
print C.Data()[:10,:]
print D.Data()[:10,:]
```
[[ 2. +3.j 2. +3.j 2. +3.j]
[ 5. +7.j 5. +7.j 5. +7.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 11.+13.j 11.+13.j 11.+13.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 17.+19.j 17.+19.j 17.+19.j]]
[[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 15.55634919+18.38477631j 15.55634919+18.38477631j
15.55634919+18.38477631j]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 38.01315562+42.48529157j 38.01315562+42.48529157j
38.01315562+42.48529157j]]
Nonzero terms above should be (15.5563491861+18.3847763109j) and (38.0131556175+42.4852915725j)
[[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 11.+13.j 11.+13.j 11.+13.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 17.+19.j 17.+19.j 17.+19.j]]
[[ 0. +0.j 0. +0.j 0. +0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[-22.-26.j -22.-26.j -22.-26.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[-85.-95.j -85.-95.j -85.-95.j]]
```
A.SetSpinWeight(+1)
B = A.GHPEdthBar()
C = B.IntegrateGHPEdthBar()
D = B.GHPEdth()
print A.Data()[:10,:]
print B.Data()[:10,:]
print("Nonzero terms above should be {0}, {1}, and {2}".format(-Data[1,0],
-Data[4,0]*sqrt((2+1)*(2-1+1)/2.),
-Data[9,0]*sqrt((3+1)*(3-1+1)/2.)))
print C.Data()[:10,:]
print D.Data()[:10,:]
```
[[ 2. +3.j 2. +3.j 2. +3.j]
[ 5. +7.j 5. +7.j 5. +7.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 11.+13.j 11.+13.j 11.+13.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 17.+19.j 17.+19.j 17.+19.j]]
[[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -5.00000000 -7.j -5.00000000 -7.j -5.00000000 -7.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[-19.05255888-22.5166605j -19.05255888-22.5166605j
-19.05255888-22.5166605j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[-41.64132563-46.54030511j -41.64132563-46.54030511j
-41.64132563-46.54030511j]]
Nonzero terms above should be (-5-7j), (-19.0525588833-22.5166604984j), and (-41.6413256273-46.5403051129j)
[[ 0. +0.j 0. +0.j 0. +0.j]
[ 5. +7.j 5. +7.j 5. +7.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 11.+13.j 11.+13.j 11.+13.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 17.+19.j 17.+19.j 17.+19.j]]
[[ 0. +0.j 0. +0.j 0. +0.j]
[ -5. -7.j -5. -7.j -5. -7.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -33. -39.j -33. -39.j -33. -39.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[-102.-114.j -102.-114.j -102.-114.j]]
```
A.SetSpinWeight(-1)
B = A.GHPEdthBar()
C = B.IntegrateGHPEdthBar()
D = B.GHPEdth()
print A.Data()[:10,:]
print B.Data()[:10,:]
print("Nonzero terms above should be {0}, {1}, and {2}".format(0.0j,
-Data[4,0]*sqrt((2-1)*(2+1+1)/2.),
-Data[9,0]*sqrt((3-1)*(3+1+1)/2.)))
print C.Data()[:10,:]
print D.Data()[:10,:]
```
[[ 2. +3.j 2. +3.j 2. +3.j]
[ 5. +7.j 5. +7.j 5. +7.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 11.+13.j 11.+13.j 11.+13.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 17.+19.j 17.+19.j 17.+19.j]]
[[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[ 0.00000000 +0.j 0.00000000 +0.j 0.00000000 +0.j ]
[-15.55634919-18.38477631j -15.55634919-18.38477631j
-15.55634919-18.38477631j]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[ -0.00000000 -0.j -0.00000000 -0.j -0.00000000 -0.j ]
[-38.01315562-42.48529157j -38.01315562-42.48529157j
-38.01315562-42.48529157j]]
Nonzero terms above should be 0j, (-15.5563491861-18.3847763109j), and (-38.0131556175-42.4852915725j)
[[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 11.+13.j 11.+13.j 11.+13.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 17.+19.j 17.+19.j 17.+19.j]]
[[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[-22.-26.j -22.-26.j -22.-26.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[ -0. -0.j -0. -0.j -0. -0.j]
[-85.-95.j -85.-95.j -85.-95.j]]
```
B = A.GHPEdthBar()
C = B.GHPEdthBar()
D = C.GHPEdth()
E = D.GHPEdth()
print A.SpinWeight()
print B.SpinWeight()
print C.SpinWeight()
print D.SpinWeight()
print E.SpinWeight()
```
-1
-2
-3
-2
-1
```
def EdthBarSqEdthSq(l) :
    """Return the eigenvalue of edthbar^2 edth^2 on a degree-l mode.

    Note that this formula only applies for s=0 functions.
    """
    # (l-1) * l * (l+1) * (l+2) / 4, accumulated factor by factor.
    product = 1.
    for factor in (l - 1., l, l + 1., l + 2.):
        product *= factor
    return product / 4.
A.SetSpinWeight(0)
B = A.GHPEdthBar().GHPEdthBar().GHPEdth().GHPEdth()
print A.Data()[:10,:]
print B.Data()[:10,:]
print("Nonzero terms above should be {0}, {1}, and {2}".format(
A.Data(1,0)*EdthBarSqEdthSq(A.LM()[1,0]),
A.Data(4,0)*EdthBarSqEdthSq(A.LM()[4,0]),
A.Data(9,0)*EdthBarSqEdthSq(A.LM()[9,0])))
```
[[ 2. +3.j 2. +3.j 2. +3.j]
[ 5. +7.j 5. +7.j 5. +7.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 11.+13.j 11.+13.j 11.+13.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 17.+19.j 17.+19.j 17.+19.j]]
[[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 66. +78.j 66. +78.j 66. +78.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 0. +0.j 0. +0.j 0. +0.j]
[ 510.+570.j 510.+570.j 510.+570.j]]
Nonzero terms above should be 0j, (66+78j), and (510+570j)
```
```
|
mobleREPO_NAMEGWFramesPATH_START.@GWFrames_extracted@GWFrames-master@Tests@Miscellaneous.ipynb@.PATH_END.py
|
{
"filename": "mpl_plot_trace_bars.py",
"repo_name": "arviz-devs/arviz",
"repo_path": "arviz_extracted/arviz-main/examples/matplotlib/mpl_plot_trace_bars.py",
"type": "Python"
}
|
"""
Rank Bars Diagnostic with KDE
=============================
_gallery_category: Mixed Plots
"""
import matplotlib.pyplot as plt
import arviz as az
az.style.use("arviz-doc")
data = az.load_arviz_data("non_centered_eight")
az.plot_trace(data, var_names=("tau", "mu"), kind="rank_bars")
plt.show()
|
arviz-devsREPO_NAMEarvizPATH_START.@arviz_extracted@arviz-main@examples@matplotlib@mpl_plot_trace_bars.py@.PATH_END.py
|
{
"filename": "plot_logparabola_norm_spectral.py",
"repo_name": "gammapy/gammapy",
"repo_path": "gammapy_extracted/gammapy-main/examples/models/spectral/plot_logparabola_norm_spectral.py",
"type": "Python"
}
|
r"""
.. _logparabola-spectral-norm-model:
Log parabola spectral norm model
================================
This model parametrises a log parabola spectral correction with a norm parameter.
"""
# %%
# Example plot
# ------------
# Here is an example plot of the model:
from astropy import units as u
import matplotlib.pyplot as plt
from gammapy.modeling.models import (
LogParabolaNormSpectralModel,
Models,
SkyModel,
TemplateSpectralModel,
)
energy_bounds = [0.1, 100] * u.TeV
energy = [0.3, 1, 3, 10, 30] * u.TeV
values = [40, 30, 20, 10, 1] * u.Unit("TeV-1 s-1 cm-2")
template = TemplateSpectralModel(energy, values)
norm = LogParabolaNormSpectralModel(
norm=1.5,
reference=1 * u.TeV,
)
template.plot(energy_bounds=energy_bounds, label="Template model")
lp_norm = template * norm
lp_norm.plot(energy_bounds, label="Template model with LogParabola norm correction")
plt.legend(loc="best")
plt.grid(which="both")
# %%
# YAML representation
# -------------------
# Here is an example YAML file using the model:
model = SkyModel(spectral_model=lp_norm, name="log-parabola-norm-model")
models = Models([model])
print(models.to_yaml())
|
gammapyREPO_NAMEgammapyPATH_START.@gammapy_extracted@gammapy-main@examples@models@spectral@plot_logparabola_norm_spectral.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "natashabatalha/virga",
"repo_path": "virga_extracted/virga-master/setup.py",
"type": "Python"
}
|
#!/usr/bin/env python

# This sample setup.py can be used as a template for any project using d2to1.
# Simply copy this file and, if desired, delete all the comments.  Also remove
# the 'namespace_packages' and 'packages' arguments to setup.py if this project
# does not contain any packages belonging to a namespace package.

# This import statement attempts to import the setup() function from setuptools
# (this replaces the setup() one uses from distutils.core when using plain
# distutils).
#
# If the import fails (the user doesn't have the setuptools package) it then
# uses the ez_setup bootstrap script to install setuptools, then retries the
# import.  This is common practice for packages using setuptools.
try:
    from setuptools import setup, find_packages
except ImportError:
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

# The standard setup() call.  Notice, however, that most of the arguments
# normally passed to setup() are absent.  They will instead be read from the
# setup.cfg file using d2to1.
#
# In order for this to work it is necessary to specify setup_requires=['d2to1']
# If the user does not have d2to1, this will bootstrap it.  Also require
# stsci.distutils to use any of the common setup_hooks included in
# stsci.distutils (see the setup.cfg for more details).
#
# The next line, which defines namespace_packages and packages is only
# necessary if this project contains a package belonging to the stsci namespace
# package, such as stsci.distutils or stsci.tools.  This is not necessary for
# projects with their own namespace, such as acstools or pyraf.
#
# d2to1=True is required to enable d2to1 and read the remaining project metadata
# from the setup.cfg file.
#
# use_2to3 and zip_safe are common options support by setuptools; these can
# also be placed in the setup.cfg, as will be demonstrated in a future update
# to this sample package.
setup(
    name='virga-exo',
    version = '0.4',
    description = 'exoplanet code for compute cloud structure',
    long_description = 'README.rst',
    author = 'Natasha E. Batalha',
    author_email = 'natasha.e.batalha@gmail.com',
    url = 'https://natashabatalha.github.io/virga',
    license = 'GPL-3.0',
    download_url = 'https://github.com/natashabatalha/virga',
    classifiers = [
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent' ,
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Scientific/Engineering :: Astronomy',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    packages=find_packages(),include_package_data=True,
    package_data={'': ['reference/*']},
    install_requires=[
        'numpy',
        'pandas',
        'bokeh',
        'joblib',
        'photutils',
        'astropy',
        'scipy',
        'PyMieScatt'
    ],
    zip_safe = False,
)
|
natashabatalhaREPO_NAMEvirgaPATH_START.@virga_extracted@virga-master@setup.py@.PATH_END.py
|
{
"filename": "autofit.py",
"repo_name": "saltastro/pyhrs",
"repo_path": "pyhrs_extracted/pyhrs-master/scripts/autofit.py",
"type": "Python"
}
|
import os
import sys
import numpy as np
import pickle
from ccdproc import CCDData
import specutils
from astropy import units as u
from astropy import modeling as mod
from astropy.io import fits
import pylab as pl
from specreduce import WavelengthSolution
from pyhrs import create_linelists
from pyhrs import collapse_array, match_lines
from pyhrs import HRSOrder, HRSModel
def autofit(arc, order_frame, n_order, ws, target='upper', interp=True, npoints=20, xlimit=1, slimit=0.5, wlimit=0.25):
    """Automatically identify lines in an arc and determine
    the fit

    NOTE(review): parameter semantics inferred from usage below -- confirm
    against the pyhrs documentation:
        arc: CCDData-like arc frame with ``.data`` and ``.unit``.
        order_frame: CCDData-like frame describing the order layout.
        n_order: number of the echelle order being fit.
        ws: current wavelength-solution model, callable on pixel positions.
        target: 'upper' selects the upper fiber, anything else the lower.
        interp: interpolate when rectifying the order into a box.
        npoints, xlimit, slimit, wlimit: matching tolerances forwarded to
            ``match_lines``.

    Side effects: draws diagnostic plots, prints fit parameters, and pickles
    the per-row shifts plus the fitted solution to ``tran_<n_order>.pkl``.
    """
    # Reference ThAr line lists: (sw, sf) full catalog, (slines, sfluxes)
    # the subset used for matching.
    sw, sf, slines, sfluxes = create_linelists('thar_list.txt', 'thar.fits')
    hrs = HRSOrder(n_order)
    hrs.set_order_from_array(order_frame.data)
    hrs.set_flux_from_array(arc.data, flux_unit=arc.unit)
    # Choose which fiber of this order to extract.
    if target=='upper':
        hrs.set_target(True)
    else:
        hrs.set_target(False)
    # Rectify the curved order into a rectangular box of rows.
    data, coef = hrs.create_box(hrs.flux, interp=interp)
    #create a summed spectra by cross correlating each row
    xarr = np.arange(len(data[0]))
    flux, shift_dict = collapse_array(data, i_reference=10)
    # Restrict reference lines to the wavelength span of this order
    # (smask is computed but only mask is used in the commented plot below).
    mask = (sw > ws(xarr).min()-5) * ( sw < ws(xarr).max()+5 )
    smask = (slines > ws(xarr).min()-10) * (slines < ws(xarr).max()+10)
    # Match observed peak pixel positions (mx) to catalog wavelengths (mw).
    mx, mw = match_lines(xarr, flux, slines, sfluxes, ws, rw=3, npoints = npoints,
                         xlimit=xlimit, slimit=slimit, wlimit=wlimit)
    pl.plot(mx, mw, ls='', marker='o')
    pl.figure()
    # Linear least-squares refit of the wavelength model to the matched lines.
    fit_ws = mod.fitting.LinearLSQFitter()
    print n_order, ws.parameters,
    nws = fit_ws(ws, mx, mw)
    print nws.parameters
    ws = WavelengthSolution.WavelengthSolution(mx, mw, nws)
    ws.fit()
    # Persist shifts and solution (Python 2; text-mode 'w' pickle).
    pickle.dump([shift_dict, ws], open('tran_%i.pkl' % n_order, 'w'))
    #print ws.parameters
    #pl.plot(mx, mw-ws(mx), ls='', marker='o')
    #pl.figure()
    #pl.plot(ws(xarr), flux)
    #pl.plot(sw[mask], sf[mask] * flux.max()/sf[mask].max())
    #pl.show()
    return
if __name__=='__main__':
arc = CCDData.read(sys.argv[1])
order_frame = CCDData.read(sys.argv[2], unit=u.adu)
coef = pickle.load(open('coef.pkl'))
camera_name = arc.header['DETNAM'].lower()
if camera_name=='hrdet':
arm = 'R'
if arc.header['OBSMODE']=='HIGH RESOLUTION':
target = True
elif arc.header['OBSMODE']=='MEDIUM RESOLUTION':
target = True
elif arc.header['OBSMODE']=='LOW RESOLUTION':
target = False
else:
arm = 'H'
if arc.header['OBSMODE']=='HIGH RESOLUTION':
target = True
elif arc.header['OBSMODE']=='MEDIUM RESOLUTION':
target = True
elif arc.header['OBSMODE']=='LOW RESOLUTION':
target = False
n_order=65
for n_order in range(54,83):
#dc_dict, ws = pickle.load(open('sol_%i.pkl' % n_order))
c_list = [coef[0](n_order), coef[1](n_order), coef[2](n_order), coef[3](n_order)]
#ws.model.parameters = c_list
#dw = np.median(ws.wavelength - ws(ws.x))
#ws.model.c0 -= dw
nws = mod.models.Legendre1D(3)
nws.domain = [xarr.min(), xarr.max()]
nws.parameters = c_list
autofit(arc, order_frame, n_order, nws, target='upper', interp=True)
#k = iws.keys()[0]
#pickle.dump([dc_dict, iws[k]], open('sol_%i.pkl' % n_order, 'w'))
|
saltastroREPO_NAMEpyhrsPATH_START.@pyhrs_extracted@pyhrs-master@scripts@autofit.py@.PATH_END.py
|
{
"filename": "defaults.py",
"repo_name": "ismael-mendoza/ShapeMeasurementFisherFormalism",
"repo_path": "ShapeMeasurementFisherFormalism_extracted/ShapeMeasurementFisherFormalism-master/smff/defaults.py",
"type": "Python"
}
|
"""Some of the defaults that are used in the overall program."""
import numpy as np
def get_steps(g_parameters, image_renderer):
    """Build the finite-difference step size for every fit parameter.

    Maps each fit-parameter name to the step used when taking numerical
    derivatives with respect to that parameter.  Step choices were copied
    from David's code suggestions.

    Args:
        g_parameters(:class:`analysis.galfun.GParameters`): An object containing
            different forms of the galaxy parameters (``fit_params`` mapping).
        image_renderer: Renderer whose ``pixel_scale`` sets the centroid step.

    Returns:
        A dict of the form 'parameter_name:value_of_step'.
    """
    fit_params = g_parameters.fit_params

    def _step_for(name):
        # Substring checks are ordered deliberately; e.g. 'beta' must be
        # matched before the generic 'e' test.
        if 'flux' in name:
            return fit_params[name] * .01
        if 'hlr' in name:
            return fit_params[name] * .05
        if 'beta' in name:
            return .1
        if 'x0' in name or 'y0' in name:
            return image_renderer.pixel_scale / 3.
        if 'g1' in name or 'g2' in name:
            return .03
        if 'e' in name:
            return .03
        return .01

    return {name: _step_for(name) for name in fit_params}
def get_initial_values_fit(g_parameters):
    """Draw randomized starting values for the fit parameters.

    Each true parameter value is offset by a positive random amount
    proportional to its magnitude (plus a 0.2 floor) so the fitter does
    not start exactly at the truth.

    Args:
        g_parameters(:class:`analysis.galfun.GParameters`): An object containing
            different forms of the galaxy parameters (``fit_params`` mapping).

    Returns:
        A dict of the form 'parameter_name:initial_value'.
    """
    fit_params = g_parameters.fit_params
    # One uniform draw per parameter, consumed in iteration order.
    return {
        name: value + abs(np.random.uniform()) * (value / 10 + 0.2)
        for name, value in fit_params.items()
    }
def get_minimums(g_parameters, gal_image):
    """Return the lower bounds used when fitting each parameter.

    Parameters whose names match none of the known categories are simply
    omitted from the result (no entry, rather than an unbounded value).

    Args:
        g_parameters(:class:`analysis.galfun.GParameters`): An object containing
            different forms of the galaxy parameters.
        gal_image: Image whose ``xmax``/``ymax`` bound the centroid offsets.

    Returns:
        A dict of the form 'parameter_name:minimum_value'.
    """
    _unbounded = object()  # sentinel: parameter gets no explicit minimum

    def _lower_bound(name):
        if 'flux' in name or 'hlr' in name:
            return 0.
        if 'x0' in name:
            return - gal_image.xmax * PIXEL_SCALE / 2
        if 'y0' in name:
            return - gal_image.ymax * PIXEL_SCALE / 2
        if 'eta1' in name or 'eta2' in name:
            return g_parameters.params[name] - 2.5
        if 'e1' in name or 'g1' in name or 'e2' in name or 'g2' in name:
            return -.7
        return _unbounded

    minimums = {}
    for name in g_parameters.fit_params:
        bound = _lower_bound(name)
        if bound is not _unbounded:
            minimums[name] = bound
    return minimums
def get_maximums(g_parameters, gal_image):
    """Return the upper bounds used when fitting each parameter.

    Mirrors :func:`get_minimums`; parameters whose names match none of
    the known categories are omitted from the result.

    Args:
        g_parameters(:class:`galfun.GParameters`): An object containing
            different forms of the galaxy parameters.
        gal_image: Image whose ``xmax``/``ymax`` bound the centroid offsets.

    Returns:
        A dict of the form 'parameter_name:maximum_value'.
    """
    _unbounded = object()  # sentinel: parameter gets no explicit maximum

    def _upper_bound(name):
        if 'flux' in name or 'hlr' in name:
            return float('Inf')
        if 'x0' in name:
            return gal_image.xmax * PIXEL_SCALE / 2
        if 'y0' in name:
            return gal_image.ymax * PIXEL_SCALE / 2
        if 'eta1' in name or 'eta2' in name:
            return g_parameters.params[name] + 2.5
        if 'e1' in name or 'g1' in name or 'e2' in name or 'g2' in name:
            return .7
        return _unbounded

    maximums = {}
    for name in g_parameters.fit_params:
        bound = _upper_bound(name)
        if bound is not _unbounded:
            maximums[name] = bound
    return maximums
# general global(module-level) constants.
FIT_DEVIATION = .00001
PIXEL_SCALE = .2
SIG_DIGITS = 4
DPI = 300
FONTSIZE_LABEL = 8
FONTSIZE_VALUE = 4
FONTSIZE_TITLE = 14
# some default names for argparse and i/0 file management.
PROJECT = 'project'
PLOTS_DIR = 'plots'
RESULTS_DIR = 'results'
GALAXY_FILE = 'galaxies.csv'
SNR_FILE = 'snr.txt'
MODEL = 'gaussian'
FIGURE_BASENAME = 'figure'
FIGURE_EXTENSION = '.pdf'
|
ismael-mendozaREPO_NAMEShapeMeasurementFisherFormalismPATH_START.@ShapeMeasurementFisherFormalism_extracted@ShapeMeasurementFisherFormalism-master@smff@defaults.py@.PATH_END.py
|
{
"filename": "Tutorial_Plots.ipynb",
"repo_name": "CaymanUnterborn/ExoPlex",
"repo_path": "ExoPlex_extracted/ExoPlex-master/Tutorial/Tutorial_Plots.ipynb",
"type": "Jupyter Notebook"
}
|
```python
#RUN THIS FIRST
import matplotlib.pyplot as plt
import os
import numpy as np
file_loc = os.getcwd()+'/Data'
def get_den(M, R, M_err, R_err):
    """Convert masses (Earth masses) and radii (Earth radii) to bulk densities.

    Densities come out in g/cm^3.  When non-empty error-bar lists are
    supplied, the propagated density uncertainties are also computed and
    ``[Rho, Rho_err]`` is returned; with empty error lists only the density
    list ``Rho`` is returned (note the asymmetric return, which callers
    rely on).
    """
    Rho = []
    for mass, radius in zip(M, R):
        volume = (4 * np.pi / 3) * pow(radius * 6371e3, 3)
        Rho.append(mass * 5.97e21 / volume)
    if len(M_err) > 0:
        # First-order propagation of mass and radius uncertainties.
        Rho_err = []
        for mass, radius, dm, dr in zip(M, R, M_err, R_err):
            prefactor = 5.97e21 / (4 * np.pi * pow(radius * 6371e3, 3))
            Rho_err.append(prefactor * np.sqrt(pow(dm, 2) + pow(3 * dr * mass / radius, 2)))
        return [Rho, Rho_err]
    else:
        return Rho
T1_masses = [1.374,1.308,0.388,0.692,1.039,1.321,0.326]
T1_radii = [1.116,1.097,0.788,0.92,1.045,1.129,0.755]
T1_mass_EB = [0.069,0.056,0.012,0.022,0.031,0.038,0.02]
T1_rad_EB = [0.013,0.013,0.01,0.013,0.013,0.014,0.014]
T1_den , T1_den_err = get_den(T1_masses,T1_radii,T1_mass_EB,T1_rad_EB) #returns in g/cm^3
```
```python
#Plot density, Pressure, Temperature vs radius
#Make sure to update your filename. Note this is the FULL data file, not the smaller file (no _full in filename)
filename = 'TRAPPIST-1b_baseline_full.csv'
data = open(file_loc +'/'+ filename,'r')
data_lines = data.readlines()[1:]
Radius = [float(data_lines[i].strip('\n').split(',')[1]) for i in range(len(data_lines))]
Density = [float(data_lines[i].strip('\n').split(',')[3]) for i in range(len(data_lines))]
Pressure = [float(data_lines[i].strip('\n').split(',')[4]) for i in range(len(data_lines))]
Temperature = [float(data_lines[i].strip('\n').split(',')[5]) for i in range(len(data_lines))]
Gravity = [float(data_lines[i].strip('\n').split(',')[6]) for i in range(len(data_lines))]
figure = plt.figure(figsize = (12,15))
ax1 = plt.subplot2grid((6, 3), (0, 0), colspan=3, rowspan=3)
ax2 = plt.subplot2grid((6, 3), (3, 0), colspan=3, rowspan=1)
ax3 = plt.subplot2grid((6, 3), (4, 0), colspan=3, rowspan=1)
ax4 = plt.subplot2grid((6, 3), (5, 0), colspan=3, rowspan=1)
ax1.plot(Radius, Density, 'k', linewidth=2.)
ax1.set_ylim(min(Density)-0.2, (max(Density)) + 1.)
ax1.set_xlim(0., max(Radius))
ax1.set_ylabel('Density ($\mathrm{g\ cm^{-3}}$)',size= 15)
ax1.minorticks_on()
# Make a subplot showing the calculated pressure profile
ax2.plot(Radius, Pressure, 'b', linewidth=2.)
ax2.set_ylim(-1, (max(Pressure))+50)
ax2.set_xlim(0., max(Radius))
ax2.set_ylabel("Pressure (GPa)",size= 15)
ax2.minorticks_on()
# Make a subplot showing the calculated temperature profile
ax3.plot(Radius, Temperature, 'g', linewidth=2.)
ax3.set_ylabel("Temperature ($\mathrm{K}$)",size= 15)
ax3.set_xlim(0., max(Radius))
ax3.set_ylim(min(Temperature)-50, max(Temperature) + 500)
ax3.minorticks_on()
ax4.plot(Radius, Gravity, 'r', linewidth=2.)
ax4.set_ylabel("Gravity ($\mathrm{m\ s^{-2}}$)",size= 15)
ax4.set_xlabel("Radius (km)",size= 15)
ax4.set_xlim(0., max(Radius))
ax4.set_ylim(0., max(Gravity) + 0.5)
ax4.minorticks_on()
plt.show()
```
```python
##Compare your individual runs to Trappist-1 System
#Enter your masses and radii here, by hand if you want
#your_Masses = [mass_1,mass_2,mass_3,...]
your_Masses = [1,1,1]
#your_Radii = [rad_1, rad_2,rad_3,...]
your_Radii = [0.8,0.9,1.1]
#otherwise for those who created a file uncomment these lines below
#filename = 'TRAPPIST-1b_baseline.csv'
#data = open(file_loc +'/'+ filename,'r')
#data_lines = data.readlines()[1:]
#your_Masses = [float(data_lines[i].strip('\n').split(',')[0]) for i in range(len(data_lines))]
#your_Radii = [float(data_lines[i].strip('\n').split(',')[1]) for i in range(len(data_lines))]
your_Den = get_den(your_Masses, your_Radii, [],[])
fig, (ax1, ax2) = plt.subplots(1,2, figsize=(20,8))
ax1.errorbar(T1_masses,T1_radii,yerr=T1_rad_EB,xerr=T1_mass_EB,ecolor='r',label='TRAPPIST-1 planets',fmt='s',color='k',capthick=2,elinewidth=2)
ax1.scatter(your_Masses,your_Radii,color='b',label= 'Your Model')
ax1.set_ylabel('Radius (Earth Radii)',size= 20)
ax1.set_xlabel('Mass (Earth Masses)',size= 20)
ax1.legend(loc='upper left',fontsize=22)
ax1.text(1.4,1.12, 'T1-b',size=16)
ax1.text(1.2,1.082, 'T1-c',size=16)
ax1.text(0.275,0.795, 'T1-d',size=16)
ax1.text(0.585,0.925, 'T1-e',size=16)
ax1.text(0.935,1.05 ,'T1-f',size=16)
ax1.text(1.2,1.137, 'T1-g',size=16)
ax1.text(0.3,0.72, 'T1-h',size=16)
ax1.axis(xmin=0.2,xmax=1.5)
ax1.axis(ymin=0.7,ymax=1.2)
ax1.minorticks_on()
for label in (ax1.get_xticklabels() + ax1.get_yticklabels()):
label.set_fontsize(16)
ax2.errorbar(T1_radii,T1_den,yerr=T1_den_err,xerr=T1_rad_EB,ecolor='r',label='TRAPPIST-1 planets',fmt='s',color='k',capthick=2,elinewidth=2)
ax2.scatter(your_Radii,your_Den,color='b',label= 'Your Model')
ax2.set_xlabel('Radius (Earth Radii)', size= 20)
ax2.set_ylabel('Density ($\mathrm{g\ cm^{-3}}$)',size= 20)
ax2.legend(loc='upper left',fontsize=22)
ax2.text(1.12, 5.48 ,'T1-b', size=16)
ax2.text(1.055,5.48, 'T1-c',size=16)
ax2.text(0.795,4.4, 'T1-d',size=16)
ax2.text(.925,4.92, 'T1-e',size=16)
ax2.text(1.05,5.05 ,'T1-f',size=16)
ax2.text(1.135,5.08, 'T1-g',size=16)
ax2.text(0.76,4.2, 'T1-h',size=16)
ax2.axis(xmin=0.7,xmax=1.2)
ax2.axis(ymin = 4.,ymax = 5.6)
ax2.minorticks_on()
for label in (ax2.get_xticklabels() + ax2.get_yticklabels()):
label.set_fontsize(16)
plt.show()
```
```python
##Let's calculate chi-squared.
#Enter your masses and radii here, by hand if you want
#your_compositions = [comp_1,comp_2,comp_3,...]
your_compositions = [1,2,3]
#your_compositions = [comp_1,comp_2,comp_3,...]
your_chi_squared = [2,4,1]
#otherwise for those who created a file uncomment these lines below
#filename = 'FILENAME.txt'
#data = open(file_loc +'/'+ filename,'r')
#data_lines = data.readlines()[1:]
#IF you want to include other parameters, simply add lines naming them (e.g your_CMBP) and insert the
#corresponding column number after the .split('\t')[COLUMN NUMBER] (CMBP is column 4 by default)
#your_compositions = [float(data_lines[i].strip('\n').split('\t')[COLUMN]) for i in range(len(data_lines))]
#your_chi_squared = [float(data_lines[i].strip('\n').split('\t')[COLUMN]) for i in range(len(data_lines))]
plt.plot(your_compositions, your_chi_squared,color='k',label='chi-squared')
plt.hlines(1,0,max(your_compositions),linestyles='dashed',colors='r')
plt.legend(loc='upper left',fontsize=20)
plt.xlabel('FILL IN COMPOSITIONAL VARIABLE',size=20)
plt.ylabel('Chi Squared in Radius',size=20)
plt.minorticks_on()
plt.show()
```
```python
##Compare how uncertainties in mass affect model radius for constant composition. Make sure you have ~50 individual
#model runs. This plot produces a histogram.
#Enter your masses and radii here, by hand if you want
#your_Masses = [mass_1,mass_2,mass_3,...]
your_Masses = [1,2,3]
#your_Radii = [rad_1, rad_2,rad_3,...]
your_Radii = [4,5,6]
#otherwise for those who created a file uncomment these lines below
#filename = 'FILENAME.csv'
#data = open(file_loc +'/'+ filename,'r')
#data_lines = data.readlines()[1:]
#your_Masses = [float(data_lines[i].strip('\n').split(',')[0]) for i in range(len(data_lines))]
#your_Radii = [float(data_lines[i].strip('\n').split(',')[1]) for i in range(len(data_lines))]
plt.hist(your_Radii,bins=20)
plt.ylabel('Number',size=20)
plt.xlabel('Radius (Earth Radii)',size=20)
plt.minorticks_on()
plt.show()
```
|
CaymanUnterbornREPO_NAMEExoPlexPATH_START.@ExoPlex_extracted@ExoPlex-master@Tutorial@Tutorial_Plots.ipynb@.PATH_END.py
|
{
"filename": "docmaker.py",
"repo_name": "Caltech-IPAC/Montage",
"repo_path": "Montage_extracted/Montage-main/lib/src/freetype-2.4.4/src/tools/docmaker/docmaker.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# DocMaker (c) 2002, 2004, 2008 David Turner <david@freetype.org>
#
#  This program is a re-write of the original DocMaker tool used
# to generate the API Reference of the FreeType font engine
# by converting in-source comments into structured HTML.
#
# This new version is capable of outputting XML data, as well
# as accepts more liberal formatting options.
#
# It also uses regular expression matching and substitution
# to speed things significantly.
#
from sources import *
from content import *
from utils import *
from formatter import *
from tohtml import *
import utils
import sys, os, time, string, glob, getopt
def usage():
    """Print the DocMaker command-line help text (Python 2 print statements)."""
    print "\nDocMaker Usage information\n"
    print "  docmaker [options] file1 [file2 ...]\n"
    print "using the following options:\n"
    print "  -h : print this page"
    print "  -t : set project title, as in '-t \"My Project\"'"
    print "  -o : set output directory, as in '-o mydir'"
    print "  -p : set documentation prefix, as in '-p ft2'"
    print ""
    print "  --title  : same as -t, as in '--title=\"My Project\"'"
    print "  --output : same as -o, as in '--output=mydir'"
    print "  --prefix : same as -p, as in '--prefix=ft2'"
def main( argv ):
    """main program loop

    Parses command-line options, feeds every input file through the
    source and content processors, then emits the HTML documentation.
    """
    # NOTE(review): `global output_dir` is declared and `output_dir` is set
    # to None below, but the -o/--output option assigns `utils.output_dir`
    # instead -- the module-level `output_dir` is never updated.  Confirm
    # which name `check_output()` actually consults.
    global output_dir

    try:
        opts, args = getopt.getopt( sys.argv[1:], \
                                    "ht:o:p:", \
                                    ["help", "title=", "output=", "prefix="] )
    except getopt.GetoptError:
        usage()
        sys.exit( 2 )

    # No input files given: show help and bail out.
    if args == []:
        usage()
        sys.exit( 1 )

    # process options
    #
    project_title  = "Project"
    project_prefix = None
    output_dir     = None

    for opt in opts:
        if opt[0] in ( "-h", "--help" ):
            usage()
            sys.exit( 0 )

        if opt[0] in ( "-t", "--title" ):
            project_title = opt[1]

        if opt[0] in ( "-o", "--output" ):
            utils.output_dir = opt[1]

        if opt[0] in ( "-p", "--prefix" ):
            project_prefix = opt[1]

    check_output()

    # create context and processor
    source_processor  = SourceProcessor()
    content_processor = ContentProcessor()

    # retrieve the list of files to process
    file_list = make_file_list( args )
    for filename in file_list:
        source_processor.parse_file( filename )
        content_processor.parse_sources( source_processor )

    # process sections
    content_processor.finish()

    # render table of contents, index, and all sections as HTML
    formatter = HtmlFormatter( content_processor, project_title, project_prefix )

    formatter.toc_dump()
    formatter.index_dump()
    formatter.section_dump_all()
# if called from the command line
#
if __name__ == '__main__':
main( sys.argv )
# eof
|
Caltech-IPACREPO_NAMEMontagePATH_START.@Montage_extracted@Montage-main@lib@src@freetype-2.4.4@src@tools@docmaker@docmaker.py@.PATH_END.py
|
{
"filename": "getting-started.md",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/doc/python/getting-started.md",
"type": "Markdown"
}
|
---
jupyter:
jupytext:
notebook_metadata_filter: all
text_representation:
extension: .md
format_name: markdown
format_version: '1.3'
jupytext_version: 1.16.1
kernelspec:
display_name: Python 3 (ipykernel)
language: python
name: python3
language_info:
codemirror_mode:
name: ipython
version: 3
file_extension: .py
mimetype: text/x-python
name: python
nbconvert_exporter: python
pygments_lexer: ipython3
version: 3.10.14
plotly:
description: Getting Started with Plotly for Python.
has_thumbnail: false
language: python
layout: base
name: Getting Started with Plotly
page_type: u-guide
permalink: python/getting-started/
redirect_from:
- python/getting_started/
- /python/pytables/
---
### Overview
The [`plotly` Python library](/python/) is an interactive, [open-source](/python/is-plotly-free) plotting library that supports over 40 unique chart types covering a wide range of statistical, financial, geographic, scientific, and 3-dimensional use-cases.
Built on top of the Plotly JavaScript library ([plotly.js](https://plotly.com/javascript/)), `plotly` enables Python users to create beautiful interactive web-based visualizations that can be displayed in Jupyter notebooks, saved to standalone HTML files, or served as part of pure Python-built web applications using Dash. The `plotly` Python library is sometimes referred to as "plotly.py" to differentiate it from the JavaScript library.
Thanks to deep integration with our [Kaleido](https://medium.com/plotly/introducing-kaleido-b03c4b7b1d81) image export utility, `plotly` also provides great support for non-web contexts including desktop editors (e.g. QtConsole, Spyder, PyCharm) and static document publishing (e.g. exporting notebooks to PDF with high-quality vector images).
This Getting Started guide explains how to install `plotly` and related optional packages. Once you've installed it, you can use our documentation in three main ways:
1. You jump right in to **examples** of how to make [basic charts](/python/basic-charts/), [statistical charts](/python/statistical-charts/), [scientific charts](/python/scientific-charts/), [financial charts](/python/financial-charts/), [maps](/python/maps/), and [3-dimensional charts](/python/3d-charts/).
2. If you prefer to learn about the **fundamentals** of the library first, you can read about [the structure of figures](/python/figure-structure/), [how to create and update figures](/python/creating-and-updating-figures/), [how to display figures](/python/renderers/), [how to theme figures with templates](/python/templates/), [how to export figures to various formats](/python/static-image-export/) and about [Plotly Express, the high-level API](/python/plotly-express/) for doing all of the above.
3. You can check out our exhaustive **reference** guides: the [Python API reference](/python-api-reference) or the [Figure Reference](/python/reference)
For information on using Python to build web applications containing plotly figures, see the [_Dash User Guide_](https://dash.plotly.com/).
We also encourage you to join the [Plotly Community Forum](http://community.plotly.com/) if you want help with anything related to `plotly`.
### Installation
`plotly` may be installed using `pip`:
```
$ pip install plotly
```
or `conda`:
```
$ conda install -c plotly plotly
```
If you want to use Plotly Express, install its required dependencies with:
```
pip install plotly[express]
```
You'll also need to install a [supported dataframe library](/python/px-arguments#supported-dataFrame-types).
### Plotly charts in Dash
[Dash](https://plotly.com/dash/) is the best way to build analytical apps in Python using Plotly figures. To run the app below, run `pip install dash`, click "Download" to get the code and run `python app.py`.
Get started with [the official Dash docs](https://dash.plotly.com/installation) and **learn how to effortlessly [style](https://plotly.com/dash/design-kit/) & [deploy](https://plotly.com/dash/app-manager/) apps like this with <a class="plotly-red" href="https://plotly.com/dash/">Dash Enterprise</a>.**
```python hide_code=true
from IPython.display import IFrame
snippet_url = 'https://python-docs-dash-snippets.herokuapp.com/python-docs-dash-snippets/'
IFrame(snippet_url + 'getting-started', width='100%', height=1200)
```
<div style="font-size: 0.9em;"><div style="width: calc(100% - 30px); box-shadow: none; border: thin solid rgb(229, 229, 229);"><div style="padding: 5px;"><div><p><strong>Sign up for Dash Club</strong> → Free cheat sheets plus updates from Chris Parmer and Adam Schroeder delivered to your inbox every two months. Includes tips and tricks, community apps, and deep dives into the Dash architecture.
<u><a href="https://go.plotly.com/dash-club?utm_source=Dash+Club+2022&utm_medium=graphing_libraries&utm_content=inline">Join now</a></u>.</p></div></div></div></div>
#### JupyterLab Support
To use `plotly` in [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/), install the `jupyterlab` and `anywidget` packages in the same environment as you installed `plotly`, using `pip`:
```
$ pip install jupyterlab anywidget
```
or `conda`:
```
$ conda install jupyterlab anywidget
```
Launch JupyterLab with:
```
$ jupyter lab
```
and display plotly figures inline:
```python
import plotly.express as px
fig = px.bar(x=["a", "b", "c"], y=[1, 3, 2])
fig.show()
```
or using `FigureWidget` objects.
```python
import plotly.express as px
fig = px.bar(x=["a", "b", "c"], y=[1, 3, 2])
import plotly.graph_objects as go
fig_widget = go.FigureWidget(fig)
fig_widget
```
See [_Displaying Figures in Python_](/python/renderers/) for more information on the renderers framework, and see [_Plotly FigureWidget Overview_](/python/figurewidget/) for more information on using `FigureWidget`.
See the [Troubleshooting guide](/python/troubleshooting/) if you run into any problems with JupyterLab, particularly if you are using multiple Python environments inside Jupyter.
#### Jupyter Notebook Support
For use in the classic [Jupyter Notebook](https://jupyter.org/), install the `notebook` and `ipywidgets`
packages using `pip`:
```
pip install "notebook>=7.0" "anywidget>=0.9.13"
```
or `conda`:
```
conda install "notebook>=7.0" "anywidget>=0.9.13"
```
These packages contain everything you need to run a Jupyter notebook...
```
$ jupyter notebook
```
and display plotly figures inline using the notebook renderer...
```python
import plotly.express as px
fig = px.bar(x=["a", "b", "c"], y=[1, 3, 2])
fig.show()
```
or using `FigureWidget` objects.
```python
import plotly.express as px
fig = px.bar(x=["a", "b", "c"], y=[1, 3, 2])
import plotly.graph_objects as go
fig_widget = go.FigureWidget(fig)
fig_widget
```
See [_Displaying Figures in Python_](/python/renderers/) for more information on the renderers framework, and see [_Plotly FigureWidget Overview_](/python/figurewidget/) for more information on using `FigureWidget`.
### Static Image Export
plotly.py supports [static image export](https://plotly.com/python/static-image-export/),
using either the [`kaleido`](https://github.com/plotly/Kaleido)
package (recommended, supported as of `plotly` version 4.9) or the [orca](https://github.com/plotly/orca)
command line utility (legacy as of `plotly` version 4.9).
#### Kaleido
The [`kaleido`](https://github.com/plotly/Kaleido) package has no dependencies and can be installed
using pip...
```
$ pip install -U kaleido
```
or conda.
```
$ conda install -c plotly python-kaleido
```
#### Orca
While Kaleido is now the recommended image export approach because it is easier to install
and more widely compatible, [static image export](https://plotly.com/python/static-image-export/)
can also be supported
by the legacy [orca](https://github.com/plotly/orca) command line utility and the
[`psutil`](https://github.com/giampaolo/psutil) Python package.
These dependencies can both be installed using conda:
```
conda install -c plotly plotly-orca==1.3.1 psutil
```
Or, `psutil` can be installed using pip...
```
pip install psutil
```
and orca can be installed according to the instructions in the [orca README](https://github.com/plotly/orca).
#### Extended Geo Support
Some plotly.py features rely on fairly large geographic shape files. The county
choropleth figure factory is one such example. These shape files are distributed as a
separate `plotly-geo` package. This package can be installed using pip...
```
$ pip install plotly-geo==1.0.0
```
or conda.
```
$ conda install -c plotly plotly-geo=1.0.0
```
See [_USA County Choropleth Maps in Python_](/python/county-choropleth/) for more information on the county choropleth figure factory.
### Where to next?
Once you've installed, you can use our documentation in three main ways:
1. You can jump right in to **examples** of how to make [basic charts](/python/basic-charts/), [statistical charts](/python/statistical-charts/), [scientific charts](/python/scientific-charts/), [financial charts](/python/financial-charts/), [maps](/python/maps/), and [3-dimensional charts](/python/3d-charts/).
2. If you prefer to learn about the **fundamentals** of the library first, you can read about [the structure of figures](/python/figure-structure/), [how to create and update figures](/python/creating-and-updating-figures/), [how to display figures](/python/renderers/), [how to theme figures with templates](/python/templates/), [how to export figures to various formats](/python/static-image-export/) and about [Plotly Express, the high-level API](/python/plotly-express/) for doing all of the above.
3. You can check out our exhaustive **reference** guides: the [Python API reference](/python-api-reference) or the [Figure Reference](/python/reference)
For information on using Python to build web applications containing plotly figures, see the [_Dash User Guide_](https://dash.plotly.com/).
We also encourage you to join the [Plotly Community Forum](http://community.plotly.com/) if you want help with anything related to `plotly`.
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@doc@python@getting-started.md@.PATH_END.py
|
{
"filename": "funcs_DConeMaps.py",
"repo_name": "simoncasassus/ConeRot",
"repo_path": "ConeRot_extracted/ConeRot-master/funcs_DConeMaps.py",
"type": "Python"
}
|
import sys
import numpy as np
import scipy as sp
import os
import os.path
from scipy import ndimage
from astropy.io import fits as pf
import re
from copy import deepcopy
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
from pylab import *
import matplotlib.colors as colors
include_path = '/home/simon/common/python/include/'
sys.path.append(include_path)
from ImUtils.Resamp import gridding
from ImUtils.Cube2Im import slice0
import ConeRot.TakeAzAv as TakeAzAv
import ConeRot.ConicTransforms_numba as ConicTransforms
# import PyVtools.Vtools as Vtools
# Silence all warnings in this process and in any subprocesses, unless
# the user explicitly passed -W options on the command line (in which
# case sys.warnoptions is non-empty and we leave their choice alone).
if not sys.warnoptions:
    import os, warnings
    #warnings.simplefilter("default") # Change the filter in this process
    warnings.simplefilter("ignore")  # Change the filter in this process
    #os.environ["PYTHONWARNINGS"] = "default" # Also affect subprocesses
    os.environ["PYTHONWARNINGS"] = "ignore"  # Also affect subprocesses
def cartesian2conicpolar(outcoords, inputshape, origin, inc=0., tanpsi=0.):
    """Map a (radius, azimuth) pixel of a conic polar grid to sky pixels.

    Inverse mapping for scipy's geometric_transform: given the output
    pixel indices ``outcoords = (rindex, thetaindex)``, return the
    (iy, ix) Cartesian pixel to sample.  ``inputshape`` is the shape of
    the polar array, ``origin`` the (x, y) indices of the disc centre,
    ``inc`` the inclination and ``tanpsi`` the tangent of the cone
    opening angle (angles in radians).
    """
    rindex, thetaindex = outcoords
    x0, y0 = origin
    # Azimuth spans [0, 2pi) uniformly over the first axis of the polar array.
    azimuth = thetaindex * 2. * np.pi / inputshape[0]
    rsin = rindex * np.sin(azimuth)
    cone_height = tanpsi * rindex
    sky_y = rindex * np.cos(azimuth)
    # Deproject: stretch along the minor axis and shift by the projected
    # height of the cone surface.
    sky_x = rsin / np.cos(inc) + (cone_height - rsin * np.tan(inc)) * np.sin(inc)
    return (sky_y + y0, -sky_x + x0)
def cartesian2polar(outcoords, inputshape, origin, inc=0.):
    """Inverse map from flat polar pixel indices to Cartesian sky pixels.

    Flat (psi = 0) companion of :func:`cartesian2conicpolar`:
    ``outcoords`` = (rindex, thetaindex), ``inputshape`` is the polar
    array shape, ``origin`` the (x, y) centre indices and ``inc`` the
    inclination in radians.  Returns the (iy, ix) pixel to sample.
    """
    rindex, thetaindex = outcoords
    x0, y0 = origin
    # NB: the azimuth axis is normalised by (n - 1) here, unlike the
    # conic transform above which divides by n.
    azimuth = thetaindex * 2. * np.pi / (inputshape[0] - 1.)
    sky_y = rindex * np.cos(azimuth)
    sky_x = rindex * np.sin(azimuth) * np.cos(inc)
    return (sky_y + y0, -sky_x + x0)
def conicpolar2cartesian_ellipse_reflectedazimuths(outcoords,
                                                   inputshape,
                                                   origin,
                                                   inc=0.,
                                                   tanpsi=0.):
    """Sky pixel -> conic polar pixel, with azimuths folded into [0, pi].

    Solves the quadratic intersecting the line of sight with the cone of
    opening tan(psi) = ``tanpsi`` at inclination ``inc`` (radians).  The
    azimuth is recovered from arccos alone, so mirror azimuths map to
    the same theta index.
    """
    yindex, xindex = outcoords
    x0, y0 = origin
    nphi = inputshape[0]
    # Sky-plane offsets from the centre (x flipped, matching the
    # inverse transforms above).
    dx = -float(xindex - x0)
    dy = float(yindex - y0)
    cosi2 = np.cos(inc)**2
    # Quadratic qa*rho^2 + qb*rho + qc = 0 for the conic radius rho.
    qa = (np.tan(inc) * tanpsi)**2 - 1.0
    qb = -2. * dx * np.sin(inc) * tanpsi / cosi2
    qc = dy**2 + dx**2 / cosi2
    discriminant = qb**2 - 4. * qa * qc
    rho = (-qb - np.sqrt(discriminant)) / (2. * qa)
    costheta = dy / rho if rho != 0. else 0.
    thetaindex = np.arccos(costheta) * float(nphi) / (2. * np.pi)
    return (rho, thetaindex)
def conicpolar2cartesian_ellipse(outcoords,
                                 inputshape,
                                 origin,
                                 inc=0.,
                                 tanpsi=0.):
    """Sky pixel -> conic polar pixel with full [0, 2pi) azimuth recovery.

    Like :func:`conicpolar2cartesian_ellipse_reflectedazimuths`, but also
    evaluates sin(theta) from the deprojection so theta lands on the
    correct side of the disc.
    """
    yindex, xindex = outcoords
    x0, y0 = origin
    nphi = inputshape[0]
    dx = -float(xindex - x0)
    dy = float(yindex - y0)
    cosi2 = np.cos(inc)**2
    # Quadratic qa*rho^2 + qb*rho + qc = 0 for the conic radius rho.
    qa = (np.tan(inc) * tanpsi)**2 - 1.0
    qb = -2. * dx * np.sin(inc) * tanpsi / cosi2
    qc = dy**2 + dx**2 / cosi2
    discriminant = qb**2 - 4. * qa * qc
    root = np.sqrt(discriminant)
    rho = (-qb - root) / (2. * qa)
    other_root = (-qb + root) / (2. * qa)
    if other_root > 0.:
        # The discarded root is expected to be non-physical (negative).
        print("rho_p > 0", other_root, rho, dx, dy, inc * 180. / np.pi,
              tanpsi, np.arctan(tanpsi) * 180. / np.pi)
    costheta = dy / rho if rho != 0. else 0.
    # Recover sin(theta) from the deprojected x offset to disambiguate
    # the azimuth quadrant.
    cone_h = tanpsi * rho
    denom = rho * ((1. / np.cos(inc)) - np.tan(inc) * np.sin(inc))
    sintheta = (dx - cone_h * np.sin(inc)) / denom
    theta = np.arccos(costheta)
    if sintheta < 0:
        theta = 2. * np.pi - theta
    thetaindex = theta * (float(nphi) - 1.) / (2. * np.pi)
    return (rho, thetaindex)
def polar2cartesian(outcoords, inputshape, origin, inc=0.):
    """Sky pixel -> flat polar (r, theta) pixel.

    Flat (psi = 0) counterpart of the conic ellipse transforms: ``inc``
    is the inclination in radians, and the azimuth is recovered over the
    full [0, 2pi) range via arctan2.
    """
    yindex, xindex = outcoords
    x0, y0 = origin
    nphi = inputshape[0]
    dx = -float(xindex - x0)
    dy = float(yindex - y0)
    # Deprojected radius of the inclined circle through this pixel.
    rho = np.sqrt((dx**2 / (np.cos(inc)**2)) + dy**2)
    theta = np.arctan2((-dx / np.cos(inc)), dy)
    if (theta < 0):
        theta = theta + 2. * np.pi
    return (rho, theta * (float(nphi) - 1.) / (2. * np.pi))
def carttoconicpolar_nonumba(im, inc, tanpsi):
    """Resample a sky image onto a conic polar grid with scipy.

    Pure-python reference implementation of :func:`carttoconicpolar`,
    driving scipy's generic geometric_transform with the
    :func:`cartesian2conicpolar` inverse mapping (slow).
    """
    ny, nx = im.shape
    # Pixel indices of the image centre.
    center = ((float(nx) + 1.) / 2. - 1., (float(ny) + 1.) / 2. - 1.)
    mapping_kwargs = {
        'inputshape': im.shape,
        'inc': inc,
        'tanpsi': tanpsi,
        'origin': center,
    }
    return sp.ndimage.geometric_transform(im,
                                          cartesian2conicpolar,
                                          order=1,
                                          output_shape=im.shape,
                                          extra_keywords=mapping_kwargs)
def carttoconicpolar(im, inc, tanpsi):
    """Fast conic polar transform via the numba kernel in ConicTransforms.

    Only the resampled polar image is returned; the x/y offset maps that
    the kernel also fills are discarded here.
    """
    im_polar = np.zeros(im.shape)
    xoffsets = np.zeros(im.shape)
    yoffsets = np.zeros(im.shape)
    ConicTransforms.cart2conicpolar_matrix(im,
                                           im_polar,
                                           xoffsets,
                                           yoffsets,
                                           inc=inc,
                                           tanpsi=tanpsi)
    return im_polar
def carttopolar(im, inc):
    """Resample a sky image onto a flat (psi = 0) polar grid using
    scipy's geometric_transform with the :func:`cartesian2polar`
    inverse mapping."""
    ny, nx = im.shape
    # Pixel indices of the image centre.
    center = ((float(nx) + 1.) / 2. - 1., (float(ny) + 1.) / 2. - 1.)
    return sp.ndimage.geometric_transform(im,
                                          cartesian2polar,
                                          order=1,
                                          output_shape=im.shape,
                                          extra_keywords={
                                              'inputshape': im.shape,
                                              'inc': inc,
                                              'origin': center
                                          })
def conicpolartocart(im_polar, inc, tanpsi):
    """Map a conic polar image back to the sky plane.

    Inverse of :func:`carttoconicpolar`, using the
    :func:`conicpolar2cartesian_ellipse` mapping; NaNs produced for
    unmapped pixels are replaced by zeros.
    """
    ny, nx = im_polar.shape
    # Pixel indices of the image centre.
    center = ((float(nx) + 1.) / 2. - 1., (float(ny) + 1.) / 2. - 1.)
    im_cart = sp.ndimage.geometric_transform(im_polar,
                                             conicpolar2cartesian_ellipse,
                                             order=1,
                                             output_shape=im_polar.shape,
                                             extra_keywords={
                                                 'inputshape': im_polar.shape,
                                                 'inc': inc,
                                                 'tanpsi': tanpsi,
                                                 'origin': center
                                             })
    return np.nan_to_num(im_cart)
def polartocart(im_polar, inc):
    """Map a flat polar image back to the sky plane.

    Inverse of :func:`carttopolar`, using the :func:`polar2cartesian`
    mapping; NaNs produced for unmapped pixels are replaced by zeros.
    """
    ny, nx = im_polar.shape
    # Pixel indices of the image centre.
    center = ((float(nx) + 1.) / 2. - 1., (float(ny) + 1.) / 2. - 1.)
    im_cart = sp.ndimage.geometric_transform(im_polar,
                                             polar2cartesian,
                                             order=1,
                                             output_shape=im_polar.shape,
                                             extra_keywords={
                                                 'inputshape': im_polar.shape,
                                                 'inc': inc,
                                                 'origin': center
                                             })
    return np.nan_to_num(im_cart)
def exec_prep_files(M):
    """Load and pre-condition the input FITS images.

    Opens M.filename_source (collapsing cubes to their first 2D slice),
    applies M.unitscale, optionally injects Gaussian noise
    (M.InjectNoise), and, when M.DoErrorMap is set, converts the error
    map into a weight map w = 1/err**2 with bad pixels zeroed.  Results
    are attached to M as M.Hdu (image) and M.Hduw (weights, or False
    when no error map is used).  Intermediate FITS files are written to
    M.workdir when M.DumpAllFitsFiles is set.
    """
    filename_source = M.filename_source
    workdir = M.workdir
    DumpAllFitsFiles = M.DumpAllFitsFiles
    # Normalise workdir so the string concatenations below form valid paths.
    if (not re.search(r"\/$", workdir)):
        workdir += '/'
        M.workdir = workdir
        print("added trailing back slash to outputdir")
    inbasename = os.path.basename(filename_source)
    filename_fullim = re.sub('.fits', '_fullim.fits', inbasename)
    filename_fullim = workdir + filename_fullim
    if (M.DoErrorMap):
        # Mirror the naming scheme for the error and weight images.
        inbasenameerr = os.path.basename(M.filename_errormap)
        filename_fullimerr = re.sub('.fits', '_fullim.fits', inbasenameerr)
        filename_fullimerr = workdir + filename_fullimerr
        filename_fullimw = re.sub('.fits', '_fullimw.fits', inbasenameerr)
        filename_fullimw = workdir + filename_fullimw
    if (M.Verbose):
        print("BUILDING WORKDIR AND GENERATING CENTERED IMAGE")
    hdu = pf.open(filename_source)
    hdr0 = hdu[0].header
    if (hdr0['NAXIS'] > 2):
        # Input is a cube: keep only its first 2D slice.
        if (M.Verbose):
            print("running cube2im")
        hdu = slice0(filename_source, False)
    im1 = hdu[0].data
    im1 = im1 * M.unitscale
    print("applied unit scale factor:", M.unitscale)
    hdr1 = hdu[0].header
    typicalerror = M.typicalerror
    if (M.InjectNoise):
        print("INJECTING NOISE")
        im1 = im1 + np.random.normal(
            loc=0.0, scale=typicalerror, size=im1.shape)
    if (DumpAllFitsFiles):
        pf.writeto(filename_fullim, im1, hdr1, overwrite=True)
    hdu[0].data = im1
    # hduw stays False when no error map is provided.
    hduw = False
    if (M.DoErrorMap):
        hduerr = pf.open(M.filename_errormap)
        hdrerr = hduerr[0].header
        if (hdrerr['NAXIS'] > 2):
            hduerr = slice0(M.filename_errormap, False)
        imerr1 = hduerr[0].data
        imerr1 = imerr1 * M.unitscale
        #typicalerror=np.median(imerr1)
        #imerr1[np.where(imerr1 < (typicalerror/3.))] = typicalerror
        #imerr1[np.where(imerr1 > (100.*typicalerror))] = 1E20
        hdrerr1 = hduerr[0].header
        # Weights are inverse variances; NaNs and huge values (from
        # err -> 0) are zeroed out.
        imw0 = 1. / imerr1**2
        imw0 = np.nan_to_num(imw0)
        imw0[np.where(np.fabs(imw0) > 1E10)] = 0.
        if (M.Verbose):
            print("fullim weights: max", np.max(imw0), " min ", np.min(imw0))
        # Regularised error map: zero-weight pixels get a huge error so
        # they cannot dominate any average.
        imerr = np.sqrt(1. / imw0)
        imerr = np.nan_to_num(imerr)
        imerr[np.where(np.fabs(imw0) < 1E-30)] = 1E20
        typicalerror = np.median(imerr)
        if (M.Verbose):
            print("resamp errors: max", np.max(imerr), " min ", np.min(imerr))
            print("typicalerror= ", typicalerror, " vs", M.typicalerror)
        if (DumpAllFitsFiles):
            pf.writeto(filename_fullimw, imw0, hdrerr1, overwrite=True)
        hduw = pf.PrimaryHDU()
        hduw.data = imw0
        hduw.header = hdrerr1
    M.Hdu = hdu
    M.Hduw = hduw
    return
def exec_grid_4center(M):
    """Regrid the input image(s) onto a centred, square, odd-sized grid.

    Builds a new FITS header centred on (M.RA, M.DEC) — optionally
    offset by (M.x_center, M.y_center) — with pixel scale rescaled by
    M.pixscale_factor and field of view shrunk by M.fieldscale, then
    resamples the image (and weight map, when M.DoErrorMap) onto it.
    Stores the results on M as M.Hducentered / M.Hduwcentered and sets
    M.Ncorr, the number of pixels per beam used for chi2 normalisation.
    """
    workdir = M.workdir
    RA = M.RA
    DEC = M.DEC
    x_center = M.x_center
    y_center = M.y_center
    fieldscale = M.fieldscale  # shrink radial field of view of polar maps by this factor
    hdu = M.Hdu
    hduw = M.Hduw
    filename_source = M.filename_source
    inbasename = os.path.basename(filename_source)
    filename_fullim = re.sub('.fits', '_fullim.fits', inbasename)
    filename_fullim = workdir + filename_fullim
    fileout_centered = re.sub('fullim.fits', 'centered.fits', filename_fullim)
    if (M.DoErrorMap):
        inbasenameerr = os.path.basename(M.filename_errormap)
        filename_fullimerr = re.sub('.fits', '_fullim.fits', inbasenameerr)
        filename_fullimerr = workdir + filename_fullimerr
        filename_fullimw = re.sub('.fits', '_fullimw.fits', inbasenameerr)
        filename_fullimw = workdir + filename_fullimw
        fileout_centerederr = re.sub('fullim.fits', 'centered.fits',
                                     filename_fullimerr)
        fileout_centeredw = re.sub('fullim.fits', 'wcentered.fits',
                                   filename_fullimerr)
    hdr1 = hdu[0].header
    # y_center == a bool is used as a sentinel for "no offset requested".
    if (not (isinstance(y_center, bool))):
        if (not isinstance(RA, float)):
            # No explicit centre given: fall back to the header reference.
            RA = hdr1['CRVAL1']
            DEC = hdr1['CRVAL2']
            if (M.Verbose):
                print("using center of coords CRVAL1 CRVAL2")
        #RA=RA+(np.sin(x_center*np.pi/180.)*y_center/3600.)/np.cos(DEC*np.pi/180.)
        # Offset RA by x_center arcsec (converted to degrees, corrected
        # for the cos(DEC) convergence of meridians).
        RA = RA + ((x_center / 3600.) / np.cos(DEC * np.pi / 180.))
        #DEC=DEC+np.cos(x_center*np.pi/180.)*y_center/3600.
        # NOTE(review): unlike the RA line above (arcsec/3600 -> degrees),
        # this also multiplies by pi/180, shrinking the DEC offset ~57x;
        # looks inconsistent with the commented-out formula — confirm the
        # intended units of y_center before relying on non-zero offsets.
        DEC = DEC + ((y_center / 3600.) * np.pi / 180.)
        if (M.Verbose):
            print("RA =", RA)
            print("DEC =", DEC)
    # Output grid: square, forced to an odd number of pixels so the
    # centre falls on a pixel.
    nx = int(hdr1['NAXIS1'] / (M.pixscale_factor * M.fieldscale))
    ny = nx
    if ((nx % 2) == 0):
        nx = nx + 1
        ny = ny + 1
    hdr2 = deepcopy(hdr1)
    hdr2['NAXIS1'] = nx
    hdr2['NAXIS2'] = ny
    hdr2['CRPIX1'] = (nx + 1) / 2
    hdr2['CRPIX2'] = (ny + 1) / 2
    hdr2['CRVAL1'] = RA
    hdr2['CRVAL2'] = DEC
    hdr2['CDELT1'] = M.pixscale_factor * hdr2['CDELT1']
    hdr2['CDELT2'] = M.pixscale_factor * hdr2['CDELT2']
    resamp = gridding(hdu, hdr2, fullWCS=False)
    resamp = np.nan_to_num(resamp)
    if (M.DumpAllFitsFiles):
        fileout_centered = re.sub('fullim.fits', 'centered.fits',
                                  filename_fullim)
        pf.writeto(fileout_centered, resamp, hdr2, overwrite=True)
    hducentered = pf.PrimaryHDU()
    hducentered.data = resamp
    hducentered.header = hdr2
    # hduwcentered stays False when no error map is used.
    hduwcentered = False
    if (M.DoErrorMap):
        # Regrid the weight map onto the same header; negative weights
        # produced by the interpolation are clipped to zero.
        resampw = gridding(hduw, hdr2, fullWCS=False)
        resampw = np.nan_to_num(resampw)
        resampw[np.where(resampw < 0.)] = 0.
        if (M.Verbose):
            print("resamp weights: max", np.max(resampw), " min ",
                  np.min(resampw))
        resamperr = np.sqrt(1. / resampw)
        if (M.DumpAllFitsFiles):
            fileout_centeredw = re.sub('fullim.fits', 'wcentered.fits',
                                       filename_fullimerr)
            pf.writeto(fileout_centeredw, resampw, hdr2, overwrite=True)
            fileout_centerederr = re.sub('fullim.fits', 'centered.fits',
                                         filename_fullimerr)
            pf.writeto(fileout_centerederr, resamperr, hdr2, overwrite=True)
        hduwcentered = pf.PrimaryHDU()
        hduwcentered.data = resampw
        hduwcentered.header = hdr2
    # Number of pixels per (Gaussian) beam, for chi2 normalisation;
    # bmaj/bmin presumably in arcsec, CDELT2 in degrees — TODO confirm.
    M.Ncorr = (np.pi / (4. * np.log(2))) * M.bmaj * M.bmin / (hdr2['CDELT2'] *
                                                              3600.)**2
    M.Hducentered = hducentered
    M.Hduwcentered = hduwcentered
def exec_conicpolar_expansions(M):
filename_source = M.filename_source
workdir = M.workdir
inc = M.inc
tanpsi = M.tanpsi
PlotRadialProfile = M.PlotRadialProfile
a_min = M.a_min
a_max = M.a_max
typicalerror = M.typicalerror
DumpAllFitsFiles = M.DumpAllFitsFiles
DoDCone = M.DoDCone
DoAccr = M.DoAccr
DoMerid = M.DoMerid
RestrictAvToRadialDomain = M.RestrictAvToRadialDomain # set to True is faster but may lead to discontinuities in region averages.
DoFarSideOnly = M.DoFarSideOnly
hdu = M.Hducentered
hduw = M.Hduwcentered
#typicalerror=ExpectedError
#cosi=np.cos(inc*np.pi/ 180.)
inbasename = os.path.basename(filename_source)
filename_fullim = re.sub('.fits', '_fullim.fits', inbasename)
filename_fullim = workdir + filename_fullim
if (M.DoErrorMap):
inbasenameerr = os.path.basename(M.filename_errormap)
filename_fullimerr = re.sub('.fits', '_fullim.fits', inbasenameerr)
filename_fullimerr = workdir + filename_fullimerr
filename_fullimw = re.sub('.fits', '_fullimw.fits', inbasenameerr)
filename_fullimw = workdir + filename_fullimw
resamp = hdu.data
hdr2 = hdu.header
nx = hdr2['NAXIS1']
ny = hdr2['NAXIS2']
if M.Verbose:
print("M.Ncorr = ", M.Ncorr)
if (M.DoErrorMap):
resampw = hduw.data
if (M.InheritMumap):
if (M.mumap is None):
mumap = np.ones(resamp.shape)
else:
mumap = M.mumap
else:
mumap = np.ones(resamp.shape)
rotangle = M.PA
im1rot = ndimage.rotate(resamp, rotangle, reshape=False)
hdr3 = deepcopy(hdr2)
im3 = np.double(im1rot)
if (DumpAllFitsFiles):
fileout_rotated = re.sub('fullim.fits', 'rotated.fits',
filename_fullim)
pf.writeto(fileout_rotated, im1rot, hdr2, overwrite=True)
if (M.DoErrorMap):
if (np.any(resampw < 0.)):
print("min / max:", np.min(resampw), np.max(resampw))
sys.exit("negative sky weights!!!!")
im1rotw = ndimage.rotate(resampw, rotangle, reshape=False, order=0)
im3w = np.double(im1rotw)
if (DumpAllFitsFiles):
fileout_rotatedw = re.sub('fullim.fits', 'wrotated.fits',
filename_fullimerr)
pf.writeto(fileout_rotatedw, im1rotw, hdr2, overwrite=True)
if (np.any(im1rotw < 0.)):
print("min / max:", np.min(im1rotw), np.max(im1rotw))
sys.exit("negative rot sky weights!!!!")
# #####################################################################
# take conic polar transforms
if (M.Verbose):
print("CARTESIAN2CONICPOLAR TRANSFORM START")
print("using inc ", inc * np.pi / 180., " tanpsi ", tanpsi)
im_polar = carttoconicpolar(im3, inc, tanpsi)
if (DoDCone):
mumap_polarpos = carttoconicpolar(mumap, inc, tanpsi)
nphis, nrs = im_polar.shape
if ((nphis != nx) or (nrs != ny)):
sys.exit("bug")
hdupolar = pf.PrimaryHDU()
hdupolar.data = im_polar
hdrpolar = hdupolar.header
hdrpolar['CRPIX1'] = 1
hdrpolar['CRVAL1'] = 0.
hdrpolar['CDELT1'] = 2. * np.pi / nphis
hdrpolar['CRPIX2'] = 1
hdrpolar['CRVAL2'] = 0.
hdrpolar['CDELT2'] = (hdr3['CDELT2'])
hdupolar.header = hdrpolar
fileout_polar = re.sub('fullim.fits', 'polar.fits', filename_fullim)
if (DumpAllFitsFiles):
hdupolar.writeto(fileout_polar, overwrite=True)
if (M.DoErrorMap):
im_polarw = carttoconicpolar(im3w, inc, tanpsi)
nphis, nrs = im_polarw.shape
hdupolarw = pf.PrimaryHDU()
hdupolarw.data = im_polarw
hdupolarw.header = hdrpolar
fileout_polarw = re.sub('fullim.fits', 'wpolar.fits',
filename_fullimerr)
if (DumpAllFitsFiles):
hdupolarw.writeto(fileout_polarw, overwrite=True)
else:
# im_polarw = np.ones(im_polar.shape, dtype=float32) / typicalerror**2
im_polarw = np.ones(im_polar.shape) / typicalerror**2
######################################################################
# take azimuthal averages on polar maps
weights = im_polarw.copy()
#im_Npolcorr = np.ones(im_polarw.shape, dtype=float32)
im_Npolcorr = np.ones(im_polarw.shape)
if (np.any(weights < 0.)):
print("min / max:", np.min(weights), np.max(weights))
sys.exit("negative polarweights!!")
im_polar_av = np.copy(im_polar)
im_polar_rrs = np.zeros(im_polar.shape)
im_polar_phis = np.zeros(im_polar.shape)
rrs = 3600. * (np.arange(hdrpolar['NAXIS2']) - hdrpolar['CRPIX2'] +
1.0) * hdrpolar['CDELT2'] + hdrpolar['CRVAL2']
phis = (180. / np.pi) * (
(np.arange(hdrpolar['NAXIS1']) - hdrpolar['CRPIX1'] + 1.0) *
hdrpolar['CDELT1'] + hdrpolar['CRVAL1'])
phis_rad = np.double(
((np.arange(hdrpolar['NAXIS1']) - hdrpolar['CRPIX1'] + 1.0) *
hdrpolar['CDELT1'] + hdrpolar['CRVAL1']))
KepAmps = np.double(np.zeros(len(rrs)))
sKepAmps = np.double(np.zeros(len(rrs)))
AccrAmps = np.double(np.zeros(len(rrs)))
sAccrAmps = np.double(np.zeros(len(rrs)))
MeridAmps = np.double(np.zeros(len(rrs)))
sMeridAmps = np.double(np.zeros(len(rrs)))
vsysts = np.zeros(hdrpolar['NAXIS2'])
if (a_min > 0):
ia_min = np.argmin(np.abs(rrs - a_min))
if (a_max > 0):
ia_max = np.argmin(np.abs(rrs - a_max))
if (M.ComputeSystVelo):
if (M.DoErrorMap):
for irrs in range(len(rrs)):
v0_vec = im_polar[irrs, :]
w_vec = im_polarw[irrs, :]
av_v0 = np.sum(w_vec * v0_vec) / np.sum(w_vec)
av_cosphi = np.sum(w_vec * np.cos(phis_rad)) / np.sum(w_vec)
KepAmp = np.sum(
(v0_vec - av_v0) * w_vec *
np.cos(phis_rad)) / np.sum(w_vec *
(np.cos(phis_rad))**2 - w_vec *
np.cos(phis_rad) * av_cosphi)
vsysts[irrs] = np.sum(
w_vec *
(v0_vec - KepAmp * np.cos(phis_rad))) / np.sum(w_vec)
else:
for irrs in range(len(rrs)):
v0_vec = im_polar[irrs, :]
KepAmp = np.sum(
(v0_vec - np.average(v0_vec)) * np.cos(phis_rad)) / np.sum(
(np.cos(phis_rad))**2 -
np.cos(phis_rad) * np.average(np.cos(phis_rad)))
vsysts[irrs] = np.average(v0_vec - KepAmp * np.cos(phis_rad))
vsyst = np.asscalar(np.median(vsysts[ia_min:ia_max]))
sigma_vsyst = np.asscalar(np.std(vsysts[ia_min:ia_max]))
print("vsyst calculated = ", vsyst, "+-", sigma_vsyst)
M.vsyst = vsyst
M.sigma_vsyst = sigma_vsyst
else:
vsyst = M.vsyst
if (M.Verbose):
print("vsyst from M = ", vsyst)
TakeAzAv.exec_av(M.DoErrorMap,
M.bmaj,
M.InheritMumap,
M.Verbose,
M.PA,
M.inc,
M.tanpsi,
rrs,
phis_rad,
im_polar,
KepAmps,
sKepAmps,
AccrAmps,
sAccrAmps,
MeridAmps,
sMeridAmps,
im_polar_av,
im_polar_rrs,
im_polar_phis,
ia_min,
ia_max,
im_Npolcorr,
vsyst=vsyst,
typicalerror=typicalerror,
weights=weights,
RestrictAvToRadialDomain=RestrictAvToRadialDomain,
DoAccr=DoAccr,
DoMerid=DoMerid,
DoFarSideOnly=DoFarSideOnly,
mumap_polarpos=None)
# SIGNS CALIBRATED ON THE RT TRIALS WIGGLERT
# beware when flipping across the sky as observer sees the cone with psi < 0, with z<0, where a wind would have v_z < 0 in disk coordinates.
# /strelka_ssd/simon/wiggleRT/
sini = np.sin(M.inc)
cosi = np.cos(M.inc)
if (np.fabs(M.inc) > (np.pi / 2.)):
# this fabs to report values for the upper side z>0 of the disk even when retrograde
# cosi=np.fabs(np.cos(M.inc))
# best not to do change the results, and instead take care when reporting in RotOrient
cosi = np.cos(M.inc)
v_Phi_prof = KepAmps / sini
sv_Phi_prof = sKepAmps / np.fabs(sini)
v_Phi_prof = np.nan_to_num(v_Phi_prof)
sv_Phi_prof = np.nan_to_num(sv_Phi_prof)
v_R_prof = AccrAmps / sini
sv_R_prof = sAccrAmps / np.fabs(sini)
v_R_prof = np.nan_to_num(v_R_prof)
sv_R_prof = np.nan_to_num(sv_R_prof)
v_z_prof = -MeridAmps / cosi
sv_z_prof = sMeridAmps / np.fabs(cosi)
v_z_prof = np.nan_to_num(v_z_prof)
sv_z_prof = np.nan_to_num(sv_z_prof)
######################################################################
# now compute chi2 in polar coords
if (DoDCone):
imazim_far = conicpolartocart(im_polar_av, inc, -tanpsi)
im_polar_av_far_near = carttoconicpolar(imazim_far, inc, tanpsi)
for irrs in range(len(rrs)):
v0_vec = im_polar[irrs, :] - vsyst
v0_vec_m = im_polar_av[irrs, :] - vsyst
v0_vec_FN_m = im_polar_av_far_near[irrs, :] - vsyst
if (M.DoErrorMap):
w_vec = weights[irrs, :]
else:
w_vec = 1. / typicalerror**2
Delta_o = v0_vec - v0_vec_FN_m
Delta_m = v0_vec_m - v0_vec_FN_m
cos_vec = np.fabs(np.cos(phis_rad))
A = np.sum(w_vec * cos_vec * Delta_m *
(Delta_m - Delta_o)) / np.sum(
w_vec * cos_vec**2 * Delta_m**2)
if (A < 0.):
A = 0.
# sys.exit("BUG")
if (A > 0.5):
#print( "setting A",A,"to 1")
A = 0.5
mu_vec = (1. - A * cos_vec)
#mumap_polarpos[irrs, :] = np.float32(mu_vec)
mumap_polarpos[irrs, :] = mu_vec
# testing for chi2 improvement:
TestChi2Improv = False
if TestChi2Improv:
subchi2 = np.sum(w_vec * (v0_vec - v0_vec_m)**2)
subchi2_mumap = np.sum(
w_vec * (v0_vec - (v0_vec_m * mu_vec + v0_vec_FN_m *
(1. - mu_vec)))**2)
flag = ''
if (subchi2_mumap > subchi2):
flag = '<<'
print(("irrs %d irrs r %f A %f chi2 %f chi2_mu %f %s " %
(irrs, rrs[irrs], A, subchi2, subchi2_mumap, flag)))
zeimage = weights * (im_polar - im_polar_av)**2 / im_Npolcorr
zeimage = np.nan_to_num(zeimage)
deltaChi2 = np.sum(zeimage, axis=1)
deltaimage = im_polar - im_polar_av
velodev_med = np.sqrt(np.median(deltaimage[ia_min:ia_max]**2))
velodev_std = np.std(deltaimage[ia_min:ia_max, :])
velodev_std_vec = np.std(deltaimage, axis=1)
velodev_std2 = np.std(velodev_std_vec[ia_min:ia_max])
im_polar_av_DCone = im_polar_av * mumap_polarpos + im_polar_av_far_near * (
1. - mumap_polarpos)
#M.DConeDeltaChi2=False
zeimage_DCone = weights * (im_polar - im_polar_av_DCone)**2
deltaChi2_DCone = np.sum(zeimage_DCone, axis=1)
chi2 = sum(deltaChi2[ia_min:ia_max])
#chi2_DCone=sum(deltaChi2_DCone[ia_min:ia_max])
if (((np.fabs(chi2 - M.chi2_prev) / chi2) < 1E-8)
and not M.DConeDeltaChi2):
print("using DConeDeltaChi2 chi2: ", chi2, " chi2_prev",
M.chi2_prev)
M.DConeDeltaChi2 = True
if M.DConeDeltaChi2:
deltaChi2 = deltaChi2_DCone
#zeimage_N=weights*mumap_polarpos*(im_polar-im_polar_av)**2
#zeimage_N=sp.nan_to_num(zeimage_N)
#deltaChi2_N = np.sum(zeimage_N,axis=1)
#zeimage_F=weights*(1.-mumap_polarpos)*(im_polar-im_polar_av_far_near)**2
#zeimage_F=sp.nan_to_num(zeimage_F)
#deltaChi2_F = np.sum(zeimage_F,axis=1)
#deltaChi2= deltaChi2_N + deltaChi2_F
dispv_Phi_prof = np.sqrt(deltaChi2 / np.sum(weights, axis=1))
dispv_Phi_prof = np.nan_to_num(dispv_Phi_prof)
else:
chi2image = weights * (im_polar - im_polar_av)**2 / im_Npolcorr
chi2image = np.nan_to_num(chi2image)
deltaChi2 = np.sum(chi2image, axis=1)
if (np.any(weights < 0.)):
sys.exit("negative weights!!!!")
if (np.any(chi2image < 0.)):
sys.exit("negative chi2!!!!")
deltaimage = im_polar - im_polar_av
velodev_med = np.sqrt(np.median(deltaimage[ia_min:ia_max, :]**2))
velodev_std = np.std(deltaimage[ia_min:ia_max, :])
velodev_std_vec = np.std(deltaimage, axis=1)
velodev_std2 = np.std(velodev_std_vec[ia_min:ia_max])
#varim = deltaimage**2 * weights
#varvec = np.sum(varim, axis=1)
#wvec = np.sum(weights, axis=1)
#mask = (wvec < 1E-10)
#vec_w_var = varvec # / wvec)
#vec_w_var[mask] = 0.
# #vec_median_w = np.median(weights, axis=1)
# #vec_typicalerror = np.sqrt(1. / vec_median_w)
# deltaChi2 = vec_w_var # / vec_typicalerror**2.)
#colapsed_weights=np.sum(weights,axis=1)
#dispv_Phi_prof=colapsed_weights.copy()
#mask=(colapsed_weights > 1E-10)
#dispv_Phi_prof[mask]=np.sqrt(deltaChi2[mask]/colapsed_weights[mask]) # << weighted dispersion of residuals
#dispv_Phi_prof[np.invert(mask)]=np.nan
## dispv_Phi_prof = np.sqrt(deltaChi2/colapsedweights)
#sv_Phi_prof=dispv_Phi_prof.copy()
#cosi=np.cos(M.inc)
#Nind=2.*np.pi*rrs * cosi /(M.bmaj) # number of beams at each radius
#sv_Phi_prof=sv_Phi_prof/np.sqrt(Nind)
#sv_Phi_prof=np.nan_to_num(sv_Phi_prof)
if (M.DoMerid):
M.RadialProfile = [
rrs, v_Phi_prof, sv_Phi_prof, v_R_prof, sv_R_prof, v_z_prof,
sv_z_prof
]
elif (M.DoAccr):
M.RadialProfile = [rrs, v_Phi_prof, sv_Phi_prof, v_R_prof, sv_R_prof]
else:
M.RadialProfile = [rrs, v_Phi_prof, sv_Phi_prof]
if (DumpAllFitsFiles):
if (M.DoMerid):
save_prof = np.zeros((hdrpolar['NAXIS2'], 7))
save_prof[:, 0] = rrs
save_prof[:, 1] = v_Phi_prof
save_prof[:, 2] = sv_Phi_prof
save_prof[:, 3] = v_R_prof
save_prof[:, 4] = sv_R_prof
save_prof[:, 5] = v_z_prof
save_prof[:, 6] = sv_z_prof
elif DoAccr:
save_prof = np.zeros((hdrpolar['NAXIS2'], 5))
save_prof[:, 0] = rrs
save_prof[:, 1] = v_Phi_prof
save_prof[:, 2] = sv_Phi_prof
save_prof[:, 3] = v_R_prof
save_prof[:, 4] = sv_R_prof
else:
save_prof = np.zeros((hdrpolar['NAXIS2'], 3))
save_prof[:, 0] = rrs
save_prof[:, 1] = v_Phi_prof
save_prof[:, 2] = sv_Phi_prof
fileout_radialprofile = re.sub('fullim.fits', 'radial_profile.dat',
filename_fullim)
np.savetxt(fileout_radialprofile,
save_prof) # x,y,z equal sized 1D arrays
fileout_polar_Npolcorr = re.sub('fullim.fits', 'Npolcorr.fits',
filename_fullim)
hdupolar.data = im_Npolcorr
hdupolar.writeto(fileout_polar_Npolcorr, overwrite=True)
fileout_polar_av = re.sub('fullim.fits', 'polar_av.fits',
filename_fullim)
hdupolar.data = im_polar_av
hdupolar.writeto(fileout_polar_av, overwrite=True)
im_polar_diff = im_polar - im_polar_av
fileout_polar_diff = re.sub('fullim.fits', 'polar_diff.fits',
filename_fullim)
hdupolar.data = im_polar_diff
hdupolar.writeto(fileout_polar_diff, overwrite=True)
if (DoDCone):
fileout_polar_av_far_near = re.sub('fullim.fits',
'polar_av_far_near.fits',
filename_fullim)
hdupolar.data = im_polar_av_far_near
hdupolar.writeto(fileout_polar_av_far_near, overwrite=True)
fileout_polar_av_far_near = re.sub('fullim.fits',
'polar_av_DCone.fits',
filename_fullim)
hdupolar.data = im_polar_av_DCone
hdupolar.writeto(fileout_polar_av_far_near, overwrite=True)
######################################################################
# Polar average: back to sky for diff image
if (M.ComputeSkyImages):
if (M.Verbose):
print(
"CONICPOLAR2CARTESIAN TRANSFORM FOR AZIM AV START im_polar_av")
print("using inc ", inc * np.pi / 180., "deg tanpsi ", tanpsi)
#(ny,nx) = im_polar.shape
#x=np.arange(0,nx)
#y=np.arange(0,ny)
#X, Y = np.meshgrid(x, y)
#rrs_polar= 3600.*(Y-hdrpolar['CRPIX2']+1.0)*hdrpolar['CDELT2']+hdrpolar['CRVAL2']
#phis_polar= (X-hdrpolar['CRPIX1']+1.0)*hdrpolar['CDELT1']+hdrpolar['CRVAL1']
#HHs_polar = rrs_polar * tanpsi
#phis_sky_domain_top=conicpolartocart(phis_polar,inc,tanpsi)
#rrs_sky_domain_top=conicpolartocart(rrs_polar,inc,tanpsi)
#HHs_sky_domain_top=conicpolartocart(HHs_polar,inc,tanpsi)
#phis_sky_domain_top_drot = ndimage.rotate(phis_sky_domain_top, -rotangle, reshape=False,order=0)
#rrs_sky_domain_top_drot = ndimage.rotate(rrs_sky_domain_top, -rotangle, reshape=False,order=0)
#HHs_domain_top_drot = ndimage.rotate(HHs_sky_domain_top, -rotangle, reshape=False,order=0)
#M.diskgeometry={
# 'HHs_sky_domain_top':HHs_sky_domain_top_drot,
# 'rrs_sky_domain_top':rrs_sky_domain_top_drot,
# 'phis_sky_domain_top':phis_sky_domain_top_drot}
imazim = conicpolartocart(im_polar_av, inc, tanpsi)
faceoninc = np.pi
# if (tanpsi < 0.):
# faceoninc=np.pi
if (inc > np.pi / 2.):
faceoninc = 0.
imazim_faceon = polartocart(im_polar_av, faceoninc)
resamp_faceon = polartocart(im_polar, faceoninc)
im4 = imazim #gridding(fileout_stretched_av,hdr3)
#if (M.Verbose):
# print("CONICPOLAR2CARTESIAN TRANSFORM FOR AZIM AV DONE in",
# time.time() - start_time)
if (DumpAllFitsFiles):
fileout_azimav = re.sub('fullim.fits', 'azim_av.fits',
filename_fullim)
pf.writeto(fileout_azimav, im4, hdr2, overwrite=True)
# back to sky - rotate
im4drot = ndimage.rotate(im4, -rotangle, reshape=False, order=0)
# diff
diff_im = resamp - im4drot
diff_im_faceon = resamp_faceon - imazim_faceon
im_polar_av_region = im_polar_av.copy()
im_polar_av_region[0:ia_min] = 0.
im_polar_av_region[ia_min:ia_max] = 1.
im_polar_av_region[ia_max:] = 0.
if M.ExtendRegions:
if M.iregion == 0:
print("extending inwards domain of inner region")
im_polar_av_region[0:ia_min] = 1.
if M.iregion == (M.n_abins - 2):
print("extending outwards domain of inner region")
im_polar_av_region[ia_max:] = 1.
imazim_region = conicpolartocart(im_polar_av_region, inc, tanpsi)
imazim_region_drot = ndimage.rotate(imazim_region,
-rotangle,
reshape=False,
order=0)
imazim_region_faceon = polartocart(im_polar_av_region, faceoninc)
imazim_rrs = conicpolartocart(im_polar_rrs, inc, tanpsi)
imazim_rrs_drot = ndimage.rotate(imazim_rrs,
-rotangle,
reshape=False,
order=0)
imazim_rrs_faceon = polartocart(im_polar_rrs, faceoninc)
imazim_phis = conicpolartocart(im_polar_phis, inc, tanpsi)
imazim_phis_drot = ndimage.rotate(imazim_phis,
-rotangle,
reshape=False,
order=0)
imazim_phis_faceon = polartocart(im_polar_phis, faceoninc)
mask = np.where(imazim_region_drot > 0.9)
skychi2 = np.sum(weights[mask] * diff_im[mask]**2) / M.Ncorr
M.skychi2 = skychi2
hdudiff = pf.PrimaryHDU()
hdudiff.data = diff_im
hdudiff.header = hdr2
M.Hdudiff = hdudiff
hdurrs = pf.PrimaryHDU()
hdurrs.data = imazim_rrs_drot
hdurrs.header = hdr2
M.Hdurrs = hdurrs
hdurrs_faceon = pf.PrimaryHDU()
hdurrs_faceon.data = imazim_rrs_faceon
hdurrs_faceon.header = hdr2
M.Hdurrs_faceon = hdurrs_faceon
hduphis_faceon = pf.PrimaryHDU()
hduphis_faceon.data = imazim_phis_faceon
hduphis_faceon.header = hdr2
M.Hduphis_faceon = hduphis_faceon
hduphis = pf.PrimaryHDU()
hduphis.data = imazim_phis_drot
hduphis.header = hdr2
M.Hduphis = hduphis
hdudiff_faceon = pf.PrimaryHDU()
hdudiff_faceon.data = diff_im_faceon
hdudiff_faceon.header = hdr2
M.Hdudiff_faceon = hdudiff_faceon
hduresamp_faceon = pf.PrimaryHDU()
hduresamp_faceon.data = resamp_faceon
hduresamp_faceon.header = hdr2
M.Hduresamp_faceon = hduresamp_faceon
hduregion = pf.PrimaryHDU()
hduregion.data = imazim_region_drot
hduregion.header = hdr2
M.Hduregion = hduregion
hdumoddrot = pf.PrimaryHDU()
hdumoddrot.data = im4drot
hdumoddrot.header = hdr2
M.Hdumoddrot = hdumoddrot
hduregion_faceon = pf.PrimaryHDU()
hduregion_faceon.data = imazim_region_faceon
hduregion_faceon.header = hdr2
M.Hduregion_faceon = hduregion_faceon
if (DumpAllFitsFiles):
fileout_drotated = re.sub('fullim.fits', 'azim_av_drot.fits',
filename_fullim)
pf.writeto(fileout_drotated, im4drot, hdr2, overwrite=True)
fileout_drotated = re.sub('fullim.fits', 'azim_av_drot_diff.fits',
filename_fullim)
pf.writeto(fileout_drotated, diff_im, hdr2, overwrite=True)
fileout_drotated_region = re.sub('fullim.fits', 'region_drot.fits',
filename_fullim)
pf.writeto(fileout_drotated_region,
imazim_region_drot,
hdr2,
overwrite=True)
fileout_diff_im_faceon = re.sub('fullim.fits', 'diff_faceon.fits',
filename_fullim)
pf.writeto(fileout_diff_im_faceon,
diff_im_faceon,
hdr2,
overwrite=True)
fileout_im_faceon = re.sub('fullim.fits', 'resamp_faceon.fits',
filename_fullim)
pf.writeto(fileout_im_faceon, resamp_faceon, hdr2, overwrite=True)
fileout_region_faceon = re.sub('fullim.fits', 'region_faceon.fits',
filename_fullim)
pf.writeto(fileout_region_faceon,
imazim_region_faceon,
hdr2,
overwrite=True)
##fileout_proj=re.sub('fullim.fits', 'azim_av_proj.fits', filename_fullim)
##if (not OptimOrient):
## pf.writeto(fileout_proj,im4, hdr2, overwrite=True)
if (DoDCone):
######################################################################
# average +-psi in sky plane
imazim_far = conicpolartocart(im_polar_av, inc, -tanpsi)
imazim_far_drot = ndimage.rotate(imazim_far,
-rotangle,
reshape=False,
order=0)
mumap0 = conicpolartocart(mumap_polarpos, inc, tanpsi)
mumap = ndimage.rotate(mumap0, -rotangle, reshape=False)
if M.DConeDeltaChi2:
M.mumap = mumap
immod_DCone = mumap * im4drot + (1. - mumap) * imazim_far_drot
diff_im_DCone = resamp - immod_DCone
# immod_DCone_b_rot = conicpolartocart(im_polar_av_DCone,inc,tanpsi)
# immod_DCone_b=ndimage.rotate(immod_DCone_b_rot, -rotangle, reshape=False)
# diff_im_DCone_b = resamp - immod_DCone_b
if DoDCone:
hduDConemoddrot = pf.PrimaryHDU()
hduDConemoddrot.data = immod_DCone
hduDConemoddrot.header = hdr2
M.HduDConemoddrot = hduDConemoddrot
hdudiffDConemoddrot = pf.PrimaryHDU()
hdudiffDConemoddrot.data = diff_im_DCone
hdudiffDConemoddrot.header = hdr2
M.HdudiffDConemoddrot = hdudiffDConemoddrot
hdumumap = pf.PrimaryHDU()
hdumumap.data = mumap
hdumumap.header = hdr2
M.Hdumumap = hdumumap
if (DumpAllFitsFiles):
fileout_mumap = re.sub('fullim.fits', 'mumap.fits',
filename_fullim)
pf.writeto(fileout_mumap, mumap, hdr2, overwrite=True)
fileout_azimav_far = re.sub('fullim.fits', 'azim_av_far.fits',
filename_fullim)
pf.writeto(fileout_azimav_far,
imazim_far,
hdr2,
overwrite=True)
fileout_azimav_far_drot = re.sub('fullim.fits',
'azim_av_far_drot.fits',
filename_fullim)
pf.writeto(fileout_azimav_far_drot,
imazim_far_drot,
hdr2,
overwrite=True)
fileout_immod_DCone = re.sub('fullim.fits', 'immod_DCone.fits',
filename_fullim)
pf.writeto(fileout_immod_DCone,
immod_DCone,
hdr2,
overwrite=True)
# for some reason _b does not work, there seems to be a background
# fileout_immod_DCone_b=re.sub('fullim.fits', 'immod_DCone_b.fits', filename_fullim)
# pf.writeto(fileout_immod_DCone_b,immod_DCone_b, hdr2, overwrite=True)
# for some reason _b does not work, there seems to be a background
# fileout_immod_DCone_a_b=re.sub('fullim.fits', 'immod_DCone_a-b.fits', filename_fullim)
# pf.writeto(fileout_immod_DCone_a_b,immod_DCone-immod_DCone_b, hdr2, overwrite=True)
fileout_immod_DCone_diff = re.sub('fullim.fits',
'diff_DCone.fits',
filename_fullim)
pf.writeto(fileout_immod_DCone_diff,
diff_im_DCone,
hdr2,
overwrite=True)
# for some reason _b does not work, there seems to be a background
# fileout_immod_DCone_diff_b=re.sub('fullim.fits', 'diff_DCone_b.fits', filename_fullim)
# pf.writeto(fileout_immod_DCone_diff_b,diff_im_DCone_b, hdr2, overwrite=True)
######################################################################
# CROSS CHECK INVERSE TRANSFORM
if (M.XCheckInv):
print("CONICPOLAR2CARTESIAN TRANSFORM FOR XCHECK INV START XCheckInv")
start_time = time.time()
im_x = conicpolartocart(im_polar, inc, tanpsi)
print("CONICPOLAR2CARTESIAN TRANSFORM FOR XCHECK INV DONE in",
time.time() - start_time)
fileout_stretched_x = re.sub('fullim.fits', 'stretched_x.fits',
filename_fullim)
pf.writeto(fileout_stretched_x, im_x, hdr2, overwrite=True)
#hdr3 = deepcopy(hdr2)
fileout_proj_x = re.sub('fullim.fits', 'x_proj.fits', filename_fullim)
#im4_x=gridding(fileout_stretched_x,hdr3)
pf.writeto(fileout_proj_x, im_x, hdr2, overwrite=True)
im4_x_drot = ndimage.rotate(im_x, -rotangle, reshape=False)
fileout_drotated_x = re.sub('fullim.fits', 'x_drot.fits',
filename_fullim)
pf.writeto(fileout_drotated_x, im4_x_drot, hdr2, overwrite=True)
fileout_skydiff = re.sub('fullim.fits', 'x_diff.fits', filename_fullim)
pf.writeto(fileout_skydiff, resamp - im4_x_drot, hdr2, overwrite=True)
######################################################################
# profile plotting
if (PlotRadialProfile and DumpAllFitsFiles):
# -----------------------------------------------------------
# nice fonts
# -----------------------------------------------------------
matplotlib.rc('font', family='sans-serif')
matplotlib.rcParams.update({'font.size': 9})
plt.figure(figsize=(10, 8))
axprofile = plt.subplot(111)
rmax = np.max(rrs)
plt.setp(axprofile.get_xticklabels(), visible=True) #, fontsize=6)
plt.setp(axprofile.get_yticklabels(), visible=True) #, fontsize=6)
plt.xlim(0., rmax)
#plt.ylim(np.min(v_Phi_prof),1.1*np.max(v_Phi_prof))
plt.ylim(-0.1 * np.max(v_Phi_prof[ia_min:ia_max]),
1.1 * np.max(v_Phi_prof[ia_min:ia_max]))
plt.plot(rrs,
v_Phi_prof,
color='grey',
linewidth=0.1,
linestyle='solid')
plt.fill_between(rrs,
v_Phi_prof + sv_Phi_prof,
v_Phi_prof - sv_Phi_prof,
lw=0.1,
color='r',
alpha=0.3,
interpolate=True,
step='mid')
if (DoMerid):
plt.plot(rrs,
v_z_prof,
color='orange',
linewidth=1.,
linestyle='solid',
alpha=0.5,
label='v_z')
plt.plot(rrs,
v_R_prof,
color='green',
linewidth=1.,
linestyle='solid',
alpha=0.5,
label='v_R')
elif (DoAccr):
plt.plot(rrs,
v_R_prof,
color='green',
linewidth=1.,
linestyle='solid',
alpha=0.5,
label='v_R')
#print( np.min(v_Phi_prof))
#print( np.max(v_Phi_prof))
plt.plot(rrs,
rrs * 0.,
color='black',
linewidth=0.1,
linestyle='solid')
#plt.plot(rrs,Icutav,color='blue',linewidth=1,linestyle='solid')
#plt.fill_between(rrs, v_Phi_prof+dispv_Phi_prof, v_Phi_prof-dispv_Phi_prof, lw=0.1,color='b', alpha=0.3, interpolate=True)
plt.ylabel(r'$\langle |v_{\circ}(r)| \rangle$')
plt.xlabel(r'$r$ / arcsec')
fileout_fig = re.sub('fullim.fits', 'fig_profile.pdf', filename_fullim)
plt.savefig(fileout_fig, bbox_inches='tight')
######################################################################
chi2 = sum(deltaChi2[ia_min:ia_max])
M.chi2_prev = chi2
M.velodev_med = velodev_med
M.velodev_std = velodev_std
M.velodev_std2 = velodev_std2
#print( "chi2: ",chi2," typical error ",typicalerror," velodev_std ",velodev_std," velodev_std2 ",velodev_std2,"velodev_med",velodev_med)
# print( "chi2: ",chi2," typical error ",typicalerror," velodev_med ",velodev_med)
M.polarchi2 = chi2
retchi2 = chi2 # / M.Ncorr ## very aprox correction for correlated pixels because retchi2 is in the polar domain, so NCorr is variable over the polar map.
return retchi2
|
simoncasassusREPO_NAMEConeRotPATH_START.@ConeRot_extracted@ConeRot-master@funcs_DConeMaps.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/src/amuse/community/mosse/__init__.py",
"type": "Python"
}
|
from .interface import Mosse
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@src@amuse@community@mosse@__init__.py@.PATH_END.py
|
{
"filename": "scipy_solver.py",
"repo_name": "hmuellergoe/mrbeam",
"repo_path": "mrbeam_extracted/mrbeam-main/mr_beam/imagingbase/imagingbase/solvers/scipy_solver.py",
"type": "Python"
}
|
import scipy.optimize as opt
import numpy as np
from regpy.solvers import Solver
from regpy.functionals import Functional
from regpy.operators import Identity
class Minimize():
    """Thin wrapper around ``scipy.optimize.minimize``.

    Stores the optimization configuration at construction time and, if the
    functional exposes ``gradient`` / ``hessian`` attributes, forwards them
    as ``jac`` / ``hess`` to scipy.

    Parameters
    ----------
    fun : callable
        Objective to minimize; may optionally provide ``gradient`` and
        ``hessian`` attributes.
    x0 : array_like
        Initial guess.
    args, method, bounds, constraints, tol, callback, options
        Passed through unchanged to ``scipy.optimize.minimize``.
    """

    def __init__(self, fun, x0, args=(), method=None, bounds=None, constraints=(), tol=None, callback=None, options=None):
        self.fun = fun
        self.x0 = x0
        self.args = args
        self.method = method
        self.bounds = bounds
        self.constraints = constraints
        self.tol = tol
        self.callback = callback
        self.options = options
        # Only a missing attribute is expected here; the original bare
        # `except:` would also have swallowed KeyboardInterrupt/SystemExit
        # and hidden genuine errors raised by a `gradient` property.
        try:
            self.jac = self.fun.gradient
        except AttributeError:
            print("Functional has no gradient")
            self.jac = None
        try:
            self.hess = self.fun.hessian
        except AttributeError:
            print("Functional has no Hessian")
            self.hess = None

    def run(self):
        """Run the minimization and return only the solution vector ``x``."""
        return opt.minimize(self.fun, self.x0, args=self.args, method=self.method,
                            jac=self.jac, hess=self.hess, bounds=self.bounds, constraints=self.constraints,
                            tol=self.tol, callback=self.callback, options=self.options).x
class Forward_Backward_Optimization(Solver):
    """Forward-backward splitting solver.

    Each iteration performs a data-fidelity minimization via scipy
    (forward step), applies the proximal operator of the penalty
    (backward step), and rescales image and model so that the total of
    ``y`` equals ``zbl``.

    Parameters
    ----------
    setting : regpy solver setting
        Provides the forward operator ``setting.op`` and the domain
        ``setting.Hdomain``.
    data_fidelity : Functional
        Data term, minimized by scipy in the forward step.
    penalty : Functional
        Regularizer; its ``proximal`` is applied in the backward step.
    init : array_like
        Starting point.
    zbl : float
        Target value for ``np.sum(self.y)`` after each iteration.
    regpar : float, optional
        Regularization parameter passed to the proximal operator.
    proximal_pars : optional
        Extra parameters for the proximal operator.
    op : operator, optional
        Mapping applied before the forward operator; defaults to the
        identity on the penalty's domain.
    Remaining keyword arguments are forwarded to :class:`Minimize`.
    """

    def __init__(self, setting, data_fidelity, penalty, init, zbl, regpar=1, proximal_pars=None, args=(), method=None,
                 bounds=None, constraints=(), tol=None, callback=None, options=None, op=None):
        super().__init__()
        self.setting = setting
        self.zbl = zbl
        self.regpar = regpar
        self.proximal_pars = proximal_pars
        self.data_fidelity = data_fidelity
        self.penalty = penalty
        # BUGFIX: self.penalty must be assigned before building the default
        # operator -- the original read self.penalty.domain before setting
        # self.penalty, raising AttributeError whenever `op` was None.
        self.op = op or Identity(self.penalty.domain)
        assert isinstance(self.data_fidelity, Functional)
        assert isinstance(self.penalty, Functional)
        assert self.op.codomain == self.setting.Hdomain.discr
        self.x = init
        self.y = self.setting.op(self.op(self.x))
        self.minimizer = Minimize(data_fidelity, init, args=args, method=method, bounds=bounds,
                                  constraints=constraints, tol=tol, callback=callback, options=options)

    def _next(self):
        # Forward step: minimize the data-fidelity term.
        self.x = self.minimizer.run()
        # Backward step: proximal operator of the penalty.
        self.x = self.penalty.proximal(self.op(self.x), self.regpar, self.proximal_pars)
        self.y = self.setting.op(self.x)
        # Rescale so that np.sum(self.y) equals self.zbl.
        correct = self.zbl/np.sum(self.y)
        self.y *= correct
        self.x *= correct
        # Restart the next forward step from the rescaled iterate.
        self.minimizer.x0 = self.op.adjoint(self.x)
|
hmuellergoeREPO_NAMEmrbeamPATH_START.@mrbeam_extracted@mrbeam-main@mr_beam@imagingbase@imagingbase@solvers@scipy_solver.py@.PATH_END.py
|
{
"filename": "plot_meerkat_hz_erc.py",
"repo_name": "philbull/RadioFisher",
"repo_path": "RadioFisher_extracted/RadioFisher-master/plotting/plot_meerkat_hz_erc.py",
"type": "Python"
}
|
#!/usr/bin/python
"""
Plot functions of redshift for RSDs.
"""
import numpy as np
import pylab as P
from rfwrapper import rf
import matplotlib.patches
import matplotlib.cm
import os
# matplotlib.ticker is referenced explicitly below (MultipleLocator); import
# it here rather than relying on pylab pulling it in as a side effect.
import matplotlib.ticker

cosmo = rf.experiments.cosmo

#-------------------------------------------------------------------------------
# Define colours
#-------------------------------------------------------------------------------
red1 = '#fb9a99'     # IM Band 1
red2 = '#e31a1c'     # IM Band 2
orange1 = '#fdbf6f'  # LOW (lower band)
orange2 = '#FFD025'  # LOW (upper band)
green1 = '#b2df8a'   # WL/Continuum Band 1
green2 = '#33a02c'   # WL/Continuum Band 2
blue1 = '#a6cee3'    # HI Galaxies Band 1
blue2 = '#1f78b4'    # HI Galaxies Band 2
black1 = '#232323'   # External Survey 1
black2 = '#707070'   # External Survey 2
black3 = '#A9A9A9'   # External Survey 3

# Experiment identifiers (matching the output/<name>-* file prefixes) and
# the corresponding legend labels / plot styles, index-aligned.
names = [
    'MeerKATL_4000hr_4000', 'MeerKATUHF_4000hr_4000',
    'MID_B2_MK_RedBook_5000', 'MID_B1_MK_RedBook_20000',
    'EuclidRef', ]
labels = [
    'MeerKAT L band', 'MeerKAT UHF band',
    'SKA1-MID Medium-Deep (Band 2)',
    'SKA1-MID Wide (Band 1)',
    'Sample variance-limited galaxy survey', ]
colours = [red1, red2, blue1, blue2, black1, black2]
linestyle = [[], [], [], [], [], []]
marker = ['s', '.', 's', '.', '.', '.']
ms = [6., 15., 6., 15., 15., 15.]

# Fiducial value and plotting
P.subplot(111)

for k in range(len(names)):
    root = "output/" + names[k]
    print(root)

    # Load cosmo fns.
    dat = np.atleast_2d(np.genfromtxt(root + "-cosmofns-zc.dat")).T
    zc, Hc, dAc, Dc, fc = dat
    z, H, dA, D, f = np.genfromtxt(root + "-cosmofns-smooth.dat").T
    kc = np.genfromtxt(root + "-fisher-kc.dat").T

    # Load Fisher matrices as fn. of z
    Nbins = zc.size
    F_list = [np.genfromtxt(root + "-fisher-full-%d.dat" % i) for i in range(Nbins)]

    # EOS FISHER MATRIX
    # Actually, (aperp, apar) are (D_A, H)
    pnames = rf.load_param_names(root + "-fisher-full-0.dat")
    zfns = ['bs8', 'fs8', 'H', 'DA', ]
    excl = ['Tb', 'n_s', 'sigma8', 'omegak', 'omegaDE', 'w0', 'wa', 'h',
            'gamma', 'N_eff', 'pk*', 'f', 'b_HI',
            'gamma0', 'gamma1', 'eta0', 'eta1', 'A_xi', 'logkmg',
            'sigma8tot', 'sigma_8', 'k*', 'A', 'aperp', 'apar']
    F, lbls = rf.combined_fisher_matrix(F_list,
                                        expand=zfns, names=pnames,
                                        exclude=excl)
    print(lbls)
    cov = np.linalg.inv(F)
    errs = np.sqrt(np.diag(cov))

    # Identify functions of z
    pH = rf.indices_for_param_names(lbls, 'H*')
    errH = 1e2 * errs[pH] / Hc

    # Plot errors as fn. of redshift
    P.plot(zc, errH, color=colours[k], label=labels[k], lw=2.2,
           marker=marker[k], markersize=ms[k], markeredgecolor=colours[k],
           dashes=linestyle[k])

P.tick_params(axis='both', which='major', labelsize=18, width=1.5, size=8., pad=10)
P.tick_params(axis='both', which='minor', labelsize=18, width=1.5, size=5.)

# Set axis limits
P.xlim((-0.001, 3.15))
P.ylim((1e-3, 0.5))

P.xlabel('$z$', labelpad=7., fontdict={'fontsize': 'x-large'})
# Raw string: '\s' is an invalid escape sequence in a plain string literal
# (DeprecationWarning today, SyntaxError in a future Python); the rendered
# LaTeX text is unchanged.
P.ylabel(r'$\sigma_H / H$', labelpad=10., fontdict={'fontsize': 'x-large'})

# Set tick locations
#P.gca().yaxis.set_major_locator(matplotlib.ticker.MultipleLocator(0.02))
#P.gca().yaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.01))
P.gca().xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(0.5))
P.yscale('log')

P.legend(loc='upper right', frameon=False)

# Set size
P.tight_layout()
P.savefig("MeerKAT-Hz-ERC.png", transparent=False)
P.show()
|
philbullREPO_NAMERadioFisherPATH_START.@RadioFisher_extracted@RadioFisher-master@plotting@plot_meerkat_hz_erc.py@.PATH_END.py
|
{
"filename": "example_mod.py",
"repo_name": "spacetelescope/pystortion",
"repo_path": "pystortion_extracted/pystortion-master/pystortion/example_mod.py",
"type": "Python"
}
|
def primes(imax):
    """
    Return the first imax prime numbers.

    Note: despite the historical phrasing "primes up to imax", this
    function returns the first ``imax`` primes (e.g. ``primes(3)`` gives
    ``[2, 3, 5]``), not the primes less than or equal to ``imax``.

    Parameters
    ----------
    imax: int
        The number of primes to return. This should be less or equal to 10000.

    Returns
    -------
    result: list
        The list of the first imax prime numbers.

    Raises
    ------
    ValueError
        If imax is greater than 10000.
    """
    if imax > 10000:
        raise ValueError("imax should be <= 10000")
    result = []
    candidate = 2
    # Trial division against the primes found so far: a candidate is prime
    # iff no smaller prime divides it.
    while len(result) < imax:
        if all(candidate % p != 0 for p in result):
            result.append(candidate)
        candidate += 1
    return result
def do_primes(n, usecython=False):
    """Return the first *n* primes, optionally via the (absent) Cython path."""
    if usecython:
        # The Cython implementation is not shipped with this template.
        raise Exception("This template does not have the example C code included.")
    print('Using pure python primes')
    return primes(n)
def main(args=None):
    """Command-line entry point: compute and report the first N primes.

    Parameters
    ----------
    args : list of str, optional
        Argument list to parse; defaults to ``sys.argv[1:]``.
    """
    # Use the standard-library argparse directly.  The original import,
    # `from astropy.utils.compat import argparse`, was a compatibility shim
    # for ancient Python versions and has been removed from modern astropy.
    import argparse
    from time import time

    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-c', '--use-cython', dest='cy', action='store_true',
                        help='Use the Cython-based Prime number generator.')
    parser.add_argument('-t', '--timing', dest='time', action='store_true',
                        help='Time the Fibonacci generator.')
    parser.add_argument('-p', '--print', dest='prnt', action='store_true',
                        help='Print all of the Prime numbers.')
    parser.add_argument('n', metavar='N', type=int,
                        help='Get Prime numbers up to this number.')

    res = parser.parse_args(args)

    # Time the computation so it can be reported with --timing.
    pre = time()
    primes = do_primes(res.n, res.cy)
    post = time()

    print('Found {0} prime numbers'.format(len(primes)))
    print('Largest prime: {0}'.format(primes[-1]))

    if res.time:
        print('Running time: {0} s'.format(post - pre))

    if res.prnt:
        print('Primes: {0}'.format(primes))
|
spacetelescopeREPO_NAMEpystortionPATH_START.@pystortion_extracted@pystortion-master@pystortion@example_mod.py@.PATH_END.py
|
{
"filename": "Degeneracy.py",
"repo_name": "mmicromegas/ransX",
"repo_path": "ransX_extracted/ransX-master/EQUATIONS/Degeneracy.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from UTILS.Calculus import Calculus
from UTILS.SetAxisLimit import SetAxisLimit
from UTILS.Tools import Tools
from UTILS.Errors import Errors
import sys
# Theoretical background https://arxiv.org/abs/1401.5176
# Mocak, Meakin, Viallet, Arnett, 2014, Compressible Hydrodynamic Mean-Field #
# Equations in Spherical Geometry and their Application to Turbulent Stellar #
# Convection Data #
class Degeneracy(Calculus, SetAxisLimit, Tools, Errors, object):
    """Load and plot the Reynolds-averaged degeneracy parameter psi."""

    def __init__(self, filename, ig, intc, data_prefix):
        super(Degeneracy, self).__init__(ig)

        # Structured array with all Reynolds-averaged mean fields.
        eht = self.customLoad(filename)

        # Radial grid and the degeneracy parameter at the requested time
        # slice.  Field naming follows:
        # https://github.com/mmicromegas/ransX/blob/master/DOCS/ransXimplementationGuide.pdf
        xzn0 = self.getRAdata(eht, 'xzn0')
        psi = self.getRAdata(eht, 'psi')[intc]

        # Share across the whole class.
        self.data_prefix = data_prefix
        self.xzn0 = xzn0
        self.psi = psi

    def plot_degeneracy(self, LAXIS, xbl, xbr, ybu, ybd, ilg):
        """Plot degeneracy parameter in the model"""

        # Only Cartesian (ig == 1) and spherical (ig == 2) geometries
        # are supported.
        if self.ig != 1 and self.ig != 2:
            print("ERROR(Degeneracy.py):" + self.errorGeometry(self.ig))
            sys.exit()

        xaxis = self.xzn0
        yaxis = self.psi

        plt.figure(figsize=(7, 6))

        # Force exponential notation on the y axis.
        plt.gca().yaxis.get_major_formatter().set_powerlimits((0, 0))

        # Plot boundaries.
        self.set_plt_axis(LAXIS, xbl, xbr, ybu, ybd, [yaxis])

        plt.title('degeneracy parameter')
        plt.plot(xaxis, yaxis, color='brown', label=r'$\psi$')

        # Axis labels depend on the geometry.
        if self.ig == 1:
            plt.xlabel(r"x (cm)")
            plt.ylabel(r"$\psi$")
        elif self.ig == 2:
            plt.xlabel(r"r (cm)")
            plt.ylabel(r"$\psi$")

        plt.legend(loc=ilg, prop={'size': 18})

        plt.show(block=False)

        plt.savefig('RESULTS/' + self.data_prefix + 'mean_degeneracy.png')
|
mmicromegasREPO_NAMEransXPATH_START.@ransX_extracted@ransX-master@EQUATIONS@Degeneracy.py@.PATH_END.py
|
{
"filename": "trace.py",
"repo_name": "spacetelescope/calcos",
"repo_path": "calcos_extracted/calcos-master/calcos/trace.py",
"type": "Python"
}
|
#
# This module has all the code for trace profile application
# and calculation of centroids for reference profile and science
# data - the TRCECORR and ALGNCORR steps
#
from __future__ import division, absolute_import
import astropy.io.fits as fits
import numpy as np
import math
from . import airglow
from .calcosparam import *
from . import ccos
from . import cosutil
from . import dispersion
# Status code for a successful trace (TRCECORR) step.
TRACE_OK = 1
# Status codes returned by the centroid/alignment (ALGNCORR) step.
CENTROID_OK = 1
CENTROID_UNDEFINED = 2
CENTROID_ERROR_TOO_LARGE = 3
CENTROID_SET_BY_USER = 4
NO_CONVERGENCE = 5
#
# Temporarily use one of the unused bits in the DQ array to mark
# airglow lines
DQ_AIRGLOW = 1024
#
# Minimum number of good columns to make a row 'good'
COLMIN = 100
def doTrace(events, info, reffiles, tracemask):
    """Do the trace correction.  The trace reference file follows the
    centroid of a point source; applying the correction subtracts the
    profile of trace vs. xcorr from yfull.  Returns the profile used."""
    profile = getTrace(reffiles['tracetab'], info)
    # Replace bad (fill) values in the trace, then shift yfull in place.
    cleanTrace(profile)
    applyTrace(events.field('xcorr'), events.field('yfull'),
               profile, tracemask)
    return profile
def getTrace(tracetab, info):
    """Return the TRACE array from the matching row of the trace table."""
    # Row selection keys for this observing mode.
    row_filter = {"segment": info["segment"],
                  "opt_elem": info["opt_elem"],
                  "cenwave": info["cenwave"],
                  "aperture": info["aperture"]
                  }
    trace_row = cosutil.getTable(tracetab, row_filter)
    if trace_row is None:
        raise MissingRowError("Missing row in TRACETAB, filter = %s" % str(row_filter))
    return trace_row["TRACE"][0]
def cleanTrace(trace):
    """Clean the trace in place by replacing the fill value -999.0 with 0.0."""
    trace[trace == -999.0] = 0.0
    return
def applyTrace(xcorr, yfull, trace, tracemask):
    """Apply the trace correction, modifying yfull in place.

    For each event, the trace value at its fractional XCORR position is
    obtained by linear interpolation between the two neighboring trace
    elements and subtracted from YFULL.  Only events selected by
    tracemask are changed.

    Parameters
    ----------
    xcorr : ndarray
        XCORR pixel coordinate of each event (may be fractional).
    yfull : ndarray
        YFULL coordinate of each event; modified in place.
    trace : ndarray
        Trace profile indexed by integer XCORR pixel.
    tracemask : ndarray of bool
        True for events that should receive the correction.
    """
    ixcorr = xcorr.astype(np.int32)
    # Fractional part of xcorr; used as the linear-interpolation weight.
    # (The original also computed an unused `nevents = len(xcorr)`.)
    remainder = xcorr - ixcorr
    # NOTE(review): assumes max(ixcorr) + 1 < len(trace); an event whose
    # xcorr equals the last trace index would read past the end -- confirm
    # this is guaranteed upstream.
    delta = (1.0-remainder)*trace[ixcorr] + remainder*trace[ixcorr+1]
    yfull[tracemask] = yfull[tracemask] - delta[tracemask]
    return
def doProfileAlignment(events, input, info, switches, reffiles, phdr, hdr,
                       dq_array, tracemask):
    """Align the science spectrum with the reference profile (ALGNCORR).

    Computes the flux-weighted centroid of the spectrum in the science
    data, compares it with the centroid of the reference profile from
    the PROFTAB, and shifts the YFULL coordinates of the selected events
    in place so that the two centroids coincide.  A user-supplied offset
    in the SP_SET_A/B header keyword, if present, overrides the computed
    one.

    Parameters
    ----------
    events: FITS record array
        Events table; the yfull column is modified in place.
    input: str
        Name of the input file (not referenced in this function).
    info: dictionary
        Header keywords and values (segment, opt_elem, cenwave, aperture,
        xtrctalg, npix, x_offset, ...).
    switches: dictionary
        Calibration switches (not referenced in this function).
    reffiles: dictionary
        Reference file names (xtractab/twozxtab, disptab, proftab).
    phdr: FITS header
        Primary header (not referenced in this function).
    hdr: FITS header
        Events extension header; SP_SET_A/B and SDQFLAGS are read, and
        the trace keywords are updated at the end.
    dq_array: ndarray
        Data quality image; used to select good rows and columns.
    tracemask: ndarray of bool
        Events to which the offset should be applied.

    Returns
    -------
    status: int
        One of the CENTROID_* status codes.
    """
    #
    # First look for a user-defined value for the alignment correction
    # in the SP_SET_A/B keyword
    try:
        segmentletter = info["segment"][-1]
        key = "SP_SET_" + segmentletter
        user_offset = hdr[key]
    except KeyError:
        user_offset = None
    #
    # Now do the profile correction. First we calculate
    # the centroid of the profile
    cosutil.printMsg("Calculating centroid")
    filter = {"segment": info["segment"],
              "opt_elem": info["opt_elem"],
              "cenwave": info["cenwave"],
              "aperture": info["aperture"]}
    #
    # Use the value of the XTRCTALG keyword to decide what to use to
    # define regions:
    # BOXCAR: Use XTRACTAB
    # TWOZONE: Use TWOZXTAB
    if info["xtrctalg"] == "BOXCAR":
        xtract_info = cosutil.getTable(reffiles["xtractab"], filter)
        if xtract_info is None:
            raise MissingRowError("Missing row in XTRACTAB; filter = %s" %
                                  str(filter))
    else:
        xtract_info = cosutil.getTable(reffiles["twozxtab"], filter)
        if xtract_info is None:
            raise MissingRowError("Missing row in TWOZXTAB; filter = %s" %
                                  str(filter))
        #
        # Make sure the table doesn't have a SLOPE column
        try:
            slope = xtract_info.field("SLOPE")[0]
            cosutil.printWarning("TWOZXTAB file has a SLOPE column")
        except KeyError:
            slope = 0.0
    #
    # Rebin the data. Output is the flatfielded image, with events binned
    # in (XFULL, YFULL) space
    rebinned_data = rebinData(events, info)
    nrows, ncols = rebinned_data.shape
    #
    # Mask airglow lines in the DQ image
    maskAirglowLines(dq_array, info, reffiles['disptab'], DQ_AIRGLOW)
    #
    # Apply wavelength limits specified in the PROFTAB reference file
    # so we don't centroid over noise
    applyWavelengthLimits(dq_array, info, reffiles['proftab'],
                          reffiles['disptab'])
    #
    # Determine which columns are good by checking the dq flags in the
    # extraction regions of the background and source
    # The bit value for SDQFLAGS comes from the science data header
    sdqflags = hdr["SDQFLAGS"]
    dqexclude = sdqflags | DQ_AIRGLOW
    centroid = None
    goodcolumns = None
    regions = None
    # NOTE(review): SLOPE was already probed above in the TWOZONE branch;
    # this second lookup repeats the probe so that `slope` is defined for
    # both the BOXCAR and TWOZONE branches.
    try:
        slope = xtract_info.field('SLOPE')[0]
    except KeyError:
        slope = 0.0
    # Nominal spectrum center at the middle column of the detector.
    startcenter = xtract_info.field('B_SPEC')[0] + slope*(ncols / 2)
    #
    # Calculate the centroid in the science data
    status, centroid, goodcolumns, regions = getScienceCentroid(rebinned_data,
                                                                dq_array,
                                                                xtract_info,
                                                                dqexclude,
                                                                centroid)
    #
    # Calculate the error on the centroid using the counts data which
    # will be calculated from the event list
    error = getCentroidError(events, info, goodcolumns, regions)
    if error is not None:
        cosutil.printMsg("Error on centroid = %f" % (error))
    else:
        cosutil.printWarning("Centroid error not defined in science data")
    #
    # Get the YERRMAX parameter from the twozxtab
    try:
        max_err = xtract_info["yerrmax"][0]
    except KeyError:
        raise MissingColumnError("Missing YERRMAX column in TWOZXTAB, filter = %s" %
                                 str(filter))
    # Reject the computed centroid if its statistical error is too large.
    if error is not None and error > max_err:
        cosutil.printWarning("Error on flux-weighted centroid > %f" % max_err)
        status = CENTROID_ERROR_TOO_LARGE
    ref_centroid = getReferenceCentroid(reffiles['proftab'],
                                        info,
                                        goodcolumns,
                                        regions)
    #
    # Calculate the offset
    if (user_offset is None) or (user_offset < -990.0):
        if status == CENTROID_OK:
            offset = ref_centroid - centroid
            cosutil.printMsg("Using calculated offset of %f" % (offset))
        else:
            #
            # If something went wrong with the centroid calculation, use
            # the center from the xtractab or twozxtab in calculating the
            # offset to make the centroid the same as that in the reference
            # profile
            offset = ref_centroid - startcenter
            cosutil.printMsg("Using offset of {:+f} calculated using".format(offset))
            cosutil.printMsg("initial reference file aperture center")
    else:
        offset = -user_offset
        cosutil.printMsg("Using user-supplied offset of %f" % (offset))
        status = CENTROID_SET_BY_USER
    #
    # Apply the offset. When this is done, the science target and the
    # reference profile will have the same centroid
    # Only do the correction to events inside the active area and outside
    # the WCA aperture
    applyOffset(events.field('yfull'), offset, tracemask)
    updateTraceKeywords(hdr, info["segment"], ref_centroid, offset, error)
    return status
def rebinData(events, info):
    """Bin the flat-fielded (epsilon-weighted) events into an
    (XFULL, YFULL) image of shape info['npix']."""
    image = np.zeros(info['npix'], dtype=np.float32)
    ccos.binevents(events.field('xfull'),
                   events.field('yfull'),
                   image,
                   info['x_offset'],
                   events.field('dq'),
                   SERIOUS_DQ_FLAGS,
                   events.field('epsilon'))
    return image
def rebinCounts(events, info):
    """Bin the raw event counts (no epsilon weighting) into an
    (XFULL, YFULL) image of shape info['npix']."""
    counts_image = np.zeros(info['npix'], dtype=np.float32)
    ccos.binevents(events.field('xfull'),
                   events.field('yfull'),
                   counts_image,
                   info['x_offset'],
                   events.field('dq'),
                   SERIOUS_DQ_FLAGS)
    return counts_image
def maskAirglowLines(dq_image, info, disptab, airglow_bits):
    """OR `airglow_bits` into the DQ columns covered by each airglow line."""
    segment = info["segment"]
    for wavelength in airglow.AIRGLOW_WAVELENGTHS:
        limits = airglow.findAirglowLimits(info, segment, disptab,
                                           wavelength)
        if limits is None:
            # This line does not fall on the detector for this mode.
            continue
        first, last = limits
        columns = slice(int(first), int(last + 1))
        dq_image[:, columns] |= airglow_bits
    return
def applyWavelengthLimits(dq_image, info, proftab, disptab):
    """Flag columns outside the wavelength limits, in place.

    Gets the min and max wavelength for this setting, converts them to
    column numbers using the dispersion relation, and writes DQ_AIRGLOW
    into dq_image for all columns outside that range so they are excluded
    from the centroid calculation."""
    #
    # Select the row on OPT_ELEM, CENWAVE, SEGMENT and APERTURE
    filter = {'OPT_ELEM': info['opt_elem'],
              'CENWAVE': info['cenwave'],
              'SEGMENT': info['segment'],
              'APERTURE': info['aperture']
              }
    prof_row = cosutil.getTable(proftab, filter)
    if prof_row is None:
        raise MissingRowError("Missing row in PROFTAB: filter = %s" %
                              str(filter))
    wmin, wmax = getWavelengthLimits(prof_row)
    #
    # Now get the dispersion information
    disp_rel = dispersion.Dispersion(disptab, filter)
    # Invert the dispersion relation: wavelength -> (fractional) column.
    min_column = float(disp_rel.evalInvDisp(wmin, tiny=1.0e-8))
    max_column = float(disp_rel.evalInvDisp(wmax, tiny=1.0e-8))
    cosutil.printMsg("Lower wavelength limit of %f corresponds to column %d" % \
                     (wmin, int(min_column)))
    cosutil.printMsg("Upper wavelength limit of %f corresponds to column %d" % \
                     (wmax, int(max_column)))
    # NOTE(review): unlike maskAirglowLines, these assignments overwrite any
    # existing DQ bits in the out-of-range columns rather than OR-ing the
    # flag in -- confirm this is intentional.
    if min_column >= 0:
        dq_image[:, 0:int(min_column+1)] = DQ_AIRGLOW
    if max_column <= FUV_X:
        dq_image[:, int(max_column):] = DQ_AIRGLOW
    return
def getWavelengthLimits(prof_row):
    """Return the (min, max) wavelength limits.

    The prof_row argument is currently unused: the limits are hard-coded
    values (900.0, 2100.0).
    """
    wmin, wmax = 900.0, 2100.0
    return (wmin, wmax)
def getGoodRows(dq_array, dqexclude):
    """Return (first, last) indices of rows with at least COLMIN good columns."""
    ncols = dq_array.shape[1]
    # Per row, count the columns flagged with any of the excluded DQ bits.
    flagged = (dq_array & dqexclude) != 0
    bad_per_row = flagged.astype(int).sum(axis=1)
    # A row is good when fewer than ncols - COLMIN of its columns are
    # flagged, i.e. at least COLMIN columns are clean.
    good_rows = np.where(bad_per_row < (ncols - COLMIN))[0]
    return good_rows[0], good_rows[-1]
def getRegions(dq_array, xtract_info, dqexclude, center):
    """Compute row limits of the background and source extraction regions.

    Parameters
    ----------
    dq_array: ndarray
        DQ image; used to find the first and last usable rows.
    xtract_info: FITS record array
        Row from the XTRACTAB/TWOZXTAB providing B_SPEC, B_BKG1, B_BKG2,
        BHEIGHT and HEIGHT (and, optionally, SLOPE).
    dqexclude: int
        Bit mask of DQ flags that disqualify a pixel.
    center: float or None
        Row on which to center the regions; if None it is derived from
        B_SPEC (shifted to the middle column when a SLOPE column exists).

    Returns
    -------
    regions: dictionary
        Start/stop rows for both background regions and the source
        region, the first/last good rows, and the center actually used.
    """
    regions = {'bg1start': 0,
               'bg1stop': 0,
               'bg2start': 0,
               'bg2stop': 0,
               'specstart': 0,
               'specstop': 0,
               'firstgoodrow': 0,
               'lastgoodrow': 0,
               'center': 0.0
               }
    #
    # First get the range of rows to use
    regions['firstgoodrow'], regions['lastgoodrow'] = getGoodRows(dq_array,
                                                                  dqexclude)
    nrows, ncols = dq_array.shape
    # BHEIGHT is the width for the background extraction regions
    bgheight = xtract_info["BHEIGHT"][0]
    if center is None:
        # Background and source extraction regions in the XTRACTAB and TWOZXTAB
        # are slanted relative to rows at an angle with tangent = SLOPE.
        # We can assume that the trace takes out the overall slope.
        # The limits of the extraction are calculated at the first column,
        # whereas we need them at the center. Hence we correct by the
        # delta_y parameter below.
        #
        try:
            slope = xtract_info["SLOPE"][0]
        except KeyError:
            slope = 0.0
        delta_y = slope*ncols/2
        center = xtract_info["B_SPEC"][0] + delta_y
    # Background regions are offset from the spectrum row by the
    # B_BKG1/B_BKG2 - B_SPEC distances and are BHEIGHT rows wide.
    bg1offset = int(round(xtract_info["B_BKG1"][0] - xtract_info["B_SPEC"][0]))
    regions['bg1start'] = int(round(center)) + bg1offset - bgheight//2
    regions['bg1stop'] = int(round(center)) + bg1offset + bgheight//2
    bg2offset = int(round(xtract_info["B_BKG2"][0] - xtract_info["B_SPEC"][0]))
    regions['bg2start'] = int(round(center)) + bg2offset - bgheight//2
    regions['bg2stop'] = int(round(center)) + bg2offset + bgheight//2
    #
    # Make sure the background regions doesn't extend into bad rows
    regions['bg1start'] = max(regions['bg1start'], regions['firstgoodrow'])
    regions['bg2stop'] = min(regions['bg2stop'], regions['lastgoodrow'])
    # The source region is HEIGHT rows centered on `center`.
    specheight = xtract_info["HEIGHT"][0]
    regions['specstart'] = int(round(center)) - specheight//2
    regions['specstop'] = int(round(center)) + specheight//2
    regions['center'] = center
    return regions
def getGoodColumns(dq_array, dqexclude_target, dqexclude_background, regions):
    """Return the indices of columns whose DQ values are clean in both
    background regions and in the source extraction region."""

    def flagged_sum(rowstart, rowstop, dqbits):
        # Column-wise sum of the excluded DQ bits over rows
        # rowstart..rowstop inclusive.
        band = dq_array[int(rowstart):int(rowstop + 1)]
        return np.bitwise_and(band, dqbits).sum(axis=0)

    total = (flagged_sum(regions['bg1start'], regions['bg1stop'],
                         dqexclude_background)
             + flagged_sum(regions['bg2start'], regions['bg2stop'],
                           dqexclude_background)
             + flagged_sum(regions['specstart'], regions['specstop'],
                           dqexclude_target))
    # A column is good only when no excluded bit is set anywhere in the
    # three regions.
    return np.where(total == 0)
def getScienceCentroid(rebinned_data, dq_array, xtract_info,
                       dqexclude, centroid):
    """Iteratively measure the cross-dispersion centroid of the science data.

    Starting from the XTRACTAB B_SPEC position (corrected to the middle
    column using SLOPE), the extraction and background regions are centered
    on the current estimate, the background-subtracted flux-weighted
    centroid is remeasured, and the process repeats until it converges
    (change < 0.005 rows), fails (shift > 20 rows or no good columns), or
    n_iterations passes have run.

    Returns (status, centroid, goodcolumns, regions).
    """
    n_iterations = 5
    nrows, ncols = rebinned_data.shape
    # center=None on the first pass; getRegions presumably falls back to the
    # reference-file position in that case -- confirm against getRegions.
    center = None
    try:
        slope = xtract_info.field('SLOPE')[0]
    except KeyError:
        # Reference file has no SLOPE column; treat the trace as untilted.
        slope = 0.0
    # B_SPEC is tabulated at the first column; shift it to the center column.
    startcenter = xtract_info.field('B_SPEC')[0] + slope*(ncols / 2)
    cosutil.printMsg('Starting center = %f' % startcenter)
    for iteration in range(n_iterations):
        #
        # Determine which columns to use.  The rows to be used are centered
        # on the value of the variable 'center', which gets updated between
        # iterations, so the good columns can change from one iteration to
        # the next.
        regions = getRegions(dq_array, xtract_info, dqexclude, center)
        #
        # Get the DQ values for target and background regions.  Both
        # use the header value of SDQFLAGS or'd with DQ_AIRGLOW
        # with the gain sag hole DQ taken out.  Put this in a couple of
        # functions so we can change it if needed.
        dqexclude_target = getTargetDQ(dqexclude)
        dqexclude_background = getBackgroundDQ(dqexclude)
        goodcolumns = getGoodColumns(dq_array, dqexclude_target,
                                     dqexclude_background, regions)
        n_goodcolumns = len(goodcolumns[0])
        cosutil.printMsg("%d good columns" % (len(goodcolumns[0])))
        if n_goodcolumns == 0:
            #
            # If this happens, it means the region over which we are calculating the
            # centroid and associated backgrounds is overlapping part of the detector
            # with non-zero DQ (usually the DQ=8/poorly calibrated regions at the top
            # and bottom edges of the active area).
            # In that case, return with a status of CENTROID_UNDEFINED and set the
            # centroid to the center from the reference file and the goodcolumns and
            # regions to what are appropriate for that centroid.
            centroid = startcenter
            center = None
            regions = getRegions(dq_array, xtract_info, dqexclude, center)
            goodcolumns = getGoodColumns(dq_array, dqexclude_target,
                                         dqexclude_background, regions)
            status = CENTROID_UNDEFINED
            cosutil.printMsg("No good columns.")
            cosutil.printMsg("Centroid set to input value from reference file: {}".format(centroid))
            return status, centroid, goodcolumns, regions
        #
        # Now calculate the background.  Use the XTRACTAB reference file
        # to determine the background regions but center them on the
        # 'center' parameter.
        background = getBackground(rebinned_data, goodcolumns, regions)
        cosutil.printMsg("Adopted background in science data = %f" %
                         (background))
        rowstart = regions['specstart']
        rowstop = regions['specstop']
        centroid = getCentroid(rebinned_data,
                               goodcolumns,
                               rowstart,
                               rowstop,
                               background)
        if centroid is not None:
            cosutil.printMsg("Centroid = %f" % (centroid))
            difference = (centroid - regions['center'])
            if abs(difference) > 20:
                # Runaway solution: do not trust a shift this large.
                cosutil.printMsg("Centroid shift too big")
                status = CENTROID_UNDEFINED
                break
            elif abs(difference) < 0.005:
                cosutil.printMsg("Centroid calculation converged")
                status = CENTROID_OK
                break
        else:
            # Zero net flux in the extraction region (see getCentroid).
            cosutil.printWarning("Centroid is not defined in science data")
            status = CENTROID_UNDEFINED
        # Recenter the regions on the latest estimate for the next pass.
        center = centroid
        if iteration == n_iterations - 1:
            #
            # If we get here, it means we didn't converge after the maximum
            # number of iterations.
            cosutil.printWarning("Centroid calculation did not converge")
            cosutil.printWarning("after %d iterations" % (n_iterations))
            status = NO_CONVERGENCE
            # NOTE(review): returning the last *difference* as the centroid
            # looks suspicious (and 'difference' is unbound if the centroid
            # was None on every iteration) -- TODO confirm intent.
            centroid = difference
    return status, centroid, goodcolumns, regions
def getBackgroundDQ(dqexclude):
    """DQ exclusion mask for the background regions.

    Identical to the input mask except that the DQ_GAIN_SAG_HOLE bit is
    cleared (a bit is unset by AND-ing with its complement).
    """
    return dqexclude & ~DQ_GAIN_SAG_HOLE
def getTargetDQ(dqexclude):
    """DQ exclusion mask for the target region.

    Identical to the input mask except that the DQ_GAIN_SAG_HOLE bit is
    cleared (a bit is unset by AND-ing with its complement).
    """
    return dqexclude & ~DQ_GAIN_SAG_HOLE
def getBackground(data_array, goodcolumns, regions):
    """Row-weighted average background over the two background regions.

    Each region contributes the mean of its rows (restricted to the good
    columns), weighted by the number of rows the region spans.
    """
    def region_mean(first, last):
        # Mean over the region's rows, then over the good columns only.
        rows = data_array[int(first):int(last) + 1]
        return rows.mean(axis=0, dtype=np.float64)[goodcolumns].mean(dtype=np.float64)

    b1_lo, b1_hi = regions['bg1start'], regions['bg1stop']
    b2_lo, b2_hi = regions['bg2start'], regions['bg2stop']
    cosutil.printMsg("Background regions are from %d to %d" % (b1_lo,
                                                               b1_hi))
    cosutil.printMsg("and from %d to %d" % (b2_lo, b2_hi))
    n1 = b1_hi - b1_lo + 1
    n2 = b2_hi - b2_lo + 1
    return (n1 * region_mean(b1_lo, b1_hi) +
            n2 * region_mean(b2_lo, b2_hi)) / float(n1 + n2)
def getCentroid(data_array, goodcolumns, rowstart, rowstop, background=None):
    """Flux-weighted mean row (centroid) of data_array over the given rows.

    Only the columns listed in goodcolumns contribute.  Returns None when
    the background-subtracted flux sums to exactly zero, so the centroid
    is undefined.
    """
    if background is None:
        background = 0.0
    row_numbers = np.arange(rowstart, rowstop + 1)[:, np.newaxis]
    flux = data_array[int(rowstart):int(rowstop + 1)] - background
    weighted = (flux * row_numbers).sum(axis=0, dtype=np.float64)[goodcolumns].sum(dtype=np.float64)
    total = flux.sum(axis=0, dtype=np.float64)[goodcolumns].sum(dtype=np.float64)
    if total == 0.0:
        return None
    return weighted / total
def getCentroidError(events, info, goodcolumns, regions):
    """Statistical error on the centroid, derived from raw counts.

    The events are rebinned to a counts image (not flatfielded), a
    background and centroid are measured from it using the same good
    columns as the science centroid, and the error of that counts
    centroid is returned.  Even though the counts centroid and background
    may differ slightly from the flatfielded ones, the error estimate is
    appropriate.  Returns None when there are no good columns.
    """
    if len(goodcolumns[0]) == 0:
        cosutil.printMsg("No good columns, cannot calculate centroid error")
        return None
    # Rebin the events to counts first, then redo the background/centroid
    # measurement on the counts image.
    counts_ij = rebinCounts(events, info)
    counts_background = getBackground(counts_ij, goodcolumns, regions)
    counts_centroid = getCentroid(counts_ij, goodcolumns,
                                  regions['specstart'], regions['specstop'],
                                  counts_background)
    return calculateCentroidError(counts_ij, goodcolumns, regions,
                                  counts_centroid,
                                  background=counts_background)
def calculateCentroidError(data_ij, goodcolumns, regions,
                           centroid, background=0.0):
    """Error on the centroid from a counts image.

    error^2 = sum(counts * (row - centroid)^2) / (net counts)^2, summed
    over the spectral rows and the good columns.  Returns None when the
    net counts are zero or the variance estimate comes out negative.
    """
    if background is None:
        background = 0.0
    lo = regions['specstart']
    hi = regions['specstop']
    counts = data_ij[int(lo):int(hi + 1)]
    offsets = np.arange(lo, hi + 1)[:, np.newaxis] - centroid
    net = (counts - background).sum(axis=0, dtype=np.float64)[goodcolumns].sum(dtype=np.float64)
    # Second moment uses the raw (not background-subtracted) counts.
    second_moment = (counts * offsets * offsets).sum(axis=0, dtype=np.float64)[goodcolumns].sum(dtype=np.float64)
    if net == 0.0:
        return None
    variance = second_moment / net / net
    if variance < 0.0:
        return None
    return math.sqrt(variance)
def getReferenceBackground(profile, goodcolumns, refcenter, regions):
    """Background level of the reference profile, matched to the science regions.

    The science background regions (from *regions*) are shifted so that
    the science centroid lines up with *refcenter* in the profile array,
    then the mean profile value over each shifted region (good columns
    only) is computed.  Regions partly off the array are truncated and
    down-weighted by the kept fraction; regions entirely off the array
    are skipped.  Returns the weighted-average background, or 0.0 when
    neither region is usable.

    Fixes the "Reguested" typo in two log messages.
    """
    nrows, ncols = profile.shape
    if refcenter is None:
        refcenter = nrows/2
    # Shift that maps science-region row coordinates onto profile rows.
    offset = int(round(refcenter - regions['center']))
    bg1start = offset + regions['bg1start']
    bg1stop = offset + regions['bg1stop']
    bg2start = offset + regions['bg2start']
    bg2stop = offset + regions['bg2stop']
    if bg1start >= 0:
        # Region 1 lies fully on the array.
        bkg = profile[int(bg1start):int(bg1stop+1)].mean(axis=0,
                      dtype=np.float64)[goodcolumns].mean(dtype=np.float64)
        wbkg = 1.0
    elif bg1stop > 0:
        # Region 1 starts below row 0: truncate it and weight by the
        # fraction of rows that survive.
        cosutil.printMsg("Background region #1 truncated")
        cosutil.printMsg("Requested region: %d to %d" % (bg1start, bg1stop))
        wbkg = float(bg1stop+1)/(bg1stop+1-bg1start)
        bg1start = 0
        cosutil.printMsg("Using %d to %d, with weight %f" % (bg1start,
                                                             bg1stop, wbkg))
        bkg = profile[int(bg1start):int(bg1stop+1)].mean(axis=0,
                      dtype=np.float64)[goodcolumns].mean(dtype=np.float64)
    else:
        cosutil.printMsg("Unable to extract background region #1 from reference profile")
        cosutil.printMsg("Requested rows: %d to %d" % (bg1start, bg1stop))
        bkg = 0
        wbkg = 0.0
    # NOTE(review): these comparisons use nrows where nrows-1 may be
    # intended -- with bg2stop == nrows the slice below is silently
    # clamped and the region is one row shorter than assumed.  Preserved
    # as-is; TODO confirm against the IDL/original implementation.
    if bg2stop <= nrows:
        # Region 2 lies (nominally) fully on the array.
        bkg = bkg + profile[int(bg2start):int(bg2stop+1)].mean(axis=0,
                            dtype=np.float64)[goodcolumns].mean(dtype=np.float64)
        wbkg2 = 1.0
    elif bg2start <= nrows:
        # Region 2 extends past the top of the array: truncate and
        # down-weight by the kept fraction.
        cosutil.printMsg("Background region #2 truncated")
        cosutil.printMsg("Requested region: %d to %d" % (bg2start, bg2stop))
        wbkg2 = float(nrows-bg2start+1)/(bg2stop+1-bg2start)
        bg2stop = nrows
        cosutil.printMsg("Using %d to %d, with weight %f" % (bg2start,
                                                             bg2stop, wbkg2))
        bkg = bkg + profile[int(bg2start):int(bg2stop+1)].mean(axis=0,
                            dtype=np.float64)[goodcolumns].mean(dtype=np.float64)
    else:
        cosutil.printMsg("Unable to extract background region #2 from" \
                         " reference profile")
        cosutil.printMsg("Requested rows: %d to %d" % (bg2start, bg2stop))
        wbkg2 = 0.0
    wbkg = wbkg + wbkg2
    if wbkg > 0.0:
        cosutil.printMsg("Adopted background regions for reference profile:")
        cosutil.printMsg("Rows %d to %d, and rows %d to %d" % (bg1start,
                                                               bg1stop,
                                                               bg2start,
                                                               bg2stop))
        background = bkg/wbkg
    else:
        cosutil.printMsg("Unable to determine background in reference profile")
        background = 0.0
    cosutil.printMsg("Background = %f" % background)
    return background
def getReferenceCentroid(proftab, info, goodcolumns, regions):
    """Measure the flux-weighted centroid of the reference profile.

    The PROFTAB row matching the exposure (OPT_ELEM / CENWAVE / SEGMENT /
    APERTURE) supplies the 2-D profile, its starting detector row (ROW_0)
    and a nominal center.  The centroid is remeasured iteratively, each
    pass re-centering the extraction and background regions on the
    current estimate, until the change is < 0.005 rows or n_iterations
    passes have run.

    Returns the centroid in detector-row coordinates.

    Raises MissingRowError when no PROFTAB row matches the filter.
    """
    #
    # Select the row on OPT_ELEM, CENWAVE, SEGMENT and APERTURE.
    # (local renamed from 'filter' so the built-in is not shadowed)
    row_filter = {'OPT_ELEM': info['opt_elem'],
                  'CENWAVE': info['cenwave'],
                  'SEGMENT': info['segment'],
                  'APERTURE': info['aperture']
                  }
    prof_row = cosutil.getTable(proftab, row_filter)
    if prof_row is None:
        raise MissingRowError("Missing row in PROFTAB: filter = %s" %
                              str(row_filter))
    else:
        cosutil.printMsg("Using profile reference file %s" % (proftab))
    #
    # Get the profile and center
    profile = prof_row['PROFILE'][0]
    nrows, ncols = profile.shape
    row_0 = prof_row['ROW_0'][0]        # detector row of profile row 0
    refcenter = prof_row['CENTER'][0]   # nominal center, detector coords
    n_iterations = 5
    for iteration in range(n_iterations):
        # Current center estimate in profile-array coordinates.
        profcenter = refcenter - row_0
        cosutil.printMsg("Input Reference centroid = %f, row_0 = %d" %
                         (refcenter, row_0))
        # Shift the science extraction region so it is centered on the
        # profile's current center estimate.
        offset = int(round(profcenter - regions['center']))
        rowstart = offset + regions['specstart']
        rowstop = offset + regions['specstop']
        background = getReferenceBackground(profile, goodcolumns,
                                            profcenter, regions)
        # NOTE(review): getCentroid can return None (zero net flux), which
        # would make the addition below raise TypeError -- TODO confirm
        # that cannot happen for a reference profile.
        centroid = getCentroid(profile, goodcolumns, rowstart, rowstop,
                               background=background)
        ref_centroid = centroid + row_0
        cosutil.printMsg("Measured reference centroid = %f" %
                         (ref_centroid))
        difference = (refcenter - ref_centroid)
        if abs(difference) < 0.005:
            cosutil.printMsg("Reference centroid calculation converged")
            break
        refcenter = ref_centroid
        if iteration == n_iterations -1:
            #
            # If we get here, it means we didn't converge after the maximum
            # number of iterations.
            cosutil.printWarning("Reference centroid calculation did not converge")
            cosutil.printWarning("after %d iterations" % (n_iterations))
    return refcenter
def applyOffset(yfull, offset, tracemask):
    """Add *offset* to the elements of *yfull* selected by *tracemask*.

    The array is modified in place; nothing is returned.
    """
    shifted = yfull[tracemask] + offset
    yfull[tracemask] = shifted
    return
def updateTraceKeywords(hdr, segment, ref_centroid, offset, error):
    """Record the trace-correction results in the header.

    Writes SP_LOC_x (reference centroid), SP_ERR_x (centroid error, or
    -999.0 when undefined) and SP_OFF_x (negated offset) for this
    segment's letter, and guarantees that SP_ERR_ for the other segment
    exists, defaulting it to -999.0.
    """
    this_letter = segment[-1]
    remaining = ['A', 'B']
    remaining.remove(this_letter)   # ValueError if segment is not ...A/...B
    other_letter = remaining[0]
    hdr["SP_LOC_" + this_letter] = ref_centroid
    hdr["SP_ERR_" + this_letter] = -999.0 if error is None else error
    try:
        hdr["SP_ERR_" + other_letter]
    except KeyError:
        # Make sure the companion keyword always exists.
        hdr["SP_ERR_" + other_letter] = -999.0
    hdr["SP_OFF_" + this_letter] = -offset
    return
|
spacetelescopeREPO_NAMEcalcosPATH_START.@calcos_extracted@calcos-master@calcos@trace.py@.PATH_END.py
|
{
"filename": "plot_velpdf.py",
"repo_name": "justinread/gravsphere",
"repo_path": "gravsphere_extracted/gravsphere-master/plot_velpdf.py",
"type": "Python"
}
|
###########################################################
#plot_velpdf
###########################################################
#Python programme to plot the velpdf function used by
#binulator to fit each velocity bin.
###########################################################

#Main code:

#Imports & dependencies:
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from functions import *
from constants import *
from figures import *
import sys

#Set up example vel PDF plot. Parameters are:
#[vzmean,alp,bet,backamp,backmean,backsig]
beta_range = np.array([1.0,2.0,5.0])
mycol = ['black','blue','green']
pars_beta_range = np.array([0.0,15.0,0.0,0.0,0.0,50.0])
#FIX: np.int was deprecated in NumPy 1.20 and removed in NumPy 1.24;
#the built-in int is the drop-in replacement.
vz = np.linspace(-50,50,int(1e3))
vzerr = np.zeros(len(vz)) + 2.0

###########################################################
#Plots:

#Sequence of velpdf showing its kurtosis range for the
#true PDF convolved with Gaussian velocity errors, and
#a fast analytic approximation. Includes a stacked error
#residual plot.
fig = plt.figure(figsize=(figx,figy))
plt.rcParams['font.size'] = myfontsize
ax = fig.add_subplot(211)
for axis in ['top','bottom','left','right']:
    ax.spines[axis].set_linewidth(mylinewidth)
ax.minorticks_on()
ax.tick_params('both', length=10, width=2, which='major')
ax.tick_params('both', length=5, width=1, which='minor')
ax.set_ylabel(r'frequency',fontsize=myfontsize)

for i in range(len(beta_range)):
    pars_beta_range[2] = beta_range[i]
    #pdfnoerr is not plotted below; kept for reference/inspection.
    pdfnoerr = velpdf_noerr(vz,pars_beta_range)
    pdf = velpdf(vz,vzerr,pars_beta_range)
    pdffast = velpdffast(vz,vzerr,pars_beta_range)
    kurt = kurt_calc(pars_beta_range)
    plt.plot(vz,pdffast,linewidth=mylinewidth,\
             color=mycol[i],linestyle='dashed')
    plt.plot(vz,pdf,linewidth=mylinewidth,color=mycol[i],\
             label=r'$\beta_v,\kappa = %.1f,%.1f$' % \
             (beta_range[i],kurt))
ax.set_xlim([-50,50])
plt.legend(fontsize=16,loc='upper left')

ax2 = fig.add_subplot(212)
for axis in ['top','bottom','left','right']:
    ax2.spines[axis].set_linewidth(mylinewidth)
ax2.minorticks_on()
ax2.tick_params('both', length=10, width=2, which='major')
ax2.tick_params('both', length=5, width=1, which='minor')
ax2.set_xlabel(r'v$_{\rm los}$ (km/s)',fontsize=myfontsize)
ax2.set_ylabel(r'Residual (\%)',fontsize=myfontsize)

#Residual between the exact (error-convolved) PDF and the fast
#analytic approximation, as a percentage of the PDF peak.
for i in range(len(beta_range)):
    pars_beta_range[2] = beta_range[i]
    pdf = velpdf(vz,vzerr,pars_beta_range)
    pdffast = velpdffast(vz,vzerr,pars_beta_range)
    residual = (pdf-pdffast) / np.max(pdf) * 100.0
    plt.plot(vz,residual,linewidth=mylinewidth,\
             color=mycol[i])
ax2.set_xlim([-50,50])
#ax2.set_ylim([-20,20])

plt.savefig(output_base+'velpdf_kurtosis_example.pdf',\
            bbox_inches='tight')
|
justinreadREPO_NAMEgravspherePATH_START.@gravsphere_extracted@gravsphere-master@plot_velpdf.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/graph_objs/volume/colorbar/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated plotly package initializer.
# On Python < 3.7 (no module-level __getattr__, PEP 562) or while a type
# checker is running, import the symbols eagerly; otherwise register them
# with relative_import so they are loaded lazily on first access.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._tickfont import Tickfont
    from ._tickformatstop import Tickformatstop
    from ._title import Title
    from . import title
else:
    from _plotly_utils.importers import relative_import

    # relative_import returns (__all__, __getattr__, __dir__): the public
    # name list, a lazy attribute loader, and a dir() hook for this module.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [".title"],
        ["._tickfont.Tickfont", "._tickformatstop.Tickformatstop", "._title.Title"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@graph_objs@volume@colorbar@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolargl/marker/__init__.py",
"type": "Python"
}
|
import sys
from typing import TYPE_CHECKING

# Auto-generated initializer for the scatterpolargl.marker validators
# package.  On Python < 3.7 (no module-level __getattr__, PEP 562) or
# under a type checker, import every validator eagerly; otherwise defer
# the imports until first attribute access via relative_import.
if sys.version_info < (3, 7) or TYPE_CHECKING:
    from ._symbolsrc import SymbolsrcValidator
    from ._symbol import SymbolValidator
    from ._sizesrc import SizesrcValidator
    from ._sizeref import SizerefValidator
    from ._sizemode import SizemodeValidator
    from ._sizemin import SizeminValidator
    from ._size import SizeValidator
    from ._showscale import ShowscaleValidator
    from ._reversescale import ReversescaleValidator
    from ._opacitysrc import OpacitysrcValidator
    from ._opacity import OpacityValidator
    from ._line import LineValidator
    from ._colorsrc import ColorsrcValidator
    from ._colorscale import ColorscaleValidator
    from ._colorbar import ColorbarValidator
    from ._coloraxis import ColoraxisValidator
    from ._color import ColorValidator
    from ._cmin import CminValidator
    from ._cmid import CmidValidator
    from ._cmax import CmaxValidator
    from ._cauto import CautoValidator
    from ._autocolorscale import AutocolorscaleValidator
    from ._anglesrc import AnglesrcValidator
    from ._angle import AngleValidator
else:
    from _plotly_utils.importers import relative_import

    # relative_import supplies the matching __all__ list plus the
    # lazy-loading __getattr__/__dir__ hooks for this module.
    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        [
            "._symbolsrc.SymbolsrcValidator",
            "._symbol.SymbolValidator",
            "._sizesrc.SizesrcValidator",
            "._sizeref.SizerefValidator",
            "._sizemode.SizemodeValidator",
            "._sizemin.SizeminValidator",
            "._size.SizeValidator",
            "._showscale.ShowscaleValidator",
            "._reversescale.ReversescaleValidator",
            "._opacitysrc.OpacitysrcValidator",
            "._opacity.OpacityValidator",
            "._line.LineValidator",
            "._colorsrc.ColorsrcValidator",
            "._colorscale.ColorscaleValidator",
            "._colorbar.ColorbarValidator",
            "._coloraxis.ColoraxisValidator",
            "._color.ColorValidator",
            "._cmin.CminValidator",
            "._cmid.CmidValidator",
            "._cmax.CmaxValidator",
            "._cauto.CautoValidator",
            "._autocolorscale.AutocolorscaleValidator",
            "._anglesrc.AnglesrcValidator",
            "._angle.AngleValidator",
        ],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolargl@marker@__init__.py@.PATH_END.py
|
{
"filename": "transfer.py",
"repo_name": "Harry45/DESEMU",
"repo_path": "DESEMU_extracted/DESEMU-main/jax_cosmo/transfer.py",
"type": "Python"
}
|
# This module contains various transfer functions from the literatu
import jax.numpy as np
import jax_cosmo.background as bkgrd
import jax_cosmo.constants as const
__all__ = ["Eisenstein_Hu"]
def Eisenstein_Hu(cosmo, k, type="eisenhu_osc"):
    """Computes the Eisenstein & Hu matter transfer function.

    Note that in this module ``np`` is ``jax.numpy``, so everything below
    is traceable/differentiable by JAX.

    Parameters
    ----------
    cosmo: Background
        Background cosmology
    k: array_like
        Wave number in h Mpc^{-1}
    type: str, optional
        Type of transfer function. Either 'eisenhu' (smooth, no baryon
        wiggles) or 'eisenhu_osc' (with baryon acoustic oscillations)
        (def: 'eisenhu_osc').  The parameter name shadows the built-in
        ``type``; kept for API compatibility.

    Returns
    -------
    T: array_like
        Value of the transfer function at the requested wave number

    Raises
    ------
    NotImplementedError
        If *type* is neither 'eisenhu' nor 'eisenhu_osc'.

    Notes
    -----
    The Eisenstein & Hu transfer functions are computed using the fitting
    formulae of :cite:`1998:EisensteinHu`
    """
    #############################################
    # Quantities computed from 1998:EisensteinHu
    # Provides : - k_eq : scale of the particle horizon at equality epoch
    #            - z_eq : redshift of equality epoch
    #            - R_eq : ratio of the baryon to photon momentum density
    #                     at z_eq
    #            - z_d  : redshift of drag epoch
    #            - R_d  : ratio of the baryon to photon momentum density
    #                     at z_d
    #            - sh_d : sound horizon at drag epoch
    #            - k_silk : Silk damping scale
    T_2_7_sqr = (const.tcmb / 2.7) ** 2
    h2 = cosmo.h**2
    w_m = cosmo.Omega_m * h2          # physical matter density omega_m h^2
    w_b = cosmo.Omega_b * h2          # physical baryon density omega_b h^2
    fb = cosmo.Omega_b / cosmo.Omega_m    # baryon fraction
    fc = (cosmo.Omega_m - cosmo.Omega_b) / cosmo.Omega_m   # CDM fraction
    k_eq = 7.46e-2 * w_m / T_2_7_sqr / cosmo.h  # Eq. (3) [h/Mpc]
    z_eq = 2.50e4 * w_m / (T_2_7_sqr) ** 2  # Eq. (2)
    # z drag from Eq. (4)
    b1 = 0.313 * np.power(w_m, -0.419) * (1.0 + 0.607 * np.power(w_m, 0.674))
    b2 = 0.238 * np.power(w_m, 0.223)
    z_d = (
        1291.0
        * np.power(w_m, 0.251)
        / (1.0 + 0.659 * np.power(w_m, 0.828))
        * (1.0 + b1 * np.power(w_b, b2))
    )
    # Ratio of the baryon to photon momentum density at z_d Eq. (5)
    R_d = 31.5 * w_b / (T_2_7_sqr) ** 2 * (1.0e3 / z_d)
    # Ratio of the baryon to photon momentum density at z_eq Eq. (5)
    R_eq = 31.5 * w_b / (T_2_7_sqr) ** 2 * (1.0e3 / z_eq)
    # Sound horizon at drag epoch in h^-1 Mpc Eq. (6)
    sh_d = (
        2.0
        / (3.0 * k_eq)
        * np.sqrt(6.0 / R_eq)
        * np.log((np.sqrt(1.0 + R_d) + np.sqrt(R_eq + R_d)) / (1.0 + np.sqrt(R_eq)))
    )
    # Eq. (7) but in [hMpc^{-1}]
    k_silk = (
        1.6
        * np.power(w_b, 0.52)
        * np.power(w_m, 0.73)
        * (1.0 + np.power(10.4 * w_m, -0.95))
        / cosmo.h
    )
    #############################################

    # Shape-parameter suppression from baryons, EH98 (30, 31); used by the
    # smooth ('eisenhu') fit.
    alpha_gamma = (
        1.0
        - 0.328 * np.log(431.0 * w_m) * w_b / w_m
        + 0.38 * np.log(22.3 * w_m) * (cosmo.Omega_b / cosmo.Omega_m) ** 2
    )
    gamma_eff = (
        cosmo.Omega_m
        * cosmo.h
        * (alpha_gamma + (1.0 - alpha_gamma) / (1.0 + (0.43 * k * sh_d) ** 4))
    )
    if type == "eisenhu":
        q = k * np.power(const.tcmb / 2.7, 2) / gamma_eff

        # EH98 (29) #
        L = np.log(2.0 * np.exp(1.0) + 1.8 * q)
        C = 14.2 + 731.0 / (1.0 + 62.5 * q)
        res = L / (L + C * q * q)

    elif type == "eisenhu_osc":
        # Cold dark matter transfer function

        # EH98 (11, 12)
        a1 = np.power(46.9 * w_m, 0.670) * (1.0 + np.power(32.1 * w_m, -0.532))
        a2 = np.power(12.0 * w_m, 0.424) * (1.0 + np.power(45.0 * w_m, -0.582))
        alpha_c = np.power(a1, -fb) * np.power(a2, -(fb**3))
        b1 = 0.944 / (1.0 + np.power(458.0 * w_m, -0.708))
        b2 = np.power(0.395 * w_m, -0.0266)
        beta_c = 1.0 + b1 * (np.power(fc, b2) - 1.0)
        beta_c = 1.0 / beta_c

        # EH98 (19). [k] = h/Mpc
        def T_tilde(k1, alpha, beta):
            # EH98 (10); [q] = 1 BUT [k] = h/Mpc
            q = k1 / (13.41 * k_eq)
            L = np.log(np.exp(1.0) + 1.8 * beta * q)
            C = 14.2 / alpha + 386.0 / (1.0 + 69.9 * np.power(q, 1.08))
            T0 = L / (L + C * q * q)
            return T0

        # EH98 (17, 18): interpolate between the two CDM limits
        f = 1.0 / (1.0 + (k * sh_d / 5.4) ** 4)
        Tc = f * T_tilde(k, 1.0, beta_c) + (1.0 - f) * T_tilde(k, alpha_c, beta_c)

        # Baryon transfer function
        # EH98 (19, 14, 21)
        y = (1.0 + z_eq) / (1.0 + z_d)
        x = np.sqrt(1.0 + y)
        G_EH98 = y * (-6.0 * x + (2.0 + 3.0 * y) * np.log((x + 1.0) / (x - 1.0)))
        alpha_b = 2.07 * k_eq * sh_d * np.power(1.0 + R_d, -0.75) * G_EH98

        beta_node = 8.41 * np.power(w_m, 0.435)
        tilde_s = sh_d / np.power(1.0 + (beta_node / (k * sh_d)) ** 3, 1.0 / 3.0)

        beta_b = 0.5 + fb + (3.0 - 2.0 * fb) * np.sqrt((17.2 * w_m) ** 2 + 1.0)

        # [tilde_s] = Mpc/h
        Tb = (
            T_tilde(k, 1.0, 1.0) / (1.0 + (k * sh_d / 5.2) ** 2)
            + alpha_b
            / (1.0 + (beta_b / (k * sh_d)) ** 3)
            * np.exp(-np.power(k / k_silk, 1.4))
        ) * np.sinc(k * tilde_s / np.pi)

        # Total transfer function: density-weighted sum of baryon and CDM
        res = fb * Tb + fc * Tc
    else:
        raise NotImplementedError
    return res
|
Harry45REPO_NAMEDESEMUPATH_START.@DESEMU_extracted@DESEMU-main@jax_cosmo@transfer.py@.PATH_END.py
|
{
"filename": "spd_pgs_make_phi_spec.py",
"repo_name": "spedas/pyspedas",
"repo_path": "pyspedas_extracted/pyspedas-master/pyspedas/particles/spd_part_products/spd_pgs_make_phi_spec.py",
"type": "Python"
}
|
import math
import numpy as np
from scipy.ndimage import shift
# use nansum from bottleneck if it's installed, otherwise use the numpy one
try:
import bottleneck as bn
nansum = bn.nansum
except ImportError:
nansum = np.nansum
def spd_pgs_make_phi_spec(data_in, resolution=None):
    """
    Builds phi (longitudinal) spectrogram from the particle data structure

    Each output phi bin is the solid-angle-weighted average of all data
    bins that overlap it in phi (every output bin spans all theta).

    Parameters
    ----------
        data_in: dict
            Particle data structure; must contain 2-D arrays under the
            keys 'data', 'bins', 'theta', 'dtheta', 'phi', 'dphi'.
            The input is NOT modified.
        resolution: int
            Number of phi bins in the output; when None it is inferred
            from the number of bins sharing the theta value closest to 0.

    Returns
    -------
    tuple
        Tuple containing: (phi values for y-axis, spectrogram values)
    """
    dr = math.pi/180.

    data = data_in.copy()
    # FIX: dict.copy() is shallow, so the arrays are still shared with the
    # caller.  Copy the flux array before zeroing inactive bins below;
    # otherwise the caller's 'data' array would be modified in place.
    data['data'] = data_in['data'].copy()

    # zero inactive bins to ensure areas with no data are represented as NaN
    zero_bins = np.argwhere(data['bins'] == 0)
    if zero_bins.size != 0:
        for item in zero_bins:
            data['data'][item[0], item[1]] = 0.0

    # get number of phi values
    if resolution is None:
        # method taken from the IDL code
        idx = np.nanargmin(np.abs((data['theta'][0, :])))
        n_phi = len(np.argwhere(data['theta'][0, :] == np.abs((data['theta'][0, :]))[idx]))
    else:
        n_phi = resolution

    # init this sample's piece of the spectrogram
    ave = np.zeros(n_phi)

    # form grid specifying the spectrogram's phi bins
    phi_grid = np.linspace(0, 360.0, n_phi+1)
    phi_grid_width = np.nanmedian(phi_grid - shift(phi_grid, 1))

    # get min/max of all data bins
    # keep phi in [0, 360]
    phi_min = (data['phi'] - 0.5*data['dphi'])
    phi_max = (data['phi'] + 0.5*data['dphi']) % 360.0

    # algorithm below assumes maximums at 360 not wrapped to 0
    phi_max[phi_max == 0] = 360.0

    # keep phi in [0, 360]
    phi_min[phi_min < 0] = phi_min[phi_min < 0] + 360

    # keep track of bins that span phi=0
    wrapped = phi_min > phi_max

    # When averaging data bins will be weighted by the solid angle of their overlap,
    # with the given spectrogram bin. Since each spectrogram bin spans all theta
    # values the theta portion of that calculation can be done in advance. These
    # values will later be multiplied by the overlap along phi to get the total
    # solid angle.
    omega_part = np.abs(np.sin(dr * (data['theta'] + .5*data['dtheta'])) - np.sin(dr * (data['theta'] - .5*data['dtheta'])))

    # Fortran-order flattening matches the IDL heritage of this algorithm.
    omega_part_flat = omega_part.flatten(order='F')
    phi_max_flat = phi_max.flatten(order='F')
    phi_min_flat = phi_min.flatten(order='F')
    wrapped_flat = wrapped.flatten(order='F')
    data_flat = data['data'].flatten(order='F')
    bins_flat = data['bins'].flatten(order='F')
    dphi_flat = data['dphi'].flatten(order='F')

    # Loop over each phi bin in the spectrogram and determine which data bins
    # overlap. All overlapping bins will be weighted according to the solid
    # angle of their intersection and averaged.
    for i in range(0, n_phi):
        weight = np.zeros(phi_min_flat.shape)

        # data bins whose maximum overlaps the current spectrogram bin
        idx_max = np.argwhere((phi_max_flat > phi_grid[i]) & (phi_max_flat < phi_grid[i+1]))
        if idx_max.size != 0:
            weight[idx_max] = (phi_max_flat[idx_max] - phi_grid[i]) * omega_part_flat[idx_max]

        # data bins whose minimum overlaps the current spectrogram bin
        idx_min = np.argwhere((phi_min_flat > phi_grid[i]) & (phi_min_flat < phi_grid[i+1]))
        if idx_min.size != 0:
            weight[idx_min] = (phi_grid[i+1] - phi_min_flat[idx_min]) * omega_part_flat[idx_min]

        # data bins contained within the current spectrogram bin
        contained = np.intersect1d(idx_max, idx_min)
        if contained.size != 0:
            weight[contained] = dphi_flat[contained] * omega_part_flat[contained]

        # data bins that completely cover the current spectrogram bin
        # (including bins that wrap through phi=0)
        idx_all = np.argwhere(((phi_min_flat <= phi_grid[i]) & (phi_max_flat >= phi_grid[i+1])) |
                              (wrapped_flat & ((phi_min_flat > phi_grid[i+1]) & (phi_max_flat > phi_grid[i+1]))) |
                              (wrapped_flat & ((phi_min_flat < phi_grid[i]) & (phi_max_flat < phi_grid[i]))))
        if idx_all.size != 0:
            weight[idx_all] = phi_grid_width * omega_part_flat[idx_all]

        # combine indices
        idx = np.unique(np.concatenate((idx_min, idx_max, idx_all)))

        # assign a weighted average to this bin
        if idx_max.size + idx_min.size + idx_all.size > 0:
            # normalize weighting to selected, active bins
            weight[idx] = weight[idx] * bins_flat[idx]
            weight = weight/nansum(weight)

            # average
            ave[i] = nansum(data_flat[idx]*weight[idx])

    # get y axis (midpoints of the phi grid)
    y = (phi_grid+shift(phi_grid, 1))/2.0
    y = y[1:]

    return (y, ave)
|
spedasREPO_NAMEpyspedasPATH_START.@pyspedas_extracted@pyspedas-master@pyspedas@particles@spd_part_products@spd_pgs_make_phi_spec.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/tools/python3/Lib/unittest/main.py",
"type": "Python"
}
|
"""Unittest main program"""
import sys
import argparse
import os
import warnings
from . import loader, runner
from .signals import installHandler
__unittest = True
_NO_TESTS_EXITCODE = 5
MAIN_EXAMPLES = """\
Examples:
%(prog)s test_module - run tests from test_module
%(prog)s module.TestClass - run tests from module.TestClass
%(prog)s module.Class.test_method - run specified test method
%(prog)s path/to/test_file.py - run tests from test_file.py
"""
MODULE_EXAMPLES = """\
Examples:
%(prog)s - run default set of tests
%(prog)s MyTestSuite - run suite 'MyTestSuite'
%(prog)s MyTestCase.testSomething - run MyTestCase.testSomething
%(prog)s MyTestCase - run all 'test*' test methods
in MyTestCase
"""
def _convert_name(name):
    """Translate a path to a .py file into a dotted module name.

    Only names that refer to an existing file and end in '.py'
    (case-insensitively -- 'foo.PY' imports on Windows, though not on
    Linux / Mac OS X; a better check would verify it is a valid module
    name) are converted.  Absolute paths are first made relative to the
    current directory and are returned unchanged when they fall outside
    of it.  Anything else is returned as-is.
    """
    is_py_file = os.path.isfile(name) and name.lower().endswith('.py')
    if not is_py_file:
        return name
    if os.path.isabs(name):
        rel_path = os.path.relpath(name, os.getcwd())
        if os.path.isabs(rel_path) or rel_path.startswith(os.pardir):
            # Still absolute (other drive) or outside the cwd: leave it.
            return name
        name = rel_path
    # On Windows both '\' and '/' are used as path separators, so replace
    # both rather than relying on os.path.sep.
    stem = os.path.normpath(name)[:-3]
    return stem.replace('\\', '.').replace('/', '.')
def _convert_names(names):
    """Apply _convert_name to every entry, preserving order."""
    return list(map(_convert_name, names))
def _convert_select_pattern(pattern):
    """Wrap a -k substring in '*'s unless the user already supplied a glob."""
    if '*' in pattern:
        return pattern
    return '*%s*' % pattern
class TestProgram(object):
"""A command-line program that runs a set of tests; this is primarily
for making test modules conveniently executable.
"""
# defaults for testing
module=None
verbosity = 1
failfast = catchbreak = buffer = progName = warnings = testNamePatterns = None
_discovery_parser = None
def __init__(self, module='__main__', defaultTest=None, argv=None,
testRunner=None, testLoader=loader.defaultTestLoader,
exit=True, verbosity=1, failfast=None, catchbreak=None,
buffer=None, warnings=None, *, tb_locals=False,
durations=None):
if isinstance(module, str):
self.module = __import__(module)
for part in module.split('.')[1:]:
self.module = getattr(self.module, part)
else:
self.module = module
if argv is None:
argv = sys.argv
self.exit = exit
self.failfast = failfast
self.catchbreak = catchbreak
self.verbosity = verbosity
self.buffer = buffer
self.tb_locals = tb_locals
self.durations = durations
if warnings is None and not sys.warnoptions:
# even if DeprecationWarnings are ignored by default
# print them anyway unless other warnings settings are
# specified by the warnings arg or the -W python flag
self.warnings = 'default'
else:
# here self.warnings is set either to the value passed
# to the warnings args or to None.
# If the user didn't pass a value self.warnings will
# be None. This means that the behavior is unchanged
# and depends on the values passed to -W.
self.warnings = warnings
self.defaultTest = defaultTest
self.testRunner = testRunner
self.testLoader = testLoader
self.progName = os.path.basename(argv[0])
self.parseArgs(argv)
self.runTests()
def usageExit(self, msg=None):
warnings.warn("TestProgram.usageExit() is deprecated and will be"
" removed in Python 3.13", DeprecationWarning)
if msg:
print(msg)
if self._discovery_parser is None:
self._initArgParsers()
self._print_help()
sys.exit(2)
def _print_help(self, *args, **kwargs):
if self.module is None:
print(self._main_parser.format_help())
print(MAIN_EXAMPLES % {'prog': self.progName})
self._discovery_parser.print_help()
else:
print(self._main_parser.format_help())
print(MODULE_EXAMPLES % {'prog': self.progName})
def parseArgs(self, argv):
self._initArgParsers()
if self.module is None:
if len(argv) > 1 and argv[1].lower() == 'discover':
self._do_discovery(argv[2:])
return
self._main_parser.parse_args(argv[1:], self)
if not self.tests:
# this allows "python -m unittest -v" to still work for
# test discovery.
self._do_discovery([])
return
else:
self._main_parser.parse_args(argv[1:], self)
if self.tests:
self.testNames = _convert_names(self.tests)
if __name__ == '__main__':
# to support python -m unittest ...
self.module = None
elif self.defaultTest is None:
# createTests will load tests from self.module
self.testNames = None
elif isinstance(self.defaultTest, str):
self.testNames = (self.defaultTest,)
else:
self.testNames = list(self.defaultTest)
self.createTests()
def createTests(self, from_discovery=False, Loader=None):
if self.testNamePatterns:
self.testLoader.testNamePatterns = self.testNamePatterns
if from_discovery:
loader = self.testLoader if Loader is None else Loader()
self.test = loader.discover(self.start, self.pattern, self.top)
elif self.testNames is None:
self.test = self.testLoader.loadTestsFromModule(self.module)
else:
self.test = self.testLoader.loadTestsFromNames(self.testNames,
self.module)
def _initArgParsers(self):
parent_parser = self._getParentArgParser()
self._main_parser = self._getMainArgParser(parent_parser)
self._discovery_parser = self._getDiscoveryArgParser(parent_parser)
def _getParentArgParser(self):
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-v', '--verbose', dest='verbosity',
action='store_const', const=2,
help='Verbose output')
parser.add_argument('-q', '--quiet', dest='verbosity',
action='store_const', const=0,
help='Quiet output')
parser.add_argument('--locals', dest='tb_locals',
action='store_true',
help='Show local variables in tracebacks')
parser.add_argument('--durations', dest='durations', type=int,
default=None, metavar="N",
help='Show the N slowest test cases (N=0 for all)')
if self.failfast is None:
parser.add_argument('-f', '--failfast', dest='failfast',
action='store_true',
help='Stop on first fail or error')
self.failfast = False
if self.catchbreak is None:
parser.add_argument('-c', '--catch', dest='catchbreak',
action='store_true',
help='Catch Ctrl-C and display results so far')
self.catchbreak = False
if self.buffer is None:
parser.add_argument('-b', '--buffer', dest='buffer',
action='store_true',
help='Buffer stdout and stderr during tests')
self.buffer = False
if self.testNamePatterns is None:
parser.add_argument('-k', dest='testNamePatterns',
action='append', type=_convert_select_pattern,
help='Only run tests which match the given substring')
self.testNamePatterns = []
return parser
def _getMainArgParser(self, parent):
    """Return the argument parser for a plain (non-discovery) test run."""
    main_parser = argparse.ArgumentParser(parents=[parent])
    main_parser.prog = self.progName
    # Delegate -h/--help output to the program's own printer.
    main_parser.print_help = self._print_help
    main_parser.add_argument(
        'tests', nargs='*',
        help='a list of any number of test modules, '
             'classes and test methods.')
    return main_parser
def _getDiscoveryArgParser(self, parent):
    """Return the argument parser for the ``discover`` sub-command."""
    disc_parser = argparse.ArgumentParser(parents=[parent])
    disc_parser.prog = '%s discover' % self.progName
    disc_parser.epilog = ('For test discovery all test modules must be '
                          'importable from the top level directory of the '
                          'project.')
    disc_parser.add_argument('-s', '--start-directory', dest='start',
                             help="Directory to start discovery ('.' default)")
    disc_parser.add_argument('-p', '--pattern', dest='pattern',
                             help="Pattern to match tests ('test*.py' default)")
    disc_parser.add_argument('-t', '--top-level-directory', dest='top',
                             help='Top level directory of project (defaults to '
                                  'start directory)')
    # The same three values may also be given positionally; SUPPRESS keeps an
    # omitted positional from overwriting the option-supplied value.
    for positional in ('start', 'pattern', 'top'):
        disc_parser.add_argument(positional, nargs='?',
                                 default=argparse.SUPPRESS,
                                 help=argparse.SUPPRESS)
    return disc_parser
def _do_discovery(self, argv, Loader=None):
    """Set discovery defaults, optionally parse ``argv``, then build the suite.

    ``argv`` of None means the defaults below are used unchanged; otherwise
    the discovery parser writes its results directly onto ``self``.
    """
    self.start = '.'
    self.pattern = 'test*.py'
    self.top = None
    if argv is not None:
        # handle command line args for test discovery
        if self._discovery_parser is None:
            # for testing: parsers are normally built in parseArgs
            self._initArgParsers()
        # parse_args with a namespace: attributes land on self directly
        self._discovery_parser.parse_args(argv, self)
    self.createTests(from_discovery=True, Loader=Loader)
def runTests(self):
    """Instantiate the configured runner if needed, run ``self.test``, and
    (when ``self.exit`` is set) terminate the process with a status code.

    The nested try/except cascade keeps compatibility with runner classes
    whose constructors predate the newer keyword arguments.
    """
    if self.catchbreak:
        installHandler()
    if self.testRunner is None:
        self.testRunner = runner.TextTestRunner
    if isinstance(self.testRunner, type):
        try:
            try:
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer,
                                             warnings=self.warnings,
                                             tb_locals=self.tb_locals,
                                             durations=self.durations)
            except TypeError:
                # didn't accept the tb_locals or durations argument
                testRunner = self.testRunner(verbosity=self.verbosity,
                                             failfast=self.failfast,
                                             buffer=self.buffer,
                                             warnings=self.warnings)
        except TypeError:
            # didn't accept the verbosity, buffer or failfast arguments
            testRunner = self.testRunner()
    else:
        # it is assumed to be a TestRunner instance
        testRunner = self.testRunner
    self.result = testRunner.run(self.test)
    if self.exit:
        # _NO_TESTS_EXITCODE when nothing ran and nothing was skipped,
        # 0 on success, 1 on any failure/error.
        if self.result.testsRun == 0 and len(self.result.skipped) == 0:
            sys.exit(_NO_TESTS_EXITCODE)
        elif self.result.wasSuccessful():
            sys.exit(0)
        else:
            sys.exit(1)
main = TestProgram
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@tools@python3@Lib@unittest@main.py@.PATH_END.py
|
{
"filename": "rfast_routines.py",
"repo_name": "hablabx/rfast",
"repo_path": "rfast_extracted/rfast-main/rfast_routines.py",
"type": "Python"
}
|
import lblabc_input
import math
import os
import numpy as np
from scipy import interpolate
from astropy.io import ascii
from rfast_opac_routines import opacities_read
from rfast_opac_routines import cia_read
from rfast_opac_routines import rayleigh
#
#
# flux adding routine for non-emitting, inhomogeneous, scattering atmosphere
#
# inputs:
#
# dtau - layer extinction optical depth [Nlam,Nlay]
# g - layer asymmetry parameter [Nlam,Nlay]
# omega - layer single scattering albedo [Nlam,Nlay]
# As - surface albedo
#
# optional:
#
# r,t,a - return layer reflectivity, transmissivity, and/or absorptivity if layp = True [Nlam,Nlay]
#
# outputs:
#
# Ag - planetary geometric albedo [Nlam]
#
def flxadd(dtau, g, omega, As, layp=-1):
    """Flux-adding solution for a non-emitting, inhomogeneous, scattering atmosphere.

    Parameters
    ----------
    dtau, g, omega : ndarray [Nlam, Nlay]
        Layer extinction optical depth, asymmetry parameter, and
        single-scattering albedo.
    As : float or ndarray [Nlam]
        Surface albedo (lower boundary reflectivity).
    layp : optional
        If truthy, also return the per-layer reflectivity, transmissivity
        and absorptivity.

    Returns
    -------
    Ag : ndarray [Nlam]
        Planetary geometric albedo, or ``(Ag, r, t, a)`` when ``layp`` is truthy.

    Notes
    -----
    Bug fix: previously the optional fourth return value was the internal
    two-stream intermediate ``sqrt(1 - omega)`` (and was undefined when every
    layer had ``omega == 1``); it is now the layer absorptivity ``1 - r - t``,
    consistent with the documented contract and with flxadd_em/flxadd_3d.
    """
    # number of wavelength points and atmospheric layers
    Nlam = dtau.shape[0]
    Nlay = dtau.shape[1]
    # per-layer reflectivity/transmissivity and cumulative upward reflectivity
    r = np.zeros([Nlam, Nlay])
    t = np.zeros([Nlam, Nlay])
    Ru = np.zeros([Nlam, Nlay + 1])
    # conservative-scattering limit (omega == 1): closed-form two-stream result
    ic = np.where(omega == 1)
    if ic[0].size != 0:
        r[ic] = 3/4*np.divide(np.multiply(1 - g[ic], dtau[ic]),
                              1 + np.multiply(3/4*(1 - g[ic]), dtau[ic]))
        t[ic] = np.divide(1, 1 + np.multiply(3/4*(1 - g[ic]), dtau[ic]))
    # general case (omega != 1)
    ic = np.where(omega != 1)
    if ic[0].size != 0:
        # two-stream intermediates; ``f`` matches the naming in flxadd_em/flxadd_3d
        f = np.zeros([Nlam, Nlay])
        b = np.zeros([Nlam, Nlay])
        Ainf = np.zeros([Nlam, Nlay])
        f[ic] = np.sqrt(1 - omega[ic])
        b[ic] = 3/2*np.sqrt(1 - np.multiply(omega[ic], g[ic]))
        d = f + 2/3*b  # convenient definition
        idn = np.where(d != 0)
        if idn[0].size != 0:
            # semi-infinite-atmosphere albedo
            Ainf[idn] = np.divide(2/3*b[idn] - f[idn], d[idn])
        # another convenient definition (denominator of r and t)
        d = 1 - np.multiply(np.exp(-2*np.multiply(np.multiply(f[ic], b[ic]), dtau[ic])),
                            np.power(Ainf[ic], 2))
        r[ic] = np.divide(np.multiply(Ainf[ic],
                                      1 - np.exp(-2*np.multiply(np.multiply(f[ic], b[ic]), dtau[ic]))), d)
        t[ic] = np.divide(np.multiply(1 - np.power(Ainf[ic], 2),
                                      np.exp(-np.multiply(np.multiply(f[ic], b[ic]), dtau[ic]))), d)
    # lower boundary condition: surface reflectivity
    Ru[:, Nlay] = As
    # add reflectivity upwards from the surface
    for i in range(0, Nlay):
        j = Nlay - 1 - i
        Ru[:, j] = r[:, j] + t[:, j]**2*Ru[:, j+1]/(1 - r[:, j]*Ru[:, j+1])
    # planetary albedo seen at the top of the atmosphere
    Ag = Ru[:, 0]
    # return additional per-layer quantities, if requested
    if layp != -1 and layp:
        a = 1 - r - t  # layer absorptivity by conservation of energy
        return Ag, r, t, a
    return Ag
#
#
# flux adding routine for emitting, inhomogeneous, scattering atmosphere; w/o solar sources
#
# inputs:
#
# dtau - layer extinction optical depth [Nlam,Nlay]
# g - layer asymmetry parameter [Nlam,Nlay]
# omega - layer single scattering albedo [Nlam,Nlay]
# lam - wavelength grid (um) [Nlam]
# T - temperature profile (K) [Nlev]
# Ts - surface temperature (K)
# em - surface emissivity
#
# optional:
#
# r,t,a - return layer reflectivity, transmissivity, and/or absorptivity if layp = True [Nlam,Nlay]
#
# outputs:
#
# Fu - upwelling specific flux at toa (W m**-2 um**-1)
#
def flxadd_em(dtau,g,omega,lam,T,Ts,em,layp=-1):
    """Flux-adding solution for an emitting, scattering atmosphere (no solar source).

    dtau/g/omega are layer optical depth, asymmetry parameter and
    single-scattering albedo [Nlam,Nlay]; lam is the wavelength grid (um);
    T the level temperature profile (K); Ts the surface temperature (K);
    em the surface emissivity.  Returns the TOA upwelling specific flux Fu,
    plus per-layer (r, t, a) when ``layp`` is truthy.
    """
    # small dtau to prevent divide by zero
    small_tau = 1.e-7
    # determine number of atmospheric layers, wavelength points
    Nlay = dtau.shape[1]
    Nlam = dtau.shape[0]
    Nlev = Nlay + 1
    # initialize variables
    r = np.zeros([Nlam,Nlay])
    t = np.zeros([Nlam,Nlay])
    a = np.zeros([Nlam,Nlay])
    su = np.zeros([Nlam,Nlay])
    sd = np.zeros([Nlam,Nlay])
    id = np.zeros([Nlam,Nlay])  # NOTE: shadows the builtin id() inside this function
    dBlam = np.zeros([Nlam,Nlay])
    Rd = np.zeros([Nlam,Nlev])
    Sd = np.zeros([Nlam,Nlev])
    Ru = np.zeros([Nlam,Nlev])
    Su = np.zeros([Nlam,Nlev])
    # identity matrix (array of ones used as elementwise "1")
    id[:,:] = 1.
    # special case for single-scattering albedo of unity
    ic = np.where(omega == 1)
    if ic[0].size !=0:
        r[ic] = 3/4*np.divide(np.multiply(1-g[ic],dtau[ic]),1 + np.multiply(3/4*(1-g[ic]),dtau[ic]))
        t[ic] = np.divide(1,1 + np.multiply(3/4*(1-g[ic]),dtau[ic]))
    # more general case
    ic = np.where(omega != 1)
    if (ic[0].size !=0):
        # intermediate quantities for computing layer radiative properties
        f = np.zeros([Nlam,Nlay])
        b = np.zeros([Nlam,Nlay])
        d = np.zeros([Nlam,Nlay])
        Ainf = np.zeros([Nlam,Nlay])
        f[ic] = np.sqrt(1-omega[ic])
        b[ic] = 3/2*np.sqrt((1-np.multiply(omega[ic],g[ic])))
        d = f + 2/3*b # convenient definition
        id0 = np.where(d != 0)
        if id0[0].size != 0:
            # semi-infinite-atmosphere albedo
            Ainf[id0] = np.divide(2/3*b[id0] - f[id0],d[id0])
        d = 1 - np.multiply(np.exp(-2*np.multiply(np.multiply(f[ic],b[ic]),dtau[ic])),np.power(Ainf[ic],2)) # another convenient definition
        r[ic] = np.divide(np.multiply(Ainf[ic],(1 - np.exp(-2*np.multiply(np.multiply(f[ic],b[ic]),dtau[ic])))),d)
        t[ic] = np.divide(np.multiply(1 - np.power(Ainf[ic],2),np.exp(-np.multiply(np.multiply(f[ic],b[ic]),dtau[ic]))),d)
    # layer absorptivity (by conservation of energy)
    a = id - r - t
    # clamp a away from 0 and 1 so log(1 - a) below stays finite
    ia0 = np.where(a <= id - np.exp(-small_tau))
    if ia0[0].size != 0:
        a[ia0] = 1 - np.exp(-small_tau)
    ia0 = np.where(a >= 1)
    if ia0[0].size != 0:
        a[ia0] = np.exp(-small_tau)
    # level Planck function (W m**-2 um**-1 sr**-1) [Nlam,Nlev]
    Blam = planck2D(lam,T)
    # difference across layer in Planck function
    dBlam[:,:] = Blam[:,1:] - Blam[:,:-1]
    # repeated term in source def'n (linear-in-tau Planck variation)
    corr = np.multiply(dBlam,id-a) + np.multiply(a,np.divide(dBlam,np.log(id-a)))
    # source terms (upwelling and downwelling thermal emission per layer)
    su = np.pi*(np.multiply(a,Blam[:,0:Nlay]) - corr)
    sd = np.pi*(np.multiply(a,Blam[:,1:Nlay+1]) + corr)
    # upper boundary condition: no downwelling ir flux at TOA
    Rd[:,0] = 0.
    Sd[:,0] = 0.
    # add reflectivity and source terms downwards
    for j in range(1,Nlev):
        Rd[:,j] = r[:,j-1] + t[:,j-1]**2*Rd[:,j-1]/(1 - r[:,j-1]*Rd[:,j-1])
        Sd[:,j] = sd[:,j-1] + t[:,j-1]*(Sd[:,j-1] + su[:,j-1]*Rd[:,j-1])/(1 - r[:,j-1]*Rd[:,j-1])
    # lower boundary condition: Kirchhoff surface (reflectivity 1-em, emission pi*em*B)
    Ru[:,Nlev-1] = 1 - em
    Su[:,Nlev-1] = np.pi*em*planck(lam,Ts)
    # add reflectivity and source terms upwards
    for i in range(1,Nlev):
        j = Nlev - i - 1
        Ru[:,j] = r[:,j] + t[:,j]**2*Ru[:,j+1]/(1 - r[:,j]*Ru[:,j+1])
        Su[:,j] = su[:,j] + t[:,j]*(Su[:,j+1] + sd[:,j]*Ru[:,j+1])/(1 - r[:,j]*Ru[:,j+1])
    # upwelling flux at top of atmosphere
    Fu = (Su[:,0] + Ru[:,0]*Sd[:,0])/(1 - Ru[:,0]*Rd[:,0])
    # return additional quantities, if requested
    if layp != -1:
        if layp:
            return Fu, r, t, a
    # code here would give up/down fluxes at each level
    # Fu = np.zeros([Nlam,Nlev])
    # Fd = np.zeros([Nlam,Nlev])
    # for i in range(0,Nlev):
    #   Fu[:,i] = (Su[:,i] + Ru[:,i]*Sd[:,i])/(1 - Ru[:,i]*Rd[:,i])
    #   Fd[:,i] = (Sd[:,i] + Rd[:,i]*Su[:,i])/(1 - Ru[:,i]*Rd[:,i])
    return Fu
#
#
# flux adding routine for inhomogeneous, scattering atmosphere with treatment for direct beam
#
# inputs:
#
# dtau - layer extinction optical depth [Nlam,Nlay]
# g - layer asymmetry parameter [Nlam,Nlay]
# omega - layer single scattering albedo [Nlam,Nlay]
# dtau_ray - layer rayleigh scattering optical depth [Nlam,Nlay]
# dtau_cld - layer cloud extinction optical depth [Nlam,Nlay]
# gc - packaged 1st, 2nd, and 3rd cloud scattering moments, each [Nlam,Nlay]
# phfc - 0 -> henyey-greenstein | 1-> two-term hg
# As - surface albedo
# alpha - phase angle (deg)
# threeD - contains points/weights for gauss-tchebyshev integration
#
# optional:
#
# r,t,a - return layer reflectivity, transmissivity, and/or absorptivity if layp = True [Nlam,Nlay]
#
# outputs:
#
# Ap - planetary reflectivity (geometric albedo x phase function)
#
def flxadd_3d(dtau,g,omega,dtau_ray,dtau_cld,gc,phfc,As,alpha,threeD,layp=-1):
    """Flux-adding solution with a direct-beam treatment, disk-integrated
    over the visible hemisphere via Gauss-Tchebyshev quadrature.

    Returns Ap, the planetary reflectivity (geometric albedo x phase
    function); per-layer (r, t, a) are appended when ``layp`` is truthy.
    ``gc`` packs the first three cloud phase-function moments; ``phfc``
    selects one-term (0) or two-term (1) Henyey-Greenstein.
    """
    # small dtau to prevent divide by zero
    small_tau = 1.e-7
    # unpack cloud moments
    gc1,gc2,gc3 = gc
    # determine number of atmospheric layers, wavelength points
    Nlay = dtau.shape[1]
    Nlam = dtau.shape[0]
    Nlev = Nlay + 1
    # unpack gauss and tchebyshev points and weights
    thetaG,thetaT,wG,wT = threeD
    # phase angle in radians
    ar = alpha*np.pi/180.
    # diffusivity factor (3/2 for underlying derivation)
    # D = 1.5
    # initialize variables
    r = np.zeros([Nlam,Nlay])
    t = np.zeros([Nlam,Nlay])
    a = np.zeros([Nlam,Nlay])
    su = np.zeros([Nlam,Nlay])
    sd = np.zeros([Nlam,Nlay])
    id = np.zeros([Nlam,Nlay])  # NOTE: shadows the builtin id() inside this function
    dBlam = np.zeros([Nlam,Nlay])  # unused here; retained from the emission variant
    Rd = np.zeros([Nlam,Nlev])
    Sd = np.zeros([Nlam,Nlev])
    Ru = np.zeros([Nlam,Nlev])
    Su = np.zeros([Nlam,Nlev])
    # identity matrix (array of ones used as elementwise "1")
    id[:,:] = 1.
    # special case for single-scattering albedo of unity
    ic = np.where(omega == 1)
    if ic[0].size !=0:
        r[ic] = 3/4*np.divide(np.multiply(1-g[ic],dtau[ic]),1 + np.multiply(3/4*(1-g[ic]),dtau[ic]))
        t[ic] = np.divide(1,1 + np.multiply(3/4*(1-g[ic]),dtau[ic]))
    # more general case
    ic = np.where(omega != 1)
    if (ic[0].size !=0):
        # intermediate quantities for computing layer radiative properties
        f = np.zeros([Nlam,Nlay])
        b = np.zeros([Nlam,Nlay])
        d = np.zeros([Nlam,Nlay])
        Ainf = np.zeros([Nlam,Nlay])
        f[ic] = np.sqrt(1-omega[ic])
        b[ic] = 3/2*np.sqrt((1-np.multiply(omega[ic],g[ic])))
        d = f + 2/3*b # convenient definition
        id0 = np.where(d != 0)
        if id0[0].size != 0:
            # semi-infinite-atmosphere albedo
            Ainf[id0] = np.divide(2/3*b[id0] - f[id0],d[id0])
        d = 1 - np.multiply(np.exp(-2*np.multiply(np.multiply(f[ic],b[ic]),dtau[ic])),np.power(Ainf[ic],2)) # another convenient definition
        r[ic] = np.divide(np.multiply(Ainf[ic],(1 - np.exp(-2*np.multiply(np.multiply(f[ic],b[ic]),dtau[ic])))),d)
        t[ic] = np.divide(np.multiply(1 - np.power(Ainf[ic],2),np.exp(-np.multiply(np.multiply(f[ic],b[ic]),dtau[ic]))),d)
    # layer absorptivity (by conservation; floor exactly-zero values)
    a = id - r - t
    ia0 = np.where(a == 0)
    if ia0[0].size != 0:
        a[ia0] = 1 - np.exp(-small_tau)
    # fraction of scattering in each layer due to Rayleigh [Nlam x Nlay]
    fray = np.zeros([Nlam,Nlay])
    fray[:,:] = 1.
    ic0 = np.where(dtau_cld > small_tau)
    cld = False
    if (ic0[0].size != 0):
        cld = True
        fray = np.divide(dtau_ray,np.multiply(omega,dtau))
    # scattering angle (trivial here -- general logic commented out below)
    cosTh = np.cos(np.pi - ar)
    # integrated optical depths
    tau = np.zeros([Nlam,Nlev])
    tau[:,1:] = np.cumsum(dtau,axis=1)
    taum = 0.5*(tau[:,1:] + tau[:,:-1]) # integrated optical depth to mid-levels [Nlam x Nlay]
    ftau = dtau/2*np.exp(-dtau) # direct beam correction term
    # pixel geometry-insensitive cloud scattering terms
    if cld:
        if (phfc == 0): # single-term henyey-greenstein
            pc = pHG(gc1,cosTh)
        if (phfc == 1): # improved two-term henyey-greenstein; zhang & li (2016) JQSRT 184:40
            # defn following Eqn 6 of Z&L2016; note: g = gc1; h = gc2; l = gc3
            w = np.power(np.multiply(gc1,gc2)-gc3,2) - np.multiply(4*(gc2-np.power(gc1,2)),np.multiply(gc1,gc3)-np.power(gc2,2))
            iw0 = np.where(w < 0)
            if (iw0[0].size != 0):
                w[iw0] = 1.e-5
            de = 2*(gc2-np.power(gc1,2))
            g1 = np.divide(gc3 - np.multiply(gc1,gc2) + np.power(w,0.5),de) # eqn 7a
            g2 = np.divide(gc3 - np.multiply(gc1,gc2) - np.power(w,0.5),de) # eqn 7b
            al = 0.5*id + np.divide(3*np.multiply(gc1,gc2)-2*np.power(gc1,3)-gc3,2*np.power(w,0.5)) # eqn 7c
            iw0 = np.where(w < 1.e-4) # avoiding divide by zero in extreme forward scattering case
            if (iw0[0].size != 0):
                g1[iw0] = gc1[iw0]
                g2[iw0] = 0.
                al[iw0] = 1.
            ip0 = np.where(np.abs(g2) > np.abs(g1))
            if (ip0[0].size != 0):
                g2[ip0] = -g1[ip0]
            pc = np.multiply(al,pHG(g1,cosTh)) + np.multiply(id-al,pHG(g2,cosTh))
    # loop over gauss and tchebyshev points
    F0 = 1. # normal incidence flux
    Ap = np.zeros(Nlam) # planetary reflectivity
    for i in range(len(thetaG)):
        nu = 0.5*(thetaG[i] - (np.cos(ar)-1)/(np.cos(ar)+1))*(np.cos(ar)+1) # H&L Eqn 9
        for j in range(len(thetaT)):
            # compute solar and observer zenith angles; horak & little (1965)
            mu0 = np.sin(np.arccos(thetaT[j]))*np.cos(np.arcsin(nu)-ar) # solar incidence; H&L Eqn 1
            mu1 = np.sin(np.arccos(thetaT[j]))*np.cos(np.arcsin(nu)) # observer zenith; H&L Eqn 2
            # convoluted, unneeded geometry to get scattering angle
            #fac = ((1-mu0**2)**0.5)*((1-mu1**2)**0.5)
            #if (fac > 0):
            #  cosphi = (mu0*mu1 - np.cos(ar))/fac # H&L Eqn 3
            #  cosTh = -(mu0*mu1 - fac*cosphi) # scattering angle
            #else:
            #  cosTh = 0.
            # direct beam treatment
            Idir0 = F0*np.exp(-(tau[:,0:-1]+ftau)/mu0) # solar intensity that reaches a given layer
            dIdir = np.multiply(fray,Idir0)*pray(cosTh)/(4*np.pi) # intensity rayleigh scattered to observer in each layer
            if cld:
                dIdir = dIdir + np.multiply(np.multiply(id-fray,pc),Idir0)/(4*np.pi) # intensity cloud scattered to observer in each layer
            dFdi = np.multiply(dIdir,np.multiply(omega,id-np.exp(-dtau))) # layer upwelling flux going into direct beam
            dIdir = np.multiply(dIdir,np.multiply(omega,id-np.exp(-dtau/mu1))) # scale with single scattering albedo, layer optical depth
            # direct beam total scattered flux in each layer [Nlam x Nlay]
            dF = np.multiply(Idir0,np.multiply(omega,id-np.exp(-dtau)))
            # portions that enter diffuse up and down streams
            sd = np.multiply(dF,fray)/2
            if cld:
                if (phfc == 0): # single-term henyey-greenstein
                    fdc = (pHG_int(gc1,(1-mu0**2)**0.5) + pHG_int(gc1,-(1-mu0**2)**0.5))/4
                if (phfc == 1): # improved two-term henyey-greenstein
                    fr = (pHG_int(g1,(1-mu0**2)**0.5) + pHG_int(g1,-(1-mu0**2)**0.5))/4
                    bk = (pHG_int(g2,(1-mu0**2)**0.5) + pHG_int(g2,-(1-mu0**2)**0.5))/4
                    fdc = np.multiply(al,fr) + np.multiply(id-al,bk)
                sd = sd + np.multiply(fdc,np.multiply(dF,id-fray))
            su = dF - sd - dFdi # from conservation
            # upper boundary condition: no downwelling ir flux at TOA
            Rd[:,0] = 0.
            Sd[:,0] = 0.
            # add reflectivity and source terms downwards
            for k in range(1,Nlev):
                Rd[:,k] = r[:,k-1] + t[:,k-1]**2*Rd[:,k-1]/(1 - r[:,k-1]*Rd[:,k-1])
                Sd[:,k] = sd[:,k-1] + t[:,k-1]*(Sd[:,k-1] + su[:,k-1]*Rd[:,k-1])/(1 - r[:,k-1]*Rd[:,k-1])
            # lower boundary condition: Lambert surface lit by attenuated direct beam
            Ru[:,Nlev-1] = As
            Su[:,Nlev-1] = As*mu0*F0*np.exp(-tau[:,-1]/mu0)
            # add reflectivity and source terms upwards
            for kk in range(1,Nlev):
                k = Nlev - kk - 1
                Ru[:,k] = r[:,k] + t[:,k]**2*Ru[:,k+1]/(1 - r[:,k]*Ru[:,k+1])
                Su[:,k] = su[:,k] + t[:,k]*(Su[:,k+1] + sd[:,k]*Ru[:,k+1])/(1 - r[:,k]*Ru[:,k+1])
            # attenuate direct beam as it exits atmosphere; sum intensities from each layer
            dIdir = np.multiply(dIdir,np.exp(-(tau[:,0:-1]+ftau)/mu1))
            Idir = np.sum(dIdir,axis=1)
            # upwelling flux at top of atmosphere
            Fu = (Su[:,0] + Ru[:,0]*Sd[:,0])/(1 - Ru[:,0]*Rd[:,0])
            # sum over quadrature points
            Ap = Ap + (Idir + Fu/np.pi)*wG[i]*wT[j]
    # return additional quantities, if requested
    if layp != -1:
        if layp:
            return Ap*(np.cos(ar) + 1), r, t, a
    return Ap*(np.cos(ar) + 1)
#
#
# planetary spectrum model
#
# inputs:
#
# Nlev - number of vertical levels to use in model
# Rp - planetary radius (R_Earth)
# a - orbital distance (au)
# As - grey surface albedo
# em - surface emissivity
# p - pressure profile (Pa)
# t - temperature profile (K)
# t0 - "surface" temperature (K)
# m - atmospheric mean molecular weight (kg/molecule)
# z - altitude profile (m)
# grav - gravitational acceleration profile (m/s/s)
# Ts - host star temperature (K)
# Rs - host star radius (Rsun)
# ray - True -> do rayleigh scattering ; False -> no rayleigh scattering
# ray0 - gas rayleigh cross sections at 0.4579 um (m**2/molec) (see set_gas_info)
# rayb - background gas rayleigh cross section relative to Ar
# f - vector of molecular mixing ratios; order: Ar, CH4, CO2, H2, H2O, He, N2, O2, O3
# fb - background gas mixing ratio
# mmr - if true, interprets mixing ratios as mass mixing ratios; vmrs otherwise
# nu - refractivity at STP (averaged over column)
# threeD - disk integration quantities (Gauss/Tchebyshev points and weights)
# gasid - names of radiatively active gases (see set_gas_info)
# species_l - names of species to include as line absorbers; options: ch4, co2, h2o, o2, o3
# species_c - names of species to include as cia; options: co2, h2, n2, o2
# ref - include refraction in transit case if true
# cld - True -> include cloud ; False -> no cloud
# sct - include forward scattering correction in transit case if true
# fc - cloud fractional coverage
# pt - top-of-cloud pressure (Pa)
# dpc - cloud thickness (Pa)
# tauc - cloud optical thickness (extinction)
# src - source type (diff -> diffuse reflectance; thrm -> thermal sources)
# g0 - cloud asymmetry parameter (Nmom x len(lam))
# w0 - cloud single scattering albedo (len(lam))
# sigma_interp- line absorber pressure interpolation function
# cia_interp- cia coefficient temperature interpolation function
# lam - wavelength grid for output (um)
#
# options:
#
# pf - if set, line opacities are interpolated to this fixed pressure (Pa)
# tf - if set, cia coefficients are interpolated to this fixed temperature
#
# outputs:
#
# Ap - planetary albedo, akin to geometric albedo [Nlam]
# FpFs - planet-to-star flux ratio [Nlam]
#
def gen_spec(Nlev,Rp,a,As,em,p,t,t0,m,z,grav,Ts,Rs,ray,ray0,rayb,f,fb,
             mmw0,mmr,ref,nu,alpha,threeD,
             gasid,ncia,ciaid,species_l,species_c,
             cld,sct,phfc,fc,pt,dpc,g0,w0,tauc,
             src,sigma_interp,cia_interp,lam,pf=-1,tf=-1):
    """Generate a planetary spectrum (albedo, flux ratio, brightness
    temperature, or transit depth, depending on ``src``).

    Builds layer opacities (line, CIA, Rayleigh, optional cloud), then calls
    the appropriate flux-adding / transit routine; clear and cloudy results
    are mixed by cloud fraction ``fc``.  See the comment block preceding this
    function for the full parameter listing.
    """
    # constants
    Re = 6.378e6 # earth radius (m)
    Rsun = 6.957e8 # solar radius (m)
    au = 1.496e11 # au (m)
    kB = 1.38064852e-23 # m**2 kg s**-2 K**-1
    Na = 6.0221408e23 # Avogadro's number
    # small optical depth to prevent divide by zero
    small_tau = 1e-10
    # number of wavelength points
    Nlam = len(lam)
    # number of layers
    Nlay = Nlev - 1
    # midpoint grids and pressure change across each layer
    dp = p[1:] - p[:-1] # pressure difference across each layer
    pm = 0.5*(p[1:] + p[:-1]) # mid-layer pressure
    tm = 0.5*(t[1:] + t[:-1]) # mid-layer temperature
    nm = pm/kB/tm # mid-layer number density, ideal gas law (m**-3)
    gravm = 0.5*(grav[1:] + grav[:-1]) # mid-layer gravity
    fm = 0.5*(f[:,1:] + f[:,:-1]) # mid-layer mixing ratios
    fbm = 0.5*(fb[1:] + fb[:-1]) # mid-layer background gas mixing ratios
    mm = 0.5*(m[1:] + m[:-1]) # mid-layer mean molecular weight
    # layer column number density (molecules/m**2) or mass density (kg/m**2)
    if mmr:
        dMc = dp/gravm
    else:
        dNc = dp/gravm/mm
    # interpolate line opacities onto p/T grid, cannot be less than zero
    if ( np.any( pf == -1) and np.any( tf == -1) ): # varying p/t case
        sigma = np.power(10,sigma_interp(np.log10(pm),1/tm))
    elif ( np.any( pf != -1) and np.any( tf == -1) ): # fixed p case
        sigma = np.power(10,sigma_interp(1/tm))
    elif ( np.any( pf == -1) and np.any( tf != -1) ): # fixed t case
        sigma = np.power(10,sigma_interp(np.log10(pm)))
    else: # fixed p and t
        sigma0 = np.power(10,sigma_interp())
        sigma = np.repeat(sigma0[:,np.newaxis,:], Nlay, axis=1)
    izero = np.where(sigma < 0)
    sigma[izero] = 0
    # interpolate cia coefficients onto temperature grid, cannot be less than zero
    if ( np.any( tf == -1) ): # varying temp case
        kcia = cia_interp(1/tm)
    else:
        kcia0 = cia_interp(1/tf)
        kcia = np.zeros([kcia0.shape[0],Nlay,Nlam])
        for isp in range(0,kcia0.shape[0]):
            kcia[isp,:,:] = np.repeat(kcia0[isp,np.newaxis,:], Nlay, axis=0)
    izero = np.where(kcia < 0)
    kcia[izero] = 0
    # rayleigh scattering opacities
    if ray:
        if mmr:
            sigma_ray = rayleigh(lam,ray0/(mmw0/1.e3/Na),fm,fbm,rayb) # m**2/kg
        else:
            sigma_ray = rayleigh(lam,ray0,fm,fbm,rayb) # m**2/molec
    else:
        sigma_ray = np.zeros([Nlay,Nlam])
    # line absorber opacity
    sigma_gas = np.zeros([Nlay,Nlam])
    for isp in range(0,len(species_l)):
        idg = gasid.index(np.char.lower(species_l[isp]))
        if mmr:
            sigma_gas = sigma_gas + fm[idg,:,np.newaxis]*sigma[isp,:,:]/(mmw0[idg]/1.e3/Na) # m**2/kg
        else:
            sigma_gas = sigma_gas + fm[idg,:,np.newaxis]*sigma[isp,:,:] # m**2/molec
    # cia opacity (m**2/molec, or m**2/kg if mmr is true)
    sigma_cia = np.zeros([Nlay,Nlam])
    icia = 0
    for isp in range(0,len(species_c)):
        idg1 = gasid.index(np.char.lower(species_c[isp]))
        mmw1 = mmw0[idg1]
        for ipar in range(0,ncia[isp]):
            # case where background is everything but absorber
            if (np.char.lower(ciaid[ipar+1,isp]) == 'x'):
                ff = fm[idg1,:]*(1-fm[idg1,:])
                # NOTE(review): units differ from the branch below (*Na*1e3 vs
                # /Na/1e3) -- confirm this asymmetry is intended
                mmw2 = mm*Na*1e3
            # case where partner is a specified gas
            else:
                idg2 = gasid.index(np.char.lower(ciaid[ipar+1,isp]))
                ff = fm[idg1,:]*fm[idg2,:]
                mmw2 = mmw0[idg2]/Na/1e3
            if mmr:
                fac = mm/mmw1/mmw2 # factor so that sigma is in units of m**2/kg
                sigma_cia = sigma_cia + ff[:,np.newaxis]*np.transpose(np.multiply(np.transpose(kcia[icia,:,:]),nm*fac))
            else:
                sigma_cia = sigma_cia + ff[:,np.newaxis]*np.transpose(np.multiply(np.transpose(kcia[icia,:,:]),nm))
            icia = icia + 1
    # total opacity (m**2/molec, or m**2/kg if mmr is true)
    sigma_tot = sigma_gas + sigma_ray + sigma_cia
    # clearsky layer optical depth, single scattering albedo, and asymmetry parameter
    if mmr:
        dtau = np.multiply(np.transpose(sigma_tot),dMc)
        dtau_ray = np.multiply(np.transpose(sigma_ray),dMc)
    else:
        dtau = np.multiply(np.transpose(sigma_tot),dNc)
        dtau_ray = np.multiply(np.transpose(sigma_ray),dNc)
    g = np.zeros([Nlam,Nlay]) # note that g=0 for Rayleigh scattering
    if (not ray):
        # pad with a tiny Rayleigh depth so downstream divisions are safe
        dtau_ray = np.zeros([Nlam,Nlay])
        dtau_ray[:] = small_tau
        dtau = dtau + dtau_ray
        omega = np.zeros([Nlam,Nlay])
    else:
        omega = np.divide(dtau_ray,dtau)
    # transit refractive floor correction (robinson et al. 2017)
    pref = max(p)
    if (src == 'trns' and ref):
        pref = refract_floor(nu,t,Rs,a,Rp,m,grav)
    # call radiative transfer model (clearsky)
    if (src == 'diff' or src == 'cmbn'):
        Ap = flxadd(dtau,g,omega,As)*2/3 # factor of 2/3 converts to geometric albedo, for Lambert case
    if (src == 'thrm' or src == 'scnd' or src == 'cmbn'):
        Flam = flxadd_em(dtau,g,omega,lam,t,t0,em)
    if (src == 'trns'):
        td = transit_depth(Rp,Rs,z,dtau,p,ref,pref)
    if (src == 'phas'):
        dtau_cld = np.zeros([Nlam,Nlay])
        gc1 = np.zeros([Nlam,Nlay]) # first moment
        gc2 = np.zeros([Nlam,Nlay]) # second moment
        gc3 = np.zeros([Nlam,Nlay]) # third moment
        gc = gc1,gc2,gc3
        Ap = flxadd_3d(dtau,g,omega,dtau_ray,dtau_cld,gc,phfc,As,alpha,threeD)
    # if doing clouds
    if cld:
        # unpack first, second and third moments of phase function
        g1,g2,g3 = g0
        # cloudy optical properties
        dtaug = np.copy(dtau)
        dtau_cld = np.zeros([Nlam,Nlay])
        ip = np.argwhere( (p < pt+dpc) & (p >= pt) )
        #
        # logic for cloud uniformly distributed in pressure
        dtau_cld[:,ip[np.where(ip < Nlay)]] = dp[ip[np.where(ip < Nlay)]]/dpc
        # rough logic for exponentially-distributed cloud
        # logs = np.logspace(np.log10(1.e-3*max(tauc)),np.log10(max(tauc)),len(ip))
        # logs = logs/np.sum(logs)
        # dtau_cld[:,ip[:,0]] = logs
        #
        dtau_cld = np.multiply(dtau_cld,np.repeat(tauc[:,np.newaxis], Nlay, axis=1))
        wc = np.transpose(np.repeat(w0[np.newaxis,:], Nlay, axis=0))
        gc1 = np.transpose(np.repeat(g1[np.newaxis,:], Nlay, axis=0)) # first moment
        gc2 = np.transpose(np.repeat(g2[np.newaxis,:], Nlay, axis=0)) # second moment
        gc3 = np.transpose(np.repeat(g3[np.newaxis,:], Nlay, axis=0)) # third moment
        dtau_cld_s = np.multiply(wc,dtau_cld)
        omega = np.divide(dtau_ray + dtau_cld_s,dtau + dtau_cld)
        g = np.divide(np.multiply(gc1,dtau_cld_s),dtau_ray + dtau_cld_s)
        dtau = dtaug + dtau_cld
        gc = gc1,gc2,gc3
        # transit forward scattering correction (robinson et al. 2017)
        if (src == 'trns' and sct):
            # NOTE(review): this local ``f`` shadows the mixing-ratio parameter
            # ``f`` (unused after this point, so behavior is unaffected)
            f = np.zeros([Nlam,Nlay])
            ifor = np.where(gc1 >= 1 - 0.1*(1-np.cos(Rs*Rsun/a/au)))
            f[ifor] = np.multiply(np.divide((1 - np.power(gc1[ifor],2)),2*gc1[ifor]),np.power(1-gc1[ifor],-1)-np.power(1+np.power(gc1[ifor],2)-2*gc1[ifor]*np.cos(Rs*Rsun/a/au),-0.5))
            dtau[ifor] = np.multiply(1-np.multiply(f[ifor],wc[ifor]),dtau_cld[ifor]) + dtaug[ifor]
        # call radiative transfer model (cloudy)
        if (src == 'diff' or src == 'cmbn'):
            Apc = flxadd(dtau,g,omega,As)*2/3 # factor of 2/3 converts to geometric albedo, for Lambert case
        if (src == 'thrm' or src == 'scnd' or src == 'cmbn'):
            Flamc = flxadd_em(dtau,g,omega,lam,t,t0,em)
        if (src == 'trns'):
            td = transit_depth(Rp,Rs,z,dtau,p,ref,pref)
        if (src == 'phas'):
            Apc = flxadd_3d(dtau,g,omega,dtau_ray,dtau_cld,gc,phfc,As,alpha,threeD)
        # weighted albedo or flux (cloud-fraction mix of clear and cloudy)
        if (src == 'diff' or src == 'cmbn' or src == 'phas'):
            Ap = (1-fc)*Ap + fc*Apc
        if (src == 'thrm' or src == 'scnd' or src == 'cmbn'):
            Flam = (1-fc)*Flam + fc*Flamc
    # planet-to-star flux ratio, brightness temp, or effective transit altitude
    if (src == 'diff' or src == 'phas'):
        FpFs = Ap*(Rp*Re/a/au)**2
    if (src == 'thrm' or src == 'scnd'):
        Tbrt = Tbright(lam,Flam)
    if (src == 'cmbn'):
        FpFs = Ap*(Rp*Re/a/au)**2 + Flam/(np.pi*planck(lam,Ts))*(Rp/Rs)**2*((Re/Rsun)**2)
    if (src == 'scnd'):
        FpFs = Flam/(np.pi*planck(lam,Ts))*(Rp/Rs)**2*((Re/Rsun)**2)
    if (src == 'trns'):
        zeff = Rs*Rsun*td**0.5 - Rp*Re
    # return quantities (pair depends on source type)
    if (src == 'diff' or src == 'phas'):
        ret = Ap,FpFs
    if (src == 'thrm'):
        ret = Tbrt,Flam
    if (src == 'scnd'):
        ret = Tbrt,FpFs
    if (src == 'cmbn'):
        ret = Ap,FpFs
    if (src == 'trns'):
        ret = zeff,td
    return ret
#
#
# spectral grid routine, pieces together multiple wavelength
# regions with differing resolutions
#
# inputs:
#
# res - spectral resolving power (x/dx)
# x_min - minimum spectral cutoff
# x_max - maximum spectral cutoff
#
# outputs:
#
# x - center of spectral gridpoints
# Dx - spectral element width
#
def gen_spec_grid(x_min,x_max,res,Nres=0):
    """Assemble a spectral grid from one or more (x_min, x_max, res) segments.

    Each segment is padded by Nres resolution elements on both sides (never
    below x_min/1e3); multi-segment grids are merged and sorted.  Returns the
    gridpoint centers x and element widths Dx.
    """
    def _segment(lo, hi, r):
        # one padded call to spectral_grid for a single wavelength region
        floor = lo/1e3
        start = max(floor, lo - lo/r*Nres)
        stop = hi + hi/r*Nres
        return spectral_grid(start, stop, res=r)
    if len(x_min) == 1:
        x, Dx = _segment(x_min, x_max, res)
    else:
        x, Dx = _segment(x_min[0], x_max[0], res[0])
        for k in range(1, len(x_min)):
            xk, Dxk = _segment(x_min[k], x_max[k], res[k])
            x = np.concatenate((x, xk))
            Dx = np.concatenate((Dx, Dxk))
        # keep widths paired with their gridpoints while sorting
        Dx = [w for _, w in sorted(zip(x, Dx))]
        x = np.sort(x)
    return np.squeeze(x),np.squeeze(Dx)
#
#
# routine to read in inputs from script
#
# inputs:
#
# filename_scr - filename containing input parameters and values
#
#
# outputs:
#
# (a long collection of all parameters needed to run models)
#
def inputs(filename_scr):
# flags for planet mass and gravity inputs
mf = False
gf = False
# read inputs
with open(filename_scr) as f:
for line in f:
line = line.partition('#')[0] # split line at '#' symbol
line = line.strip() # trim whitespace
vn = line.partition('=')[0] # variable name
vn = vn.strip()
vv = line.partition('=')[2] # variable value
vv = vv.strip()
if (vn.lower() == 'fnr' ):
fnr = vv
elif (vn.lower() == 'fns' ):
fns = vv
elif (vn.lower() == 'fnn' ):
fnn = vv
elif (vn.lower() == 'dirout' ):
dirout = vv
elif (vn.lower() == 'fnatm' ):
fnatm = vv
elif (vn.lower() == 'fntmp' ):
fntmp = vv
elif (vn.lower() == 'skpatm'):
skpatm = int(vv)
elif (vn.lower() == 'skptmp'):
skptmp = int(vv)
elif (vn.lower() == 'colpr'):
colpr = int(vv)
elif (vn.lower() == 'colpt'):
colpt = int(vv)
elif (vn.lower() == 'colt'):
colt = int(vv)
elif (vn.lower() == 'psclr'):
psclr = float(vv)
elif (vn.lower() == 'psclt'):
psclt = float(vv)
elif (vn.lower() == 'imix'):
imix = int(vv)
elif (vn.lower() == 'pmin'):
pmin = float(vv)
elif (vn.lower() == 'pmax'):
pmax = float(vv)
elif (vn.lower() == 't0'):
t0 = float(vv)
elif (vn.lower() == 'rp'):
Rp = float(vv)
elif (vn.lower() == 'mp'):
Mp = float(vv)
mf = True
elif (vn.lower() == 'gp'):
gp = float(vv)
gf = True
elif (vn.lower() == 'a'):
a = float(vv)
elif (vn.lower() == 'as'):
As = float(vv)
elif (vn.lower() == 'em'):
em = float(vv)
elif (vn.lower() == 'phfc'):
phfc = int(vv)
elif (vn.lower() == 'w'):
w = float(vv)
elif (vn.lower() == 'g1'):
g1 = float(vv)
elif (vn.lower() == 'g2'):
g2 = float(vv)
elif (vn.lower() == 'g3'):
g3 = float(vv)
elif (vn.lower() == 'pt'):
pt = float(vv)
elif (vn.lower() == 'dpc'):
dpc = float(vv)
elif (vn.lower() == 'tauc0'):
tauc0 = float(vv)
elif (vn.lower() == 'lamc0'):
lamc0 = float(vv)
elif (vn.lower() == 'fc'):
fc = float(vv)
elif (vn.lower() == 'pf'):
pf = float(vv)
elif (vn.lower() == 'tf'):
tf = float(vv)
elif (vn.lower() == 'smpl'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
smpl = [float(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
smpl = np.concatenate((smpl,[float(vv[0])]))
else:
smpl = np.zeros(1)
smpl[:] = float(vv[0])
elif (vn.lower() == 'opdir' ):
opdir = vv
elif (vn.lower() == 'snr0'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
snr0 = [float(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
snr0 = np.concatenate((snr0,[float(vv[0])]))
else:
snr0 = np.zeros(1)
snr0[:] = float(vv[0])
elif (vn.lower() == 'lam0'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
lam0 = [float(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
lam0 = np.concatenate((lam0,[float(vv[0])]))
else:
lam0 = np.zeros(1)
lam0[:] = float(vv[0])
elif (vn.lower() == 'ts'):
Ts = float(vv)
elif (vn.lower() == 'rs'):
Rs = float(vv)
elif (vn.lower() == 'p10'):
p10 = float(vv)
elif (vn.lower() == 'nlev'):
Nlev = int(vv)
elif (vn.lower() == 'alpha'):
alpha = float(vv)
elif (vn.lower() == 'ntg'):
ntg = int(vv)
elif (vn.lower() == 'bg'):
bg = vv.strip()
elif (vn.lower() == 'ray'):
ray = True
if (vv == 'False'):
ray = False
elif (vn.lower() == 'cld'):
cld = True
if (vv == 'False'):
cld = False
elif (vn.lower() == 'ref'):
ref = True
if (vv == 'False'):
ref = False
elif (vn.lower() == 'sct'):
sct = True
if (vv == 'False'):
sct = False
elif (vn.lower() == 'fixp'):
fixp = True
if (vv == 'False'):
fixp = False
elif (vn.lower() == 'fixt'):
fixt = True
if (vv == 'False'):
fixt = False
elif (vn.lower() == 'rnd'):
rnd = True
if (vv == 'False'):
rnd = False
elif (vn.lower() == 'ntype'):
ntype = vv.strip()
elif (vn.lower() == 'src'):
src = vv.strip()
elif (vn.lower() == 'lams'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
lams = [float(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
lams = np.concatenate((lams,[float(vv[0])]))
else:
lams = np.zeros(1)
lams[:] = float(vv[0])
elif (vn.lower() == 'laml'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
laml = [float(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
laml = np.concatenate((laml,[float(vv[0])]))
else:
laml = np.zeros(1)
laml[:] = float(vv[0])
elif (vn.lower() == 'res'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
res = [float(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
res = np.concatenate((res,[float(vv[0])]))
else:
res = np.zeros(1)
res[:] = float(vv[0])
elif (vn.lower() == 'f0'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
f0 = [float(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
f0 = np.concatenate((f0,[float(vv[0])]))
else:
f0 = np.zeros(1)
f0[:] = float(vv[0])
elif (vn.lower() == 'colr'):
vv = vv.partition(',')
if (len(vv[2]) > 0):
colr = [int(vv[0])]
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
colr = np.concatenate((colr,[int(vv[0])]))
else:
colr = np.zeros(1)
colr[:] = int(vv[0])
elif (vn.lower() == 'regrid'):
regrid = True
if (vv == 'False'):
regrid = False
elif (vn.lower() == 'species_r'):
if (vv.isspace() or len(vv) == 0):
species_r = []
else:
vv = vv.partition(',')
species_r = [vv[0].strip()]
if (len(vv[2]) > 0):
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
species_r = np.concatenate((species_r,[vv[0].strip()]))
elif (vn.lower() == 'species_l'):
if (vv.isspace() or len(vv) == 0):
species_l = []
else:
vv = vv.partition(',')
species_l = [vv[0].strip()]
if (len(vv[2]) > 0):
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
species_l = np.concatenate((species_l,[vv[0].strip()]))
elif (vn.lower() == 'species_c'):
if (vv.isspace() or len(vv) == 0):
species_c = []
else:
vv = vv.partition(',')
species_c = [vv[0].strip()]
if (len(vv[2]) > 0):
while (len(vv[2]) > 0):
vv = vv[2].partition(',')
species_c = np.concatenate((species_c,[vv[0].strip()]))
elif (vn.lower() == 'restart'):
restart = True
if (vv == 'False'):
restart = False
elif (vn.lower() == 'fp10'):
fp10 = True
if (vv == 'False'):
fp10 = False
elif (vn.lower() == 'rdgas'):
rdgas = True
if (vv == 'False'):
rdgas = False
elif (vn.lower() == 'rdtmp'):
rdtmp = True
if (vv == 'False'):
rdtmp = False
elif (vn.lower() == 'mmr'):
mmr = False
if (vv == 'True'):
mmr = True
elif (vn.lower() == 'clr'):
clr = False
if (vv == 'True'):
clr = True
elif (vn.lower() == 'fmin'):
fmin = float(vv)
elif (vn.lower() == 'nwalkers'):
nwalkers = int(vv)
elif (vn.lower() == 'nstep'):
nstep = int(vv)
elif (vn.lower() == 'nburn'):
nburn = int(vv)
elif (vn.lower() == 'thin'):
thin = int(vv)
elif (vn.lower() == 'grey'):
grey = True
if (vv == 'False'):
grey = False
elif (vn.lower() == 'progress'):
progress = True
if (vv == 'False'):
progress = False
# set pf to -1 if user does not want iso-pressure opacities
if (not fixp):
pf = -1
# set tf to -1 if user does not want iso-temperature opacities
if (not fixt):
tf = -1
# check for consistency between wavelength grid and resolution grid
if (lams.shape[0] > 1 and lams.shape[0] != res.shape[0]):
print("rfast warning | major | smpl length inconsistent with wavelength grid")
quit()
# check for consistency between resolution grid and over-sample factor
if (smpl.shape[0] > 1 and smpl.shape[0] != res.shape[0]):
print("rfast warning | major | smpl length inconsistent with resolution grid")
quit()
# check for consistency between resolution grid and snr0 parameter
if (snr0.shape[0] > 1 and snr0.shape[0] != res.shape[0]):
print("rfast warning | major | snr0 length inconsistent with wavelength grid")
quit()
# check for consistency between resolution grid and lam0 parameter
if (lam0.shape[0] > 1 and lam0.shape[0] != res.shape[0]):
print("rfast warning | major | lam0 length inconsistent with wavelength grid")
quit()
# check that snr0 is within applicable wavelength range
if (lam0.shape[0] > 1):
for i in range(lam0.shape[0]):
if (lam0[i] < min(lams) or lam0[i] > max(laml)):
print("rfast warning | major | lam0 outside wavelength grid")
quit()
else:
if (lam0[0] < min(lams) or lam0[0] > max(laml)):
print("rfast warning | major | lam0 outside wavelength grid")
quit()
# complete directory path if '/' is omitted
if (len(opdir) > 0 and opdir[-1] != '/'):
opdir = opdir + '/'
if (len(opdir) == 0):
opdir = './hires_opacities/'
# check if opacities directory exists
if (not os.path.isdir(opdir)):
print("rfast warning | major | opacities directory does not exist")
quit()
# check if output directory exist; create if it does not
if (len(dirout) > 0 and dirout[-1] != '/'):
dirout = dirout + '/'
if (len(dirout) > 0 and not os.path.isdir(dirout) and sys.argv[0] == 'rfast_genspec.py'):
print("rfast warning | minor | output directory does not exist, attempting to create")
os.makedirs(dirout)
elif (len(dirout) > 0 and not os.path.isdir(dirout)):
print("rfast warning | minor | output directory does not exist, use current directory")
dirout = os.getcwd() + '/'
# check for mixing ratio issues
if (np.sum(f0) - 1 > 1.e-6 and not rdgas):
if (np.sum(f0) - 1 < 1.e-3):
print("rfast warning | minor | input gas mixing ratios sum to slightly above unity")
else:
print("rfast warning | major | input gas mixing ratios sum to much above unity")
quit()
# set gravity if gp not set
if (not gf):
gp = 9.798*Mp/Rp**2
# set planet mass if not set
if (not mf):
Mp = (gp/9.798)*Rp**2
# cannot have both Mp and gp
if (mf and gf):
print("rfast warning | major | cannot independently set planet mass and gravity in inputs")
quit()
# cloud base cannot be below bottom of atmosphere
if (pt+dpc > pmax):
print("rfast warning | major | cloud base below bottom of atmosphere")
quit()
# transit radius pressure cannot be larger than max pressure
if (p10 > pmax):
print("rfast warning | major | transit radius pressure below bottom of atmosphere")
quit()
return fnr,fnn,fns,dirout,Nlev,pmin,pmax,bg,\
species_r,f0,rdgas,fnatm,skpatm,colr,colpr,psclr,imix,\
t0,rdtmp,fntmp,skptmp,colt,colpt,psclt,\
species_l,species_c,\
lams,laml,res,regrid,smpl,opdir,\
Rp,Mp,gp,a,As,em,\
grey,phfc,w,g1,g2,g3,pt,dpc,tauc0,lamc0,fc,\
ray,cld,ref,sct,fixp,pf,fixt,tf,p10,fp10,\
src,\
alpha,ntg,\
Ts,Rs,\
ntype,snr0,lam0,rnd,\
clr,fmin,mmr,nwalkers,nstep,nburn,thin,restart,progress
#
#
# initializes opacities and convolution kernels
#
# inputs:
#
# lam_lr - low-resolution wavelength grid midpoints (um)
# dlam_lr - low-resolution wavelength grid widths (um)
# lam_hr - high-resolution wavelength grid midpoints (um)
# mode - optional, indicates photometry vs. spectroscopy
# species_l - list of line absorbers to include
# species_c - list of cia absorbers to include
# opdir - directory where hi-res opacities are located (string)
# pf - pressure for iso-pressure case (Pa)
# tf - temperature for iso-temperature case (K)
#
# outputs:
#
# sigma_interp- line absorber opacity interpolation function (m**2/molecule)
# sigma_cia- cia coefficient interpolation function (m**-1 m**-6)
# kern - convolution kernel
#
def init(lam_lr,dlam_lr,lam_hr,species_l,species_c,opdir,pf,tf,mode=-1):
  # gaussian kernel for later degrading
  kern = kernel(lam_lr,lam_hr,Dx = dlam_lr,mode = mode)
  # read in line absorbers
  # sigma is indexed [species, pressure, temperature, wavelength]
  # (established by the gradient axes and the z[:,ix,iy,:] indexing below)
  press,temp,sigma = opacities_read(species_l,lam_hr,opdir)
  # setup up opacities interpolation routine
  # interpolation coordinates: log10(pressure) and inverse temperature
  x = np.log10(press)
  y = 1/temp
  z = np.log10(sigma)
  # gradients in log-pressure, inverse temperature
  gradx = np.gradient(z,x,axis=1)
  grady = np.gradient(z,y,axis=2)
  # define function that interpolates opacities to p, T grid
  # x0 is log10(pressure), y0 is inverse temperature
  # x0, y0 are vectors of identical length (e.g., atm. model)
  # returns log10(opacity) in m**2/molecule
  def sigma_interp_2D(x0,y0):
    # finds gridpoints nearest to interpolation points
    dx = np.subtract.outer(x,x0)
    ix = np.argmin(np.absolute(dx),axis=0)
    dy = np.subtract.outer(y,y0)
    iy = np.argmin(np.absolute(dy),axis=0)
    # matrices of distances from interpolation points,
    # broadcast across the species (axis 0) and wavelength (axis 3) dimensions
    dx = x[ix]-x0
    dx = np.repeat(dx[np.newaxis,:], gradx.shape[0], axis=0)
    dx = np.repeat(dx[:,:,np.newaxis], gradx.shape[3], axis=2)
    dy = y[iy]-y0
    dy = np.repeat(dy[np.newaxis,:], grady.shape[0], axis=0)
    dy = np.repeat(dy[:,:,np.newaxis], grady.shape[3], axis=2)
    # interpolate using gradients, distances
    # (first-order Taylor expansion about the nearest gridpoint; note
    # z - grad*(x_grid - x0) == z + grad*(x0 - x_grid))
    z0 = z[:,ix,iy,:] - np.multiply(gradx[:,ix,iy,:],dx) - np.multiply(grady[:,ix,iy,:],dy)
    return z0
  # set up 1-D interpolation, if needed
  if ( np.any( pf != -1) or np.any( tf != -1) ): # fixed p or t case
    if ( np.any( pf != -1) and np.any( tf == -1) ): # fixed p case
      # collapse the pressure axis at pf; interpolate in 1/T only
      p0 = np.zeros(len(temp))
      p0[:] = pf
      sigma = np.power(10,sigma_interp_2D(np.log10(p0),1/temp))
      sigma_interp_1D = interpolate.interp1d(1/temp,np.log10(sigma),axis=1,fill_value="extrapolate")
      sigma_interp = sigma_interp_1D
    elif ( np.any( pf == -1) and np.any( tf != -1) ): # fixed t case
      # collapse the temperature axis at tf; interpolate in log10(p) only
      t0 = np.zeros(len(press))
      t0[:] = tf
      sigma = np.power(10,sigma_interp_2D(np.log10(press),1/t0))
      sigma_interp_1D = interpolate.interp1d(np.log10(press),np.log10(sigma),axis=1,fill_value="extrapolate")
      sigma_interp = sigma_interp_1D
    else: # fixed p and t case
      # collapse both axes: opacities become constants in p and T,
      # returned by a zero-argument closure
      p0 = np.zeros(len(temp))
      p0[:] = pf
      sigma = np.power(10,sigma_interp_2D(np.log10(p0),1/temp))
      sigma_interp_1D = interpolate.interp1d(1/temp,np.log10(sigma),axis=1,fill_value="extrapolate")
      sigma0 = np.power(10,sigma_interp_1D(1/tf))
      def sigma_interp_0D():
        return np.log10(sigma0)
      sigma_interp = sigma_interp_0D
  else: # general variable p and t case
    sigma_interp = sigma_interp_2D
  # read in and down-sample collision-induced absorbers;
  # cia coefficients are interpolated in inverse temperature
  tempcia,kcia,ncia,ciaid = cia_read(species_c,lam_hr,opdir)
  cia_interp = interpolate.interp1d(1/tempcia,kcia,axis=1,fill_value="extrapolate")
  return sigma_interp,cia_interp,ncia,ciaid,kern
#
#
# initialize quantities for disk integration
#
# inputs:
#
# src - model type flag
# ntg - number of Tchebyshev and Gauss integration points
#
# outputs:
#
# threeD - if src is phas -> Gauss and Tchebyshev points and weights
#
def init_3d(src,ntg):
  # only the 3-D phase-curve model ('phas') needs disk-integration points;
  # every other source type gets the sentinel -1
  if (src != 'phas'):
    return -1
  # Gauss-Legendre points/weights and Tchebyshev points/weights
  nodesG, weightsG = np.polynomial.legendre.leggauss(ntg)
  nodesT, weightsT = tchebyshev_pts(ntg)
  # symmetry about the illumination equator: only half the Tchebyshev
  # points (and weights) are needed
  nhalf = math.ceil(ntg/2)
  nodesT = nodesT[:nhalf]
  weightsT = weightsT[:nhalf]
  # odd ntg: halve the equatorial weight so it is not double-counted
  if (ntg % 2 != 0):
    weightsT[-1] = 0.5*weightsT[-1]
  return nodesG,nodesT,weightsG,weightsT
#
#
# set weights (kernel) for spectral convolution
#
# inputs:
#
# x - low-resolution spectral grid
# x_hr - high-resolution spectral grid (same units as x)
#
# outputs:
#
# kern - array describing wavelength-dependent kernels for
# convolution (len(x) x len(x_hr))
#
# options:
#
# Dx - widths of low-resolution gridpoints (len(x))
# mode - vector (len(x)) of integers indicating if
# x_i is a spectroscopic point (1) or photometric
# point. if not set, assumes all are spectroscopic
# and applies gaussian lineshape.
#
# notes:
#
# designed to pair with kernel_convol function. heavily modified
# and sped-up from a version originated by Mike Line.
#
def kernel(x,x_hr,Dx = -1,mode = -1):
  # build convolution kernels mapping the hi-res grid onto each lo-res point
  # number of points in lo-res grid
  Nx = len(x)
  # compute widths if not provided
  if ( np.any( Dx == -1) ):
    dx = np.zeros(Nx)
    xm = 0.5*(x[1:] + x[:-1])   # midpoints between lo-res gridpoints
    dx1 = xm[1:] - xm[:-1]      # interior widths
    dx[1:-1] = dx1[:]
    # extrapolate the resolving power (x/dx) to set the edge widths
    res_interp = interpolate.interp1d(x[1:-1],x[1:-1]/dx1,fill_value="extrapolate")
    dx[0] = x[0]/res_interp(x[0])
    dx[Nx-1] = x[Nx-1]/res_interp(x[Nx-1])
  else:
    dx = np.zeros(Nx)
    dx[:] = Dx
  # initialize output array
  kern = np.zeros([Nx,len(x_hr)])
  # FWHM = fac * standard deviation of a gaussian
  fac = (2*(2*np.log(2))**0.5) # ~= 2.355
  # case where mode is not specified: treat every point as spectroscopic
  if ( np.any( mode == -1) ):
    for i in range(Nx):
      sigma = dx[i]/fac
      kern[i,:] = np.exp(-(x_hr[:]-x[i])**2/(2*sigma**2))
      sumk = np.sum(kern[i,:])
      # guard against an all-zero row (no hi-res support), consistent with
      # the mode-specified spectroscopic branch below
      if (sumk != 0):
        kern[i,:] = kern[i,:]/sumk
  # case where mode is specified
  else:
    for i in range(Nx):
      if (mode[i] == 1): # spectroscopic point -> gaussian lineshape
        sigma = dx[i]/fac
        kern[i,:] = np.exp(-(x_hr[:]-x[i])**2/(2*sigma**2))
        sumk = np.sum(kern[i,:])
        if (sumk != 0):
          kern[i,:] = kern[i,:]/sumk
        else:
          kern[i,:] = 0
      elif (mode[i] == 0): # photometric point -> boxcar over the bandpass
        # use np.where(...)[0] (not np.squeeze) so a single in-band point
        # still yields a 1-d index array supporting len()/j[0]/j[-1]
        j = np.where(np.logical_and(x_hr >= x[i]-Dx[i]/2, x_hr <= x[i]+Dx[i]/2))[0]
        if ( len(j) > 0 ):
          kern[i,j] = 1
          # edge handling: fractional weights for hi-res cells only
          # partially covered by the bandpass
          jmin = j[0]
          jmax = j[-1]
          if (jmin == 0):
            Dxmin = abs(x_hr[jmin+1]-x_hr[jmin])
          else:
            Dxmin = abs( 0.5*(x_hr[jmin]+x_hr[jmin+1]) - 0.5*(x_hr[jmin]+x_hr[jmin-1]) )
          if (jmax == len(x_hr)-1):
            Dxmax = abs(x_hr[jmax]-x_hr[jmax-1])
          else:
            Dxmax = abs( 0.5*(x_hr[jmax]+x_hr[jmax+1]) - 0.5*(x_hr[jmax]+x_hr[jmax-1]) )
          xb = (x[i]-Dx[i]/2) - (x_hr[jmin]-Dxmin/2)
          xa = (x_hr[jmax]-Dxmax/2) - (x[i]+Dx[i]/2)
          if (xb >= 0):
            fb = 1 - xb/Dxmin
          else:
            fb = 1
          if (xa >= 0):
            fa = 1 - xa/Dxmax
          else:
            fa = 1
          kern[i,jmin] = fb
          kern[i,jmax] = fa
          # re-normalize; guarded so an empty/degenerate band cannot
          # produce a 0/0 NaN row (previously unguarded)
          sumk = np.sum(kern[i,:])
          if (sumk != 0):
            kern[i,:] = kern[i,:]/sumk
  return kern
#
#
# convolve spectrum with general kernel
#
# inputs:
#
# kern - kernel matrix from kernel (len(low-res) x len(high-res))
# spec_hr - hi-res spectrum (len(hi-res))
#
# outputs:
#
# spec_lr - degraded spectrum (len(low-res))
#
def kernel_convol(kern,spec_hr):
  # weighted sum of the hi-res spectrum under each lo-res kernel row;
  # equivalent to the matrix-vector product kern @ spec_hr
  return np.sum(np.multiply(kern,spec_hr),axis=1)
#
#
# simple wavelength-dependent noise model
#
# inputs:
#
# lam0 - wavelength where snr0 is normalized to (um)
# snr0 - signal-to-noise at lam0
# lam - wavelength (um)
# dlam - wavelength bin width (um)
# FpFs - planet-to-star flux ratio
# Ts - host star effective temperature
# ntype - noise type, options are:
# 'csnr' = constant snr
# 'cnse' = constant noise
# 'cerr' = constant error in FpFs
# 'plan' = noise dominated by planetary counts
# 'ezod' = noise dominated by exozodiacal light
# 'detr' = noise dominated by detector
# 'leak' = noise dominated by stellar leakage
#
# outputs:
#
# err - 1-sigma uncertainty in planet-to-star flux ratio
#
# notes:
#
# assumes transmission, quantum efficiency, raw contrast are all grey.
#
def noise(lam0,snr0,lam,dlam,FpFs,Ts,ntype):
  # constant-snr case needs no interpolation: uniform snr0 across the band
  if (ntype == 'csnr'):
    snr = np.full(lam.shape[0],snr0,dtype=float)
    return FpFs/snr
  # interpolators used to evaluate the flux ratio and resolving power at lam0
  FpFs_interp = interpolate.interp1d(lam,FpFs)
  res_interp = interpolate.interp1d(lam,lam/dlam)
  # constant error in FpFs, normalized at lam0
  if (ntype == 'cerr'):
    return np.full(lam.shape[0],FpFs_interp(lam0)/snr0,dtype=float)
  # remaining noise regimes all share the form
  #   snr = snr0 * (FpFs/FpFs0)**pF * (lam/lam0)**pL * (R0/R)**pR * (B/B0)**pB
  # with regime-specific exponents:
  powers = {'cnse': (1.0, 2.0, 1.0, 1.0),  # constant noise
            'plan': (0.5, 1.0, 0.5, 0.5),  # planet-count dominated
            'ezod': (1.0, 0.0, 0.5, 0.5),  # exozodi dominated
            'detr': (1.0, 1.0, 1.0, 1.0),  # detector dominated
            'leak': (1.0, 1.0, 0.5, 0.5)}  # stellar-leakage dominated
  pF,pL,pR,pB = powers[ntype]
  snr = snr0*((FpFs/FpFs_interp(lam0))**pF)*((lam/lam0)**pL)* \
        ((res_interp(lam0)/(lam/dlam))**pR)*((planck(lam,Ts)/planck(lam0,Ts))**pB)
  return FpFs/snr
#
#
# planck function
#
# inputs:
#
# lam - wavelength (um)
# T - temperature (K)
#
# outputs:
#
# Blam - planck intensity (W m**-2 um**-1 sr**-1)
#
def planck(lam,T):
  # planck specific intensity at wavelength lam (um) and temperature T (K),
  # returned in W m**-2 um**-1 sr**-1
  boltz = 1.38064852e-23  # boltzmann constant, m**2 kg s**-2 K**-1
  hpl = 6.62607015e-34    # planck constant, kg m**2 s**-1
  cspd = 2.99792458e8     # speed of light, m s**-1
  # wavelength converted um -> m
  lam_m = lam/1.e6
  # blackbody intensity in W m**-2 m**-1 sr**-1
  intensity = 2*hpl*cspd**2/lam_m**5/(np.exp(hpl*cspd/boltz/T/lam_m) - 1)
  # convert to per-um on return
  return intensity/1.e6
#
#
# planck function, w/matrix ops for all lam / T combos
#
# inputs:
#
# lam - wavelength (um)
# T - temperature (K)
#
# outputs:
#
# Blam - planck intensity (W m**-2 um**-1 sr**-1) [Nlam,Ntemp]
#
def planck2D(lam,T):
  # planck intensity for every wavelength/temperature combination
  #
  # inputs:  lam - wavelengths (um), shape (Nlam,); T - temperatures (K), shape (Ntemp,)
  # outputs: Blam - planck intensity (W m**-2 um**-1 sr**-1), shape (Nlam, Ntemp)
  # constants
  kB = 1.38064852e-23 # m**2 kg s**-2 K**-1
  h = 6.62607015e-34 # kg m**2 s**-1
  c = 2.99792458e8 # m s**-1
  # broadcast wavelength (m) down the rows and temperature across the columns;
  # avoids materializing the repeated constant/identity matrices of the old
  # implementation (and no longer shadows the builtin `id`)
  lam0 = lam[:,np.newaxis]/1.e6 # m, shape (Nlam, 1)
  T0 = T[np.newaxis,:]          # K, shape (1, Ntemp)
  # planck function in W m**-2 m**-1 sr**-1, converted to per-um on return
  Blam = 2*h*c**2/np.power(lam0,5)/(np.exp(h*c/kB/(T0*lam0)) - 1)
  return Blam/1.e6
#
#
# brightness temperature
#
# inputs:
#
# lam - wavelength (um)
# Flam - specific flux density (W m**-2 um**-1)
#
# outputs:
#
# Tb - brightness temperature (K)
#
def Tbright(lam,Flam):
  # brightness temperature (K) for a specific flux density Flam (W m**-2 um**-1)
  # at wavelength lam (um); analytic inversion of the planck function
  boltz = 1.38064852e-23  # boltzmann constant, m**2 kg s**-2 K**-1
  hpl = 6.62607015e-34    # planck constant, kg m**2 s**-1
  cspd = 2.99792458e8     # speed of light, m s**-1
  # conversions to mks: wavelength um -> m, flux (per um) -> intensity (per m, per sr)
  lam_m = lam/1.e6
  intensity = Flam*1.e6/np.pi
  # invert B(lam, T) for T
  return hpl*cspd/boltz/lam_m/np.log(1 + 2*hpl*cspd**2/intensity/lam_m**5)
#
#
# compute transit depth spectrum using Robinson (2017) formalism
#
# inputs:
#
# Rp - planetary radius (Rearth)
# Rs - stellar radius (Rsun)
# z - vertical altitude grid (m)
# dtau - vertical differential optical depths [Nlam x Nlay]
# p - pressure grid
# ref - include refractive floor? (T/F)
# pref - pressure location of refractive floor
#
#
# outputs:
#
# td - transit depth spectrum
#
def transit_depth(Rp,Rs,z,dtau,p,ref,pref=-1):
  # transit depth spectrum via the path-distribution formalism of
  # Robinson (2017): slant optical depths are obtained by applying a
  # purely geometric matrix (Pb) to the vertical differential optical depths
  Nlev = dtau.shape[1] + 1 # number of levels
  Nlam = dtau.shape[0] # number of wavelengths
  Re = 6.378e6 # earth radius (m)
  Rsun = 6.957e8 # solar radius (m)
  Nlay = Nlev-1
  # grid of impact parameters (level edges) and layer-midpoint values
  b = Rp*Re + z
  bm = 0.5*(b[1:] + b[:-1])
  # geometric path distribution; entries are zeroed wherever the ray does
  # not pass through the layer (dn < 0)
  bs = b*b
  bms = bm*bm
  dn = bms[None,:] - bs[0:Nlay,None]
  Pb = np.divide(4*np.power(np.repeat(bm[:,np.newaxis],Nlay,axis=1),2),dn)
  izero = np.where(dn < 0)
  Pb[izero] = 0
  Pb = np.power(Pb,0.5)
  # integrate along slant paths, compute transmission
  tau = np.transpose(np.dot(Pb,np.transpose(dtau)))
  t = np.exp(-tau)
  # refractive floor: rays probing pressures at/above pref are fully opaque
  if ref:
    iref = np.where(p[0:Nlay] >= pref)
    t[:,iref] = 0.
  # integrate absorbed light over annuli, then add the opaque disk at Rp
  # NOTE(review): A > 0 requires b to decrease with index (z ordered
  # top-down) -- confirm against callers
  A = b[:-1]**2 - b[1:]**2 # annulus area
  td = np.dot(1-t,A) + (Rp*Re)**2
  # normalize by the stellar disk area
  td = td/(Rs*Rsun)**2
  return td
#
# refractive floor location from analytic expression in robinson et al. (2017)
#
def refract_floor(nu,t,Rs,a,Rp,m,grav):
  # refractive floor pressure from the analytic scaling in robinson et al. (2017)
  Re = 6.378e6      # earth radius (m)
  Rjup = 6.991e7    # jupiter radius (m)
  Na = 6.0221408e23 # avogadro's number
  # mean atmospheric temperature, molar mass, and gravity
  tbar = np.mean(t)
  mbar = np.mean(m)
  gbar = np.mean(grav)
  # scalings relative to a jovian fiducial case (2300 Pa at the fiducial values)
  return 23e2*(1.23e-4/nu)*(tbar/130)**1.5*(Rs)*(5.2/a)*(Rjup/Rp/Re)**0.5* \
         (2.2/Na/mbar/1e3)**0.5*(24.8/gbar)**0.5
#
#
# spectral grid routine, general
#
# inputs:
#
# x_min - minimum spectral cutoff
# x_max - maximumum spectral cutoff
# dx - if set, adopts fixed spacing of width dx
# res - if set, uses fixed or spectrally varying resolving power
# lamr - if set, uses spectrally varying resolving power and
# lamr must have same size as res
#
# outputs:
#
# x - center of spectral gridpoints
# Dx - spectral element width
#
# notes:
#
# in case of spectrally-varying resolving power, we use the
# derivative of the resolving power at x_i to find the resolving
# power at x_i+1. this sets up a quadratic equation that relates
# the current spectral element to the next.
#
def spectral_grid(x_min,x_max,res = -1,dx = -1,lamr = -1):
  # build a spectral grid between x_min and x_max; see the header notes for
  # the quadratic recursion used in the spectrally-varying resolving power case
  # constant grid-spacing case
  if ( np.any(dx != -1) ):
    x = np.arange(x_min,x_max,dx)
    # include the endpoint when it falls exactly one step beyond the grid
    if (max(x) + dx == x_max):
      x = np.concatenate((x,[x_max]))
    Dx = np.zeros(len(x))
    Dx[:] = dx
  # scenarios with constant or non-constant resolving power
  if ( np.any( res != -1) ):
    if ( np.any( lamr == -1) ): # constant resolving power
      x,Dx = spectral_grid_fixed_res(x_min,x_max,res)
    else: # spectrally-varying resolving power
      # function for interpolating spectral resolution
      res_interp = interpolate.interp1d(lamr,res,fill_value="extrapolate")
      # numerical derivative and interpolation function
      drdx = np.gradient(res,lamr)
      drdx_interp = interpolate.interp1d(lamr,drdx,fill_value="extrapolate")
      # initialize grid at the lower cutoff
      # (widths are computed once the full grid is known; the previous
      # implementation carried a dead per-step width variable)
      x = [x_min]
      i = 0
      # loop until x_max is reached; each step solves the quadratic for x_i+1
      while (x[i] < x_max):
        resi = res_interp(x[i])
        resp = drdx_interp(x[i])
        a = 2*resp
        b = 2*resi - 1 - 4*x[i]*resp - resp/resi*x[i]
        c = 2*resp*x[i]**2 + resp/resi*x[i]**2 - 2*x[i]*resi - x[i]
        if (a != 0 and resp*x[i]/resi > 1.e-6 ):
          xi1 = (-b + np.sqrt(b*b - 4*a*c))/2/a
        else: # nearly-constant resolving power: use the fixed-res recursion
          xi1 = (1+2*resi)/(2*resi-1)*x[i]
        x = np.concatenate((x,[xi1]))
        i = i+1
      # drop overshoot beyond the upper cutoff, then set all widths
      if (max(x) > x_max):
        x = x[0:-1]
      Dx = x/res_interp(x)
  # NOTE(review): if neither res nor dx is set, x/Dx are undefined here --
  # confirm callers always provide one of the two
  return np.squeeze(x),np.squeeze(Dx)
#
#
# spectral grid routine, fixed resolving power
#
# inputs:
#
# res - spectral resolving power (x/dx)
# x_min - minimum spectral cutoff
# x_max - maximumum spectral cutoff
#
# outputs:
#
# x - center of spectral gridpoints
# Dx - spectral element width
#
def spectral_grid_fixed_res(x_min,x_max,res):
  # at fixed resolving power R = x/dx, successive gridpoint centers obey
  # x_{i+1} = x_i * (1 + 2R)/(2R - 1)
  ratio = (1 + 2*res)/(2*res - 1)
  pts = [x_min]
  # extend the grid until the next point would cross the upper cutoff
  while (pts[-1]*ratio < x_max):
    pts = np.concatenate((pts,[pts[-1]*ratio]))
  # element widths follow directly from the resolving power
  widths = pts/res
  return np.squeeze(pts),np.squeeze(widths)
#
#
# tchebyshev points and weights (Webber et al., 2015)
#
# inputs:
#
# n - number of tchebyshev points
#
# outputs:
#
# x - points
# w - weights
#
def tchebyshev_pts(n):
  # tchebyshev quadrature abscissas and weights (Webber et al., 2015)
  angles = np.pi*np.arange(1,n+1,step=1)/(n+1)
  return np.cos(angles), np.pi/(n+1)*(np.sin(angles))**2
#
#
# rayleigh phase function (normalized so that integral over dcosTh from -1 to 1 is 2)
#
# inputs:
#
# cosTh - cosine of scattering angle
#
# outputs:
#
# phase function at cosTh
#
def pray(cosTh):
  # rayleigh scattering phase function, (3/4)*(1 + cos^2)
  return 0.75*(1 + cosTh**2)
#
#
# henyey-greenstein phase function (normalized so that integral over dcosTh from -1 to 1 is 2)
#
# inputs:
#
# g - asymmetry parameter
# cosTh - cosine of scattering angle
#
# outputs:
#
# phase function at cosTh
#
def pHG(g,cosTh):
  # henyey-greenstein phase function for asymmetry-parameter array g at
  # scattering-angle cosine cosTh (normalized so its integral over dcosTh is 2)
  # unit array in place of the old copy-and-fill, which shadowed builtin `id`
  one = np.ones_like(g)
  gg = np.multiply(g,g)
  return np.divide(one-gg, np.power(one+gg-2*cosTh*g, 1.5))
#
#
# henyey-greenstein phase function integrated from y to +1
#
# inputs:
#
# g - asymmetry parameter
# cosTh - cosine of scattering angle
#
# outputs:
#
# integral of HG phase function from cosTh to +1
#
def pHG_int(g,cosTh):
  # analytic integral of the HG phase function from cosTh to +1,
  # evaluated element-wise over the asymmetry-parameter array g
  id = np.copy(g)
  id[:] = 1.
  soln = np.copy(g)
  soln[:] = 0.
  # isotropic limit g = 0: the phase function is unity, integral is 1 - cosTh
  iz = np.where(g == 0)
  if iz[0].size !=0:
    soln[iz] = 1-cosTh
  # general case: the antiderivative of (1-g^2)(1+g^2-2 g c)^(-3/2) in c is
  # (1-g^2)/(g*sqrt(1+g^2-2 g c)); evaluate at c = 1 minus at c = cosTh
  iz = np.where(g != 0)
  if iz[0].size !=0:
    soln[iz] = np.divide((id[iz]-np.multiply(g[iz],g[iz])),np.multiply(g[iz],np.power(id[iz]+np.multiply(g[iz],g[iz])-2*g[iz],0.5)))
    soln[iz] = soln[iz] - np.divide((id[iz]-np.multiply(g[iz],g[iz])),np.multiply(g[iz],np.power(id[iz]+np.multiply(g[iz],g[iz])-2*cosTh*g[iz],0.5)))
  return soln
#
#
# three-moment expansion of HG phase function (normalized so that integral from -1 to 1 is 2)
#
# inputs:
#
# g - asymmetry parameter
# cosTh - cosine of scattering angle
#
# outputs:
#
# phase function at cosTh
#
def pHG3(g,cosTh):
  # three-moment legendre expansion of the HG phase function,
  # sum_{l=0..2} (2l+1) g**l P_l(cosTh)
  # ones_like replaces the old copy-and-fill (which shadowed builtin `id`
  # and, via id[:,:], restricted g to 2-D arrays; any shape now works)
  one = np.ones_like(g)
  return one + 3*g*cosTh + 5/2*np.multiply(g,g)*(3*cosTh**2 - 1)
#
#
# three-moment expansion of HG phase function integrated from y to +1
#
# inputs:
#
# g - asymmetry parameter
# cosTh - cosine of scattering angle
#
# outputs:
#
# integral of three-moment HG phase function from cosTh to +1
#
def pHG3_int(g,cosTh):
  # closed-form integral of the three-moment HG expansion from cosTh to +1:
  # (1-c) + (3g/2)(1-c^2) + (5/2)g^2(c - c^3), factored below
  # ones_like replaces the old copy-and-fill, which shadowed builtin `id`
  one = np.ones_like(g)
  return -0.5*(cosTh-1)*(5*np.multiply(g,g)*cosTh*(cosTh+1) + 3*g*(cosTh+1) + 2*one)
|
hablabxREPO_NAMErfastPATH_START.@rfast_extracted@rfast-main@rfast_routines.py@.PATH_END.py
|
{
"filename": "plot_floodfill.py",
"repo_name": "scikit-image/scikit-image",
"repo_path": "scikit-image_extracted/scikit-image-main/doc/examples/segmentation/plot_floodfill.py",
"type": "Python"
}
|
"""
==========
Flood Fill
==========
Flood fill is an algorithm to identify and/or change adjacent values in an
image based on their similarity to an initial seed point [1]_. The conceptual
analogy is the 'paint bucket' tool in many graphic editors.
.. [1] https://en.wikipedia.org/wiki/Flood_fill
Basic example
-------------
First, a basic example where we will change a checkerboard square from white
to mid-gray.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data, filters, color, morphology
from skimage.segmentation import flood, flood_fill
checkers = data.checkerboard()
# Fill a square near the middle with value 127, starting at index (76, 76)
filled_checkers = flood_fill(checkers, (76, 76), 127)
fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
ax[0].imshow(checkers, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[1].imshow(filled_checkers, cmap=plt.cm.gray)
ax[1].plot(76, 76, 'wo')  # seed point (plot takes x=col, y=row)
ax[1].set_title('After flood fill')
plt.show()
##############################################################################
# Advanced example
# ----------------
#
# Because standard flood filling requires the neighbors to be strictly equal,
# its use is limited on real-world images with color gradients and noise.
# The `tolerance` keyword argument widens the permitted range about the initial
# value, allowing use on real-world images.
#
# Here we will experiment a bit on the cameraman. First, turning his coat from
# dark to light.
cameraman = data.camera()
# Change the cameraman's coat from dark to light (255). The seed point is
# chosen as (155, 150)
light_coat = flood_fill(cameraman, (155, 150), 255, tolerance=10)
fig, ax = plt.subplots(ncols=2, figsize=(10, 5))
ax[0].imshow(cameraman, cmap=plt.cm.gray)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].imshow(light_coat, cmap=plt.cm.gray)
ax[1].plot(150, 155, 'ro')  # seed point (x=col, y=row, so reversed)
ax[1].set_title('After flood fill')
ax[1].axis('off')
plt.show()
##############################################################################
# The cameraman's coat is in varying shades of gray. Only the parts of the coat
# matching the shade near the seed value is changed.
#
# Experimentation with tolerance
# ------------------------------
#
# To get a better intuitive understanding of how the tolerance parameter
# works, here is a set of images progressively increasing the parameter with
# seed point in the upper left corner.
output = []
# tolerances 5, 25, ..., 145 in steps of 20
for i in range(8):
    tol = 5 + 20 * i
    output.append(flood_fill(cameraman, (0, 0), 255, tolerance=tol))
# Initialize plot and place original image
fig, ax = plt.subplots(nrows=3, ncols=3, figsize=(12, 12))
ax[0, 0].imshow(cameraman, cmap=plt.cm.gray)
ax[0, 0].set_title('Original')
ax[0, 0].axis('off')
# Plot all eight different tolerances for comparison.
for i in range(8):
    # map flat index i+1 onto the 3x3 grid (slot 0 holds the original)
    m, n = np.unravel_index(i + 1, (3, 3))
    ax[m, n].imshow(output[i], cmap=plt.cm.gray)
    ax[m, n].set_title(f'Tolerance {5 + 20 * i}')
    ax[m, n].axis('off')
    ax[m, n].plot(0, 0, 'bo')  # seed point
fig.tight_layout()
plt.show()
##############################################################################
# Flood as mask
# -------------
#
# A sister function, `flood`, is available which returns a mask identifying
# the flood rather than modifying the image itself. This is useful for
# segmentation purposes and more advanced analysis pipelines.
#
# Here we segment the nose of a cat. However, multi-channel images are not
# supported by flood[_fill]. Instead we Sobel filter the red channel to
# enhance edges, then flood the nose with a tolerance.
cat = data.chelsea()
cat_sobel = filters.sobel(cat[..., 0])
cat_nose = flood(cat_sobel, (240, 265), tolerance=0.03)
fig, ax = plt.subplots(nrows=3, figsize=(10, 20))
ax[0].imshow(cat)
ax[0].set_title('Original')
ax[0].axis('off')
ax[1].imshow(cat_sobel)
ax[1].set_title('Sobel filtered')
ax[1].axis('off')
ax[2].imshow(cat)
# overlay the boolean flood mask on the original image
ax[2].imshow(cat_nose, cmap=plt.cm.gray, alpha=0.3)
ax[2].plot(265, 240, 'wo')  # seed point
ax[2].set_title('Nose segmented with `flood`')
ax[2].axis('off')
fig.tight_layout()
plt.show()
##############################################################################
# Flood-fill in HSV space and mask post-processing
# ------------------------------------------------
#
# Since flood fill operates on single-channel images, we transform here the
# image to the HSV (Hue Saturation Value) space in order to flood pixels of
# similar hue.
#
# In this example we also show that it is possible to post-process the binary
# mask returned by :func:`skimage.segmentation.flood` thanks to the functions
# of :mod:`skimage.morphology`.
img = data.astronaut()
img_hsv = color.rgb2hsv(img)
img_hsv_copy = np.copy(img_hsv)
# flood function returns a mask of flooded pixels
mask = flood(img_hsv[..., 0], (313, 160), tolerance=0.016)
# Set pixels of mask to new value for hue channel
img_hsv[mask, 0] = 0.5
# Post-processing in order to improve the result
# Remove white pixels from flag, using saturation channel
mask_postprocessed = np.logical_and(mask, img_hsv_copy[..., 1] > 0.4)
# Remove thin structures with binary opening
mask_postprocessed = morphology.binary_opening(mask_postprocessed, np.ones((3, 3)))
# Fill small holes with binary closing
mask_postprocessed = morphology.binary_closing(mask_postprocessed, morphology.disk(20))
img_hsv_copy[mask_postprocessed, 0] = 0.5
fig, ax = plt.subplots(1, 2, figsize=(8, 4))
ax[0].imshow(color.hsv2rgb(img_hsv))
ax[0].axis('off')
ax[0].set_title('After flood fill')
ax[1].imshow(color.hsv2rgb(img_hsv_copy))
ax[1].axis('off')
ax[1].set_title('After flood fill and post-processing')
fig.tight_layout()
plt.show()
|
scikit-imageREPO_NAMEscikit-imagePATH_START.@scikit-image_extracted@scikit-image-main@doc@examples@segmentation@plot_floodfill.py@.PATH_END.py
|
{
"filename": "_xperiod0.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/ohlc/_xperiod0.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class Xperiod0Validator(_plotly_utils.basevalidators.AnyValidator):
    """Validator for the ``xperiod0`` property of ``ohlc`` traces."""

    def __init__(self, plotly_name="xperiod0", parent_name="ohlc", **kwargs):
        # default edit_type is "calc" unless the caller overrides it
        kwargs.setdefault("edit_type", "calc")
        super(Xperiod0Validator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@ohlc@_xperiod0.py@.PATH_END.py
|
{
"filename": "recon_bin.py",
"repo_name": "exoclime/HELIOS-K",
"repo_path": "HELIOS-K_extracted/HELIOS-K-master/tools/recon_bin.py",
"type": "Python"
}
|
import numpy as np
import argparse
import matplotlib.pylab as plt
from numpy.polynomial.chebyshev import chebval, chebfit
'''
This script reconstructs sampled cbin files, to check the sampling
Date: January 2020
Author: Simon Grimm
'''
#change here the bin size:
fig = plt.figure()
def main(M):
    # reconstruct the opacity function of species M from its Chebyshev cbin
    # file and overplot it against the full and sorted hi-res opacities
    data = np.loadtxt('%s.dat' % M)         # hi-res opacities: nu, k(nu)
    data_c = np.loadtxt('%s_cbin.dat' % M)  # per-bin Chebyshev coefficients
    print('%s_cbin.dat' % M)
    print('%s_r.dat' % M)
    kStart = data[0,0]                 # first wavenumber of the grid
    Nbins = len(data_c)
    binSize0 = int(len(data) / Nbins)  # hi-res points per bin
    # the Chebyshev output can be reduced here by a factor, e.g. 0.1
    binSize = int(binSize0 * 1.0)
    print("number of nu points:",len(data), Nbins, binSize, kStart)
    for ib in range(0, Nbins):
        print(ib)
        k = data[(ib * binSize0):(ib + 1) * binSize0, 1]
        x0 = data[(ib * binSize0):(ib + 1) * binSize0, 0]
        ks = np.sort(k)
        x = np.linspace(0, 1.0, num=binSize, endpoint=True)
        # extract Chebyshev coefficients and the starting point in x of the
        # opacity function
        c = data_c[ib,2:]
        xs = data_c[ib,1]
        print(c)
        print(c[0], xs)
        # rescale x to the standard Chebyshev polynomial range [-1:1],
        # evaluate the polynomial, and undo the log
        k_res = np.exp(chebval(x * 2.0 - 1.0, c, tensor=False))
        x2 = x * (1.0 - xs) + xs
        plt.plot(x0, k, c="blue", label='Full')
        plt.plot(x0, ks, c="red", label='Sorted')
        plt.plot(x2*(binSize0 - 1) + ib * binSize0 + kStart, k_res, c="green", label='Cheb Poly')
        if(ib == 0):
            plt.legend()
    plt.xlabel(r'$\nu$')
    plt.ylabel(r'k($\nu$)')
    plt.yscale('log')
    plt.show()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-M', '--M', type=str,
                        help='Species', default = '')

    args = parser.parse_args()
    M = args.M

    if(M == ''):
        # previously this printed the error (naming the wrong script) and then
        # still called main('') anyway, crashing on a missing '.dat' file;
        # abort instead of proceeding
        print("Error, no species specified, run python recon_bin.py -M <id>")
        raise SystemExit(1)
    main(M)
|
exoclimeREPO_NAMEHELIOS-KPATH_START.@HELIOS-K_extracted@HELIOS-K-master@tools@recon_bin.py@.PATH_END.py
|
{
"filename": "test_bar.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/io/formats/style/test_bar.py",
"type": "Python"
}
|
import io
import numpy as np
import pytest
from pandas import (
NA,
DataFrame,
read_csv,
)
pytest.importorskip("jinja2")
def bar_grad(a=None, b=None, c=None, d=None):
    """Used in multiple tests to simplify formatting of expected result"""
    props = [("width", "10em")]
    # No gradient stops at all -> only the fixed width property.
    if all(stop is None for stop in (a, b, c, d)):
        return props
    joined = ",".join(stop for stop in (a, b, c, d) if stop)
    props.append(("background", f"linear-gradient(90deg,{joined})"))
    return props
def no_bar():
    """Expected CSS for a cell that renders no bar (width property only)."""
    return bar_grad()
def bar_to(x, color="#d65f5f"):
    """Expected CSS for a bar filled from 0% up to ``x`` percent."""
    return bar_grad(f" {color} {x:.1f}%", f" transparent {x:.1f}%")
def bar_from_to(x, y, color="#d65f5f"):
    """Expected CSS for a bar filled between ``x`` and ``y`` percent."""
    return bar_grad(
        f" transparent {x:.1f}%",
        f" {color} {x:.1f}%",
        f" {color} {y:.1f}%",
        f" transparent {y:.1f}%",
    )
@pytest.fixture
def df_pos():
    # Single-column frame of strictly positive values.
    return DataFrame([[1], [2], [3]])
@pytest.fixture
def df_neg():
    # Single-column frame of strictly negative values.
    return DataFrame([[-1], [-2], [-3]])
@pytest.fixture
def df_mix():
    # Single-column frame mixing negative and positive values.
    return DataFrame([[-3], [1], [2]])
@pytest.mark.parametrize(
    "align, exp",
    [
        ("left", [no_bar(), bar_to(50), bar_to(100)]),
        ("right", [bar_to(100), bar_from_to(50, 100), no_bar()]),
        ("mid", [bar_to(33.33), bar_to(66.66), bar_to(100)]),
        ("zero", [bar_from_to(50, 66.7), bar_from_to(50, 83.3), bar_from_to(50, 100)]),
        ("mean", [bar_to(50), no_bar(), bar_from_to(50, 100)]),
        (2.0, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
        (np.median, [bar_to(50), no_bar(), bar_from_to(50, 100)]),
    ],
)
def test_align_positive_cases(df_pos, align, exp):
    # test different align cases for all positive values
    ctx = df_pos.style.bar(align=align)._compute().ctx
    assert ctx == {(row, 0): css for row, css in enumerate(exp)}
@pytest.mark.parametrize(
    "align, exp",
    [
        ("left", [bar_to(100), bar_to(50), no_bar()]),
        ("right", [no_bar(), bar_from_to(50, 100), bar_to(100)]),
        ("mid", [bar_from_to(66.66, 100), bar_from_to(33.33, 100), bar_to(100)]),
        ("zero", [bar_from_to(33.33, 50), bar_from_to(16.66, 50), bar_to(50)]),
        ("mean", [bar_from_to(50, 100), no_bar(), bar_to(50)]),
        (-2.0, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
        (np.median, [bar_from_to(50, 100), no_bar(), bar_to(50)]),
    ],
)
def test_align_negative_cases(df_neg, align, exp):
    # test different align cases for all negative values
    ctx = df_neg.style.bar(align=align)._compute().ctx
    assert ctx == {(row, 0): css for row, css in enumerate(exp)}
@pytest.mark.parametrize(
    "align, exp",
    [
        ("left", [no_bar(), bar_to(80), bar_to(100)]),
        ("right", [bar_to(100), bar_from_to(80, 100), no_bar()]),
        ("mid", [bar_to(60), bar_from_to(60, 80), bar_from_to(60, 100)]),
        ("zero", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
        ("mean", [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
        (-0.0, [bar_to(50), bar_from_to(50, 66.66), bar_from_to(50, 83.33)]),
        (np.nanmedian, [bar_to(50), no_bar(), bar_from_to(50, 62.5)]),
    ],
)
@pytest.mark.parametrize("nans", [True, False])
def test_align_mixed_cases(df_mix, align, exp, nans):
    # test different align cases for mixed positive and negative values
    # also test no impact of NaNs and no_bar
    expected = {(row, 0): css for row, css in enumerate(exp)}
    if nans:
        # the NaN row must be appended BEFORE styling so it is part of the frame
        df_mix.loc[3, :] = np.nan
        expected[(3, 0)] = no_bar()
    result = df_mix.style.bar(align=align)._compute().ctx
    assert result == expected
@pytest.mark.parametrize(
    "align, exp",
    [
        (
            "left",
            {
                "index": [[no_bar(), no_bar()], [bar_to(100), bar_to(100)]],
                "columns": [[no_bar(), bar_to(100)], [no_bar(), bar_to(100)]],
                "none": [[no_bar(), bar_to(33.33)], [bar_to(66.66), bar_to(100)]],
            },
        ),
        (
            "mid",
            {
                "index": [[bar_to(33.33), bar_to(50)], [bar_to(100), bar_to(100)]],
                "columns": [[bar_to(50), bar_to(100)], [bar_to(75), bar_to(100)]],
                "none": [[bar_to(25), bar_to(50)], [bar_to(75), bar_to(100)]],
            },
        ),
        (
            "zero",
            {
                "index": [
                    [bar_from_to(50, 66.66), bar_from_to(50, 75)],
                    [bar_from_to(50, 100), bar_from_to(50, 100)],
                ],
                "columns": [
                    [bar_from_to(50, 75), bar_from_to(50, 100)],
                    [bar_from_to(50, 87.5), bar_from_to(50, 100)],
                ],
                "none": [
                    [bar_from_to(50, 62.5), bar_from_to(50, 75)],
                    [bar_from_to(50, 87.5), bar_from_to(50, 100)],
                ],
            },
        ),
        (
            2,
            {
                "index": [
                    [bar_to(50), no_bar()],
                    [bar_from_to(50, 100), bar_from_to(50, 100)],
                ],
                "columns": [
                    [bar_to(50), no_bar()],
                    [bar_from_to(50, 75), bar_from_to(50, 100)],
                ],
                "none": [
                    [bar_from_to(25, 50), no_bar()],
                    [bar_from_to(50, 75), bar_from_to(50, 100)],
                ],
            },
        ),
    ],
)
@pytest.mark.parametrize("axis", ["index", "columns", "none"])
def test_align_axis(align, exp, axis):
    # test all axis combinations with positive values and different aligns
    data = DataFrame([[1, 2], [3, 4]])
    computed = (
        data.style.bar(align=align, axis=None if axis == "none" else axis)
        ._compute()
        .ctx
    )
    # expected ctx maps each (row, col) cell to the CSS table for this axis
    expected = {(r, c): exp[axis][r][c] for r in range(2) for c in range(2)}
    assert computed == expected
@pytest.mark.parametrize(
    "values, vmin, vmax",
    [
        ("positive", 1.5, 2.5),
        ("negative", -2.5, -1.5),
        ("mixed", -2.5, 1.5),
    ],
)
@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"])  # test min/max separately
@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
def test_vmin_vmax_clipping(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
    # test that clipping occurs if any vmin > data_values or vmax < data_values
    if align == "mid":  # mid acts as left or right in each case
        if values == "positive":
            align = "left"
        elif values == "negative":
            align = "right"
    df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
    vmin = None if nullify == "vmin" else vmin
    vmax = None if nullify == "vmax" else vmax
    # Build the reference frame by clipping the data to [vmin, vmax] manually;
    # +-999 are sentinels that effectively disable a bound when it is None.
    clip_df = df.where(df <= (vmax if vmax else 999), other=vmax)
    clip_df = clip_df.where(clip_df >= (vmin if vmin else -999), other=vmin)
    result = (
        df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
        ._compute()
        .ctx
    )
    # Styling the pre-clipped frame WITHOUT vmin/vmax must give identical CSS.
    expected = clip_df.style.bar(align=align, color=["red", "green"])._compute().ctx
    assert result == expected
@pytest.mark.parametrize(
    "values, vmin, vmax",
    [
        ("positive", 0.5, 4.5),
        ("negative", -4.5, -0.5),
        ("mixed", -4.5, 4.5),
    ],
)
@pytest.mark.parametrize("nullify", [None, "vmin", "vmax"])  # test min/max separately
@pytest.mark.parametrize("align", ["left", "right", "zero", "mid"])
def test_vmin_vmax_widening(df_pos, df_neg, df_mix, values, vmin, vmax, nullify, align):
    # test that widening occurs if any vmax > data_values or vmin < data_values
    if align == "mid":  # mid acts as left or right in each case
        if values == "positive":
            align = "left"
        elif values == "negative":
            align = "right"
    df = {"positive": df_pos, "negative": df_neg, "mixed": df_mix}[values]
    vmin = None if nullify == "vmin" else vmin
    vmax = None if nullify == "vmax" else vmax
    # Reference frame: the original data plus explicit rows at vmin and vmax,
    # so the default scaling reproduces the widened range.
    expand_df = df.copy()
    expand_df.loc[3, :], expand_df.loc[4, :] = vmin, vmax
    result = (
        df.style.bar(align=align, vmin=vmin, vmax=vmax, color=["red", "green"])
        ._compute()
        .ctx
    )
    expected = expand_df.style.bar(align=align, color=["red", "green"])._compute().ctx
    # subset comparison: the reference has two extra (sentinel) rows
    assert result.items() <= expected.items()
def test_numerics():
    # test data is pre-selected for numeric values
    data = DataFrame([[1, "a"], [2, "b"]])
    ctx = data.style.bar()._compute().ctx
    # the non-numeric column must not receive any bar styling
    for cell in ((0, 1), (1, 1)):
        assert cell not in ctx
@pytest.mark.parametrize(
    "align, exp",
    [
        ("left", [no_bar(), bar_to(100, "green")]),
        ("right", [bar_to(100, "red"), no_bar()]),
        ("mid", [bar_to(25, "red"), bar_from_to(25, 100, "green")]),
        ("zero", [bar_from_to(33.33, 50, "red"), bar_from_to(50, 100, "green")]),
    ],
)
def test_colors_mixed(align, exp):
    # two-color form: first color for negative values, second for positive
    frame = DataFrame([[-1], [3]])
    ctx = frame.style.bar(align=align, color=["red", "green"])._compute().ctx
    assert ctx == {(row, 0): css for row, css in enumerate(exp)}
def test_bar_align_height():
    # test when keyword height is used 'no-repeat center' and 'background-size' present
    data = DataFrame([[1], [2]])
    ctx = data.style.bar(align="left", height=50)._compute().ctx
    bg_s = "linear-gradient(90deg, #d65f5f 100.0%, transparent 100.0%) no-repeat center"
    assert ctx == {
        (0, 0): [("width", "10em")],
        (1, 0): [
            ("width", "10em"),
            ("background", bg_s),
            ("background-size", "100% 50.0%"),
        ],
    }
def test_bar_value_error_raises():
    # invalid align / width / height arguments must raise ValueError
    df = DataFrame({"A": [-100, -60, -30, -20]})
    checks = [
        (
            "`align` should be in {'left', 'right', 'mid', 'mean', 'zero'} or",
            dict(align="poorly", color=["#d65f5f", "#5fba7d"]),
        ),
        (r"`width` must be a value in \[0, 100\]", dict(width=200)),
        (r"`height` must be a value in \[0, 100\]", dict(height=200)),
    ]
    for msg, kwargs in checks:
        with pytest.raises(ValueError, match=msg):
            df.style.bar(**kwargs).to_html()
def test_bar_color_and_cmap_error_raises():
    # supplying both `color` and `cmap` is ambiguous and must raise
    frame = DataFrame({"A": [1, 2, 3, 4]})
    with pytest.raises(ValueError, match="`color` and `cmap` cannot both be given"):
        frame.style.bar(color="#d65f5f", cmap="viridis").to_html()
def test_bar_invalid_color_type_error_raises():
    frame = DataFrame({"A": [1, 2, 3, 4]})
    msg = (
        r"`color` must be string or list or tuple of 2 strings,"
        r"\(eg: color=\['#d65f5f', '#5fba7d'\]\)"
    )
    # neither a non-string value nor a 3-element list is an acceptable `color`
    for bad_color in (123, ["#d65f5f", "#5fba7d", "#abcdef"]):
        with pytest.raises(ValueError, match=msg):
            frame.style.bar(color=bad_color).to_html()
def test_styler_bar_with_NA_values():
    # bar() must tolerate pandas NA values without breaking HTML rendering
    expected_substring = "style type="
    frame_subset = DataFrame({"A": [1, 2, NA, 4]})
    frame_all_na = DataFrame([[NA, NA], [NA, NA]])
    assert expected_substring in frame_subset.style.bar(subset="A").to_html()
    assert expected_substring in frame_all_na.style.bar(align="left", axis=None).to_html()
def test_style_bar_with_pyarrow_NA_values():
    # bar() must render a frame with pyarrow-backed dtypes, including the
    # missing value in test1 (blank field on the last CSV row).
    pytest.importorskip("pyarrow")
    data = """name,age,test1,test2,teacher
Adam,15,95.0,80,Ashby
Bob,16,81.0,82,Ashby
Dave,16,89.0,84,Jones
Fred,15,,88,Jones"""
    df = read_csv(io.StringIO(data), dtype_backend="pyarrow")
    expected_substring = "style type="
    html_output = df.style.bar(subset="test1").to_html()
    assert expected_substring in html_output
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@io@formats@style@test_bar.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/scipy/sparse/linalg/isolve/__init__.py",
"type": "Python"
}
|
"Iterative Solvers for Sparse Linear Systems"
from __future__ import division, print_function, absolute_import
#from info import __doc__
from .iterative import *
from .minres import minres
from .lgmres import lgmres
from .lsqr import lsqr
from .lsmr import lsmr
from ._gcrotmk import gcrotmk
__all__ = [s for s in dir() if not s.startswith('_')]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@scipy@sparse@linalg@isolve@__init__.py@.PATH_END.py
|
{
"filename": "uvot_sed.py",
"repo_name": "KarlenS/swift-uvot-analysis-tools",
"repo_path": "swift-uvot-analysis-tools_extracted/swift-uvot-analysis-tools-master/uvot_sed.py",
"type": "Python"
}
|
#!/Users/karlen/anaconda2/envs/astroconda/bin/python
import os
import re
import subprocess
import gzip
import numpy as np
from shutil import copyfile,copyfileobj
from source_position import PositionExtractor
from uvot_photometry import MeasureSource
from astropy.time import Time
from tqdm import tqdm
class MakeSED(object):
    """
    Combine Swift/UVOT sky images per filter over a given time range and
    produce summed images suitable for building an SED.
    NOTE(review): this class uses Python 2 print statements; it is not
    Python 3 compatible as written.
    """
    def __init__(self,filelist):
        # List of input FITS file paths (gzipped sky images; see combineFits).
        self.filelist = filelist
        # UVOT filter codes expected to appear somewhere in each file path.
        self.filters = ['vv','bb','uu','w1','w2','m2']
        # Populated externally with the dict from sortFilelistByFilter()
        # before combineFits() is called.
        self.sortedpaths = None
    def sortFilelistByFilter(self):
        """
        Group the input file paths by UVOT filter.
        Returns a dict mapping filter code -> numpy string array of the paths
        whose name contains that filter code (regex search on the path).
        """
        fdict = dict.fromkeys(self.filters,np.array([],dtype=str))
        filelistarr = np.array(self.filelist)
        for filtr in fdict.keys():
            # indices of paths containing this filter code
            indx = np.where([re.search(filtr, path) for path in filelistarr])[0]
            if np.size(indx) > 0:
                fdict[filtr] = np.append(fdict[filtr],filelistarr[indx])
        return fdict
    def combineFits(self,startdate, enddate):
        """
        For each filter, gather the exposures observed between startdate and
        enddate (astropy Time objects), append them into a single FITS file
        with fappend, and sum that file with uvotimsum.
        Returns the list of combined output file paths (one per filter that
        had at least one usable exposure).
        """
        sumfile = 'summed_image.fits'
        sumfilegz = 'summed_image.gz'
        filepaths = []
        for filtr,paths in self.sortedpaths.items():
            firstfile = True
            extfile = '%s_%s-%s_%s' %(filtr,startdate.mjd,enddate.mjd,sumfile)
            combfile = 'comb_%s' % extfile
            nfiles = 0
            print 'Working on %s filter' % (filtr)
            for f in tqdm(paths):
                measurer = MeasureSource(f)
                obstime,aspflag = measurer.get_observation_time()
                # keep only exposures strictly inside the requested time window
                if (obstime-startdate).value > 0 and (enddate-obstime).value > 0:
                    #print '%s is within the time range.' %f
                    nfiles += 1
                    if firstfile:
                        # First matching exposure: copy and gunzip it to seed
                        # the output stack, then remove the temporary .gz copy.
                        copyfile(f,'%s_%s-%s_%s' %(filtr,startdate.mjd,enddate.mjd,sumfilegz))
                        with gzip.open('%s_%s-%s_%s' %(filtr,startdate.mjd,enddate.mjd,sumfilegz),'rb') as f_in:
                            with open(extfile,'wb') as f_out:
                                copyfileobj(f_in,f_out)
                        os.remove('%s_%s-%s_%s' %(filtr,startdate.mjd,enddate.mjd,sumfilegz))
                        firstfile = False
                    elif aspflag:
                        # Subsequent exposures with a good aspect solution are
                        # appended as extra FITS extensions.
                        self.runFappend(f,extfile)
                    else:
                        print 'FUUUUCK THIS FILE: %s' %f
                        continue
            if nfiles == 0:
                print 'Filter %s had no files to combine' %filtr
                continue
            else:
                sp = PositionExtractor() #make a utils file so don't have to use a whole class for a simple function
                sp.run_uvotimsum(extfile,combfile)
                filepaths.append(combfile)
        return filepaths
    def runFappend(self,inputFile,mergedFile):
        """Append inputFile as a new extension of mergedFile via HEASoft fappend."""
        #print 'appending %s to %s' %(inputFile,mergedFile)
        tmp = subprocess.Popen(["fappend",inputFile,mergedFile], stdout=subprocess.PIPE)
        tmp = tmp.communicate()
|
KarlenSREPO_NAMEswift-uvot-analysis-toolsPATH_START.@swift-uvot-analysis-tools_extracted@swift-uvot-analysis-tools-master@uvot_sed.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/example/fortran/pm_cosmology/getHubbleParamNormedSq/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import glob
import sys
# Plot the example output (normalized-squared Hubble parameter vs. redshift)
# for each of the three output kinds, if the matching "*.<kind>.txt" exists.
linewidth = 2
fontsize = 17

# Line/marker style per output kind (CK: complex, IK: integer, RK: real).
marker = { "CK" : "-"
         , "IK" : "."
         , "RK" : "-"
         }
# x-axis label per output kind.
xlab = { "CK" : "redshift: z ( real/imaginary components )"
       , "IK" : "redshift: z ( integer-valued )"
       , "RK" : "redshift: z ( real-valued )"
       }
# One legend entry per cosmology column in the output files.
legends = [ r"$(\Omega_M, \Omega_\Lambda, \Omega_R, \Omega_K) = (0.3, 0.7, 0.0, 0.0)$"
          , r"$(\Omega_M, \Omega_\Lambda, \Omega_R, \Omega_K) = (1.0, 0.0, 0.0, 0.0)$"
          , r"$(\Omega_M, \Omega_\Lambda, \Omega_R, \Omega_K) = (0.0, 1.0, 0.0, 0.0)$"
          , r"$(\Omega_M, \Omega_\Lambda, \Omega_R, \Omega_K) = (.30, .30, .40, 0.0)$"
          , r"$(\Omega_M, \Omega_\Lambda, \Omega_R, \Omega_K) = (.25, .25, .25, .25)$"
          ]
for kind in ["IK", "CK", "RK"]:
    pattern = "*." + kind + ".txt"
    fileList = glob.glob(pattern)
    if len(fileList) == 1:
        # Fix: a multi-character delimiter requires the python parsing engine;
        # passing it explicitly avoids pandas' ParserWarning engine fallback.
        df = pd.read_csv(fileList[0], delimiter = ", ", engine = "python")
        fig = plt.figure(figsize = 1.25 * np.array([6.4, 4.8]), dpi = 200)
        ax = plt.subplot()
        if kind == "CK":
            # Complex output: plot against both column 0 and column 1 shifted
            # to redshift. NOTE(review): both calls use df.values[:,1:6] as y;
            # confirm the second call should not use different columns.
            plt.plot( df.values[:, 0] - 1
                    , df.values[:,1:6]
                    , marker[kind]
                    , linewidth = linewidth
                    #, color = "r"
                    )
            plt.plot( df.values[:, 1] - 1
                    , df.values[:,1:6]
                    , marker[kind]
                    , linewidth = linewidth
                    #, color = "blue"
                    )
        else:
            plt.plot( df.values[:, 0] - 1
                    , df.values[:,1:6]
                    , marker[kind]
                    , linewidth = linewidth
                    #, color = "r"
                    )
        ax.legend   ( legends
                    , fontsize = fontsize
                    )
        plt.xticks(fontsize = fontsize - 2)
        plt.yticks(fontsize = fontsize - 2)
        ax.set_xlabel(xlab[kind], fontsize = 17)
        ax.set_ylabel("The Hubble Parameter [km / s / Mpc]", fontsize = 17)
        #ax.set_xlim([0.0, 5])
        #ax.set_ylim([0.05, 0.5])
        #ax.set_xscale("log")
        ax.set_yscale("log")
        plt.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
        ax.tick_params(axis = "y", which = "minor")
        ax.tick_params(axis = "x", which = "minor")
        plt.tight_layout()
        # save next to the input file, swapping the extension
        plt.savefig(fileList[0].replace(".txt",".png"))
    elif len(fileList) > 1:
        # More than one file of this kind is ambiguous: refuse to guess.
        sys.exit("Ambiguous file list exists.")
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@example@fortran@pm_cosmology@getHubbleParamNormedSq@main.py@.PATH_END.py
|
{
"filename": "_maxpoints.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scattergl/stream/_maxpoints.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class MaxpointsValidator(_plotly_utils.basevalidators.NumberValidator):
    """Auto-generated validator for the ``scattergl.stream.maxpoints`` property."""
    def __init__(
        self, plotly_name="maxpoints", parent_name="scattergl.stream", **kwargs
    ):
        # Number property limited to [0, 10000]; edits trigger a "calc" pass.
        # Defaults may be overridden through **kwargs (hence kwargs.pop).
        super(MaxpointsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            max=kwargs.pop("max", 10000),
            min=kwargs.pop("min", 0),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scattergl@stream@_maxpoints.py@.PATH_END.py
|
{
"filename": "lenser_galaxy.py",
"repo_name": "DrexelLenser/Lenser",
"repo_path": "Lenser_extracted/Lenser-master/lenser_galaxy.py",
"type": "Python"
}
|
"""
Module: lenser_galaxy
.. synopsis: Holds a real galaxy image, or a model galaxy image and model parameters
.. module author: Evan J. Arena <evan.james.arena@drexel.edu>
"""
import numpy as np
from astropy.io import fits
from astropy.convolution import convolve_fft
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
class Galaxy(object):
    """
    Galaxy class:
    .. Holds the list of parameters used in the modified S\'ersic model.
    .. There are six galaxy shape parameters and (up to) seven lens parameters:
    .. .. p = {xc,yc,ns,rs,q,phi,psi11,psi12,psi22,psi111,psi112,psi122,psi222}
    .. Galaxy().generateImage() function:
    .. .. Holds the modified S\'ersic model
    .. .. Points to the Lens() class and performs the lensing coordinate deprojection
    .. .. Points to the Image() class to create a two-dimensional image of the model
    .. .. Performs PSF convolution if a PSF is available
    """
    def __init__(self, xc=0., yc=0., ns=0.5, rs=1., q=1., phi=0., galaxyLens=None, galaxyQuadrupole=None):
        # Shape parameters (see setPar for meanings); lens and quadrupole
        # holders are optional and may be attached later.
        self.name = ''
        self.xc = xc
        self.yc = yc
        self.ns = ns
        self.rs = rs
        self.q = q
        self.phi = phi
        self.galaxyLens = galaxyLens
        self.galaxyQuadrupole = galaxyQuadrupole
    def setPar(self, val, type):
        """
        Set the value for each galaxy shape parameter
        .. xc: centroid x coordinate
        .. yc: centroid y coordinate
        .. ns: seric index
        .. rs: characteristic radius
        .. q: semi-major-to-semi-minor axis ratio
        .. phi: orientation angle
        """
        if (type == 'xc'):
            self.xc = val
        elif (type == 'yc'):
            self.yc = val
        elif (type == 'ns'):
            self.ns = val
        elif (type == 'rs'):
            self.rs = val
        elif (type == 'q'):
            self.q = val
        elif (type == 'phi'):
            self.phi = val
    def setPsi2(self, psi2new):
        """
        Set the values for the psi2 lensing array
        .. These include the covergence and shear terms
        .. psi2 = [psi11, psi12, psi22]
        """
        self.galaxyLens.setPsi2(psi2new)
    def setPsi3(self, psi3new):
        """
        Set the values for the psi3 lensing array
        .. These include the flexion terms
        .. psi3 = [psi111, psi112, psi122, psi222]
        """
        self.galaxyLens.setPsi3(psi3new)
    def setLens(self, newlens):
        """
        Set the entire lensing array with one call
        ... psi = [psi2, psi3]
        """
        self.galaxyLens = newlens
    def setQuadrupole(self, quadrupolenew):
        """
        Set the quadrupole moments Q_ij for an Image (or MultiImage, averaged over all
        available epochs and bands) for ease of access, so they only need to be calculated
        once rather than calling Image().getMoments() multiple times
        quadrupolenew = (Q11, Q12, Q22)
        """
        # Bug fix: previously assigned to self.GalaxyQuadrupole (capital G),
        # which silently created a new attribute instead of updating
        # self.galaxyQuadrupole defined in __init__.
        self.galaxyQuadrupole = quadrupolenew
    def generateImage(self, nx, ny, lens=False, I0=1.,
                      noise1=0, noise2=0, seed=None,
                      background=0, psfmap=None):
        """
        Create a galaxy image
        .. Holds the modified S\'ersic model
        .. Points to the Lens class and performs the lensing coordinate deprojection
        .. Points to the Image class to create a two-dimensional image of the model
        .. Performs PSF convolution if a PSF is available
        """
        # Lay down the coordinate system
        # .. The coordinates correspond to the bottom-left of each pixel
        # .. The coordinate system's origin is the centroid of the galaxy
        y0,x0 = np.mgrid[0:nx,0:ny]
        delx = nx/2
        dely = ny/2
        # theta: the lens-plane coordinates
        thetax = x0-self.xc-0.5-delx
        thetay = y0-self.yc-0.5-dely
        # Deproject theta --> beta: the source-plane coordinates
        if (lens == True) and (self.galaxyLens is not None):
            betax,betay = self.galaxyLens.deproject(thetax,thetay)
        else:
            betax = thetax
            betay = thetay
        # x and y are the centroid-subtracted source-plane coordinates rotated
        # appropriately by an orientation angle phi
        x = betax*np.cos(self.phi)+betay*np.sin(self.phi)
        y = betay*np.cos(self.phi)-betax*np.sin(self.phi)
        # theta_prime is the radial coordinate,
        # and q is the semimajor-to-semiminoraxis ratio of the galaxy
        theta_prime = np.sqrt(x**2/self.q**2+y**2)
        # Create the galaxy model datamap using the modified S\'ersic intensity profile
        # .. Note that the model is normalized to have I0=1 initially
        datamap = np.exp(-(theta_prime/self.rs)**(1/self.ns))
        # If a PSF is present, convolve it with the galaxy model
        if psfmap is not None:
            datamap = convolve_fft(datamap, psfmap,
                                   normalize_kernel = False, psf_pad = False,
                                   nan_treatment = 'fill')#, fft_pad = False )
        # Multiply the datamap by I0
        datamap = I0*datamap
        # If you wish to simulate a realistic galaxy image
        # (as opposed to using this function to simply create a galaxy model),
        # you likely want to generate some random noise and add a background:
        # noise = random.normal(0, sqrt(noise1**2 + noise2**2*f))
        # .. note that by default, noise = 0 and background = 0
        # .. Use these defaults when creating a pure galaxy model.
        if seed is not None:
            np.random.seed(seed)
        noisemap = np.sqrt(noise1**2+noise2**2*abs(datamap))
        datamap = datamap+noisemap*np.random.normal(size=datamap.shape)+background
        # Create an Image of this galaxy
        myImage = Image(self.name,datamap,noisemap)
        return myImage
class Lens(object):
    """
    Lens class:
    .. Handles the lensing coordinate deprojection
    .. Temporarily holds the (up to) seven lens parameters before they are passed into the Galaxy() class.
    .. psi2 = [psi11, psi12, psi22]: convergence and shear terms
    .. psi3 = [psi111, psi112, psi122, psi222]: flexion terms
    """
    def __init__(self, psi2=None, psi3=None):
        # Bug fix: avoid mutable default arguments -- a shared default list
        # could be mutated in place and leak state between Lens instances.
        self.psi2 = [0, 0, 0] if psi2 is None else psi2
        self.psi3 = [0, 0, 0, 0] if psi3 is None else psi3
    def deproject(self, thetax, thetay):
        """
        Lensing coordinate deprojection
        .. Note: thetax, thetay can be numbers or numpy objects.
        .. Returns the source-plane coordinates (betax, betay) obtained from
           the second- and third-order lensing expansion.
        """
        betax = thetax
        betay = thetay
        # combinatorial prefactors for the third-order (flexion) terms
        fact = [0.5,1,0.5]
        for i in range(2):
            betax = betax-self.psi2[i]*thetax**(1-i)*thetay**(i)
            betay = betay-self.psi2[i+1]*thetax**(1-i)*thetay**(i)
        for i in range(3):
            betax = betax-fact[i]*self.psi3[i]*thetax**(2-i)*thetay**(i)
            betay = betay-fact[i]*self.psi3[i+1]*thetax**(2-i)*thetay**(i)
        return betax, betay
    def setPsi2(self, psi2new):
        """Replace the psi2 (convergence/shear) array."""
        self.psi2 = psi2new
    def setPsi3(self, psi3new):
        """Replace the psi3 (flexion) array."""
        self.psi3 = psi3new
class Image(object):
"""
Image class:
.. Holds various two-dimensional arrays referred to as "maps"
.. .. datamap:
.. .. .. Corresponds to the science image postage stamp of a galaxy.
.. .. .. Can either by a real galaxy image from a FITS file, which can be handled with lenser_fits.py,
or it can be a model galaxy image, generated by Galaxy().generateImage()
.. .. noisemap:
.. .. .. rms noise in the galaxy image.
.. .. .. Can either be a real noisemap from a FITS file, which can be handled with lenser_fits.py,
or, in the absense of a noisemap, Image() generates one.
.. .. weightmap:
.. .. .. Inverse variance (1/noisemap**2) weighting of image noise
.. .. .. NOTE: One should only supply either a noisemap or a weightmap
.. .. psfmap:
.. .. .. Point-spread function (PSF) associated with galaxy image.
.. .. .. If one is not provided, PSF convolution is ignored throughout Lenser.
.. Holds various two-dimensional arrays referred to as "masks"
.. .. segmentation mask:
.. .. .. Obtained from the SExtractor segmentation map and lenser_fits.py
.. .. .. Bitmask where all non-galaxy pixels = 0 (galaxy pixels and background pixels = 1)
.. .. background mask:
.. .. .. Obtained from the SExtractor segmentation map and lenser_fits.py
.. .. .. Bitmask where only background pixels = 1
.. .. object mask (optional):
.. .. .. Obtained from the SExtractor segmentation map and lenser_fits.py
.. .. .. Bitmask where only galaxy pixels = 1
.. .. ubersegmentation mask (optional):
.. .. .. Obtained from lenser_fits.py
.. .. .. Bitmask where any pixel that is closer to another object than the galaxy = 0
.. .. weighted ubersegmentation mask (optional):
.. .. .. weightmap multiplied by the ubersegmentation mask
.. .. elliptical mask:
.. .. .. Generated so as to include only relevant pixels in the input image, reducing error from sources
near the edge of the postage stamp. During this process, we also estimate: (i). the background map
and (ii). the noisemap, in the case that a noisemap is not already provided.
Option to subtract the background from the datamap.
"""
    def __init__(self, name=None, datamap=None,
                 noisemap=None, wtmap=None,
                 ellipmask=None, segmask=None,
                 ubersegmask=None, wtubersegmask=None,
                 bgmask=None, objmask=None,
                 psfmap=None,
                 gain=None, sky=None):
        """
        Store the science image and its associated maps/masks.
        .. Supply either a noisemap or a weightmap (wtmap), not both
           (see the class docstring).
        .. All arguments are optional; missing maps simply stay None.
        """
        self.name = name
        self.datamap = datamap
        self.noisemap = noisemap
        self.wtmap = wtmap
        self.ellipmask = ellipmask
        self.segmask = segmask
        self.ubersegmask = ubersegmask
        self.wtubersegmask = wtubersegmask
        self.bgmask = bgmask
        self.objmask = objmask
        self.psfmap = psfmap
        # Optional, in case where noisemaps or weightmaps are not provided:
        self.gain = gain # Gain in galaxy image
        self.sky = sky # Sky-level in galaxy image
        # Weighted mask, calculated for convinence for use in lenser_aim.py
        self.wtMask = None
        # Dimensions of datamap
        if datamap is not None:
            self.nx = self.datamap.shape[0]
            self.ny = self.datamap.shape[1]
    def getName(self):
        """
        Get string of the galaxy name
        (plain accessor for self.name).
        """
        return self.name
def getLaTeXName(self):
"""
Get galaxy name in LaTeX formatting
"""
return r'${\rm '+self.name.replace('_',r'~')+'}$'
def getMap(self, type='data'):
"""
Return various maps
"""
if type == 'data':
return self.datamap
elif type == 'noise':
return self.noisemap
elif type == 'wt':
if self.wtmap is not None:
return self.wtmap
elif self.noisemap is not None:
return 1/(self.noisemap)**2.
elif type == 'ellipmask':
return self.ellipmask
elif type == 'segmask':
return self.segmask
elif type == 'uberseg':
return self.ubersegmask
elif type == 'totalmask':
if self.ellipmask is not None:
if self.ubersegmask is not None:
return self.ellipmask*self.ubersegmask
elif self.segmask is not None:
return self.ellipmask*self.segmask
else:
if self.ubersegmask is not None:
return self.ubersegmask
elif self.segmask is not None:
return self.segmask
elif type == 'wt_totalmask':
if self.wtmap is not None:
wt = self.wtmap
elif self.noisemap is not None:
wt = 1/(self.noisemap)**2.
if self.ellipmask is not None:
if self.ubersegmask is not None:
return wt*self.ellipmask*self.ubersegmask
elif self.segmask is not None:
return wt*self.ellipmask*self.segmask
else:
if self.ubersegmask is not None:
return wt*self.ubersegmask
elif self.segmask is not None:
return wt*self.segmask
else:
return wt
elif type == 'psf':
return self.psfmap
elif type == 'bgmask':
return self.bgmask
    def plot(self, type='data', show=False, save=True):
        """
        Plot individual maps.
        .. We multiply the datamap by available masks for better visualization
           (otherwise extraneous pixels have overpowering brightness).
        .. type selects the map: 'data', 'noise', 'wt', 'totalmask', or 'psf'
        .. save writes '<name>_<type>.pdf'; show opens an interactive window
        """
        if (type == 'data'):
            plt.imshow(self.datamap*self.getMap(type='totalmask'),cmap='gray',origin='lower')
            plt.title(self.getLaTeXName())
        elif (type == 'noise'):
            # silently skipped when the requested map is unavailable
            if self.noisemap is not None:
                plt.imshow(self.noisemap,cmap='gray',origin='lower')
                plt.title(self.getLaTeXName()+' '+r'${\rm noise~map}$')
        elif (type == 'wt'):
            if self.wtmap is not None:
                plt.imshow(self.wtmap,cmap='gray',origin='lower')
                plt.title(self.getLaTeXName()+' '+r'${\rm weight~map}$')
        elif (type == 'totalmask'):
            plt.imshow(self.getMap(type='totalmask'),cmap='gray',origin='lower')
            plt.title(self.getLaTeXName()+' '+r'${\rm mask~map}$')
        elif (type == 'psf'):
            if self.psfmap is not None:
                plt.imshow(self.psfmap,cmap='gray',origin='lower')
                plt.title(self.getLaTeXName()+' '+r'${\rm PSF~map}$')
        if save == True:
            plt.savefig(self.getName()+'_'+type+'.pdf', format='pdf')
        if show == True:
            plt.show()
    def getMoments(self, Qijkl, id=None):
        """
        Get the (n + m) image moments <x^n y^m>.
        The moments, up to order four, are
        .. Order 1: <x>, <y>
        .. Order 2: <xx>, <xy>, <yy>
        .. Order 3: <xxx>, <xxy>, <xyy>, <yyy>
        .. Order 4: <xxxx>, <xxxy>, <xxyy>, <xyyy>, <yyyy>
        .. Qijkl selects which quantity to return ('f0', 'xc', 'Q11', ...,
           'all', or one of the grouped keys); returns None for an
           unrecognized key.
        .. id (optional): index array (e.g. from np.where) restricting the
           sums to a pixel subset; otherwise the total mask weights the sums.
        """
        # Lay down the coordinate system
        # .. The coordinates correspond to the bottom-left of each pixel
        y,x = np.mgrid[0:self.nx,0:self.ny]
        x = x-0.5
        y = y-0.5
        centroid = np.zeros(2)
        order1 = np.zeros(2)
        order2 = np.zeros(3)
        order3 = np.zeros(4)
        order4 = np.zeros(5)
        if id == None:
            # Flux-weighted sums over the whole (masked) image; f0 is the
            # total flux used to normalize every moment.
            f0 = np.sum(self.datamap*self.getMap('totalmask'))
            centroid[0] = np.sum(self.datamap*x*self.getMap('totalmask'))/f0
            centroid[1] = np.sum(self.datamap*y*self.getMap('totalmask'))/f0
            # dx, dy: centroid-subtracted coordinates
            dx = x-centroid[0]
            dy = y-centroid[1]
            for idx in range(2):
                order1[idx] = np.sum(self.datamap*pow(dx,1-idx)*pow(dy,idx)*self.getMap('totalmask'))/f0
            for idx in range(3):
                order2[idx] = np.sum(self.datamap*pow(dx,2-idx)*pow(dy,idx)*self.getMap('totalmask'))/f0
            for idx in range(4):
                order3[idx] = np.sum(self.datamap*pow(dx,3-idx)*pow(dy,idx)*self.getMap('totalmask'))/f0
            for idx in range(5):
                order4[idx] = np.sum(self.datamap*pow(dx,4-idx)*pow(dy,idx)*self.getMap('totalmask'))/f0
        elif id != None:
            # Same computation restricted to the pixel subset selected by id.
            f0 = np.sum(self.datamap[id])
            centroid[0] = np.sum(self.datamap[id]*x[id])/f0
            centroid[1] = np.sum(self.datamap[id]*y[id])/f0
            dx = x[id]-centroid[0]
            dy = y[id]-centroid[1]
            for idx in range(2):
                order1[idx] = np.sum(self.datamap[id]*pow(dx,1-idx)*pow(dy,idx))/f0
            for idx in range(3):
                order2[idx] = np.sum(self.datamap[id]*pow(dx,2-idx)*pow(dy,idx))/f0
            for idx in range(4):
                order3[idx] = np.sum(self.datamap[id]*pow(dx,3-idx)*pow(dy,idx))/f0
            for idx in range(5):
                order4[idx] = np.sum(self.datamap[id]*pow(dx,4-idx)*pow(dy,idx))/f0
        # Dispatch on the requested quantity.
        if Qijkl == 'f0':
            return f0
        elif Qijkl == 'x':
            return x
        elif Qijkl == 'y':
            return y
        elif Qijkl == 'xc':
            return centroid[0]
        elif Qijkl == 'yc':
            return centroid[1]
        elif Qijkl == 'Q1':
            return order1[0]
        elif Qijkl == 'Q2':
            return order1[1]
        elif Qijkl == 'Q11':
            return order2[0]
        elif Qijkl == 'Q12':
            return order2[1]
        elif Qijkl == 'Q22':
            return order2[2]
        elif Qijkl == 'Q111':
            return order3[0]
        elif Qijkl == 'Q112':
            return order3[1]
        elif Qijkl == 'Q122':
            return order3[2]
        elif Qijkl == 'Q222':
            return order3[3]
        elif Qijkl == 'Q1111':
            return order4[0]
        elif Qijkl == 'Q1112':
            return order4[1]
        elif Qijkl == 'Q1122':
            return order4[2]
        elif Qijkl == 'Q1222':
            return order4[3]
        elif Qijkl == 'Q2222':
            return order4[4]
        elif Qijkl == 'all':
            return f0, x, y, centroid, order1, order2, order3, order4
        elif Qijkl == 'x,y,centroid,order2':
            return x, y, centroid, order2
        elif Qijkl == 'centroid,order2':
            return centroid, order2
        elif Qijkl == 'order2':
            return order2
        elif Qijkl =='order2,order3,order4':
            return order2, order3, order4
def generateEllipticalMask(self, subtractBackground=True):
    """
    Generate the elliptical mask for this image.

    As part of this process we also estimate:
      (i)  the background, fit as a plane and (optionally) subtracted from
           the datamap; the background itself is not stored as an attribute,
      (ii) the noisemap, in the case that a noisemap was not already
           provided to Lenser and read in through lenser_fits.py.

    Parameters
    ----------
    subtractBackground : bool, optional
        If True (default), subtract the fitted background plane from
        ``self.datamap`` in place.
    """
    # -- Background estimation ------------------------------------------
    # If no background (segmentation) mask was provided, build one from
    # the edges of the postage stamp: a border whose thickness is 10% of
    # the stamp width and height in x and y, respectively.
    if self.bgmask is None:
        bg_mask = np.zeros(self.datamap.shape)
        # 10% buffer
        xbuf = int(0.1 * self.nx)
        ybuf = int(0.1 * self.ny)
        bg_mask[0:ybuf] = 1           # rows at the top of the stamp
        bg_mask[-(1 + ybuf):] = 1     # rows at the bottom of the stamp
        bg_mask[:, 0:xbuf] = 1        # columns at the left of the stamp
        bg_mask[:, -(1 + xbuf):] = 1  # columns at the right of the stamp
        self.bgmask = bg_mask
    # Get background pixels and their corresponding indices.
    # (The fit below was previously duplicated in both branches; it is
    # identical either way, so it is done once here.)
    id_bg = np.where(self.bgmask == 1)
    bg_pix = self.datamap[id_bg]
    # Lay down the coordinate system
    # .. The coordinates correspond to the bottom-left of each pixel
    y, x = np.mgrid[0:self.nx, 0:self.ny]
    x = x - 0.5
    y = y - 0.5

    # Functional form of a background plane with z-intercept bg0, and
    # slopes mx, my about the stamp center
    def bgGradient(coords, bg0, mx, my):
        x = coords[0]
        y = coords[1]
        return (bg0 + mx * (x - self.nx / 2.) + my * (y - self.ny / 2.))

    # Do a best fit for the background map using only the masked pixels
    bg0_guess = np.median(self.datamap[id_bg])
    pars_guess = np.asarray([bg0_guess, 0., 0.])
    coords_id_bg = np.array((x[id_bg], y[id_bg]))
    popt, pcov = optimize.curve_fit(bgGradient, coords_id_bg, bg_pix, pars_guess)
    bg = bgGradient(np.array((x, y)), *popt)
    # Subtract the background from the datamap (if requested)
    if subtractBackground:
        self.datamap = self.datamap - bg
    # -- Noisemap estimation (if one is not provided) -------------------
    # .. The assumption here is that the noise contributions are a flat
    # .. sky noise and a Poisson noise
    if self.noisemap is None and self.wtmap is None and self.wtubersegmask is None:
        id_bg = np.where(self.bgmask == 1)
        noise1 = np.ma.std(self.datamap[id_bg]) * np.ones(self.datamap.shape)
        if self.gain is None:
            # Without a gain we cannot model the Poisson term
            noise2 = 0  # noise2 = np.sqrt(abs(self.datamap*self.segmask))
        else:
            counts = self.datamap * self.getMap('totalmask')
            sky = self.sky * self.segmask
            noise2 = np.sqrt(abs(counts + sky + bg) / self.gain)
        self.noisemap = np.sqrt(noise1**2. + noise2**2.)
    # -- Elliptical mask ------------------------------------------------
    # .. nsig is a heuristical number; try progressively larger values
    # .. until the mask contains a reasonable number (> 10) of pixels
    nsig_list = np.array((2.5, 2.75, 3, 3.25, 3.5, 3.75, 4.0))
    id_sel = (np.array([], dtype=int),)
    for nsig in nsig_list:
        # Seed pixel set: pixels significantly above the noise
        if self.wtmap is not None:
            id_sel = np.where(self.datamap * self.getMap('totalmask') >
                              nsig * (1 / (np.sqrt(abs(self.wtmap)))) * self.getMap('totalmask'))
        elif self.noisemap is not None:
            id_sel = np.where(self.datamap * self.getMap('totalmask') >
                              nsig * self.noisemap * self.getMap('totalmask'))
        # Iteratively refine the ellipse from the moments of the pixel set
        # (the inner loop variable used to shadow the outer index `i`)
        for _ in range(3):
            x, y, centroid, order2 = self.getMoments('x,y,centroid,order2', id_sel)
            xc = centroid[0]
            yc = centroid[1]
            Q11 = order2[0]
            Q12 = order2[1]
            Q22 = order2[2]
            chi1 = (Q11 - Q22) / (Q11 + Q22)
            chi2 = 2 * Q12 / (Q11 + Q22)
            chisq = chi1**2 + chi2**2
            phi = np.arctan2(chi2, chi1) / 2.
            # Rotate into the frame aligned with the ellipse axes
            x1 = (x - xc) * np.cos(phi) + (y - yc) * np.sin(phi)
            y1 = (y - yc) * np.cos(phi) - (x - xc) * np.sin(phi)
            # Elliptical mask:
            id_sel = np.where((x1 / np.sqrt(1 + chisq))**2 + (y1 / np.sqrt(1 - chisq))**2
                              < nsig**2 * (Q11 + Q22))
        # BUGFIX: this test was `len(id[0]>10)`, which by operator
        # precedence takes the length of an elementwise boolean array and
        # is truthy for ANY non-empty pixel set; the intent is to require
        # more than 10 masked pixels.
        if len(id_sel[0]) > 10:
            self.ellipmask = np.zeros(self.datamap.shape)
            self.ellipmask[id_sel] = 1
            break
    # Final fallback: if no nsig produced a large-enough mask, keep
    # everything (mask of ones)
    if len(id_sel[0]) > 10:
        self.ellipmask = np.zeros(self.datamap.shape)
        self.ellipmask[id_sel] = 1
    else:
        self.ellipmask = np.ones(self.datamap.shape)
class MultiImage(object):
    """
    Multi-Image class:
    .. Creates a list that holds multiple Image() instantiations for a single galaxy.
    .. For use in multi-band and/or multi-epoch fitting.
    """
    def __init__(self, namelist=None, datalist=None,
                 noiselist=None, wtlist=None,
                 ellipmasklist=None, seglist=None,
                 uberseglist=None, wtuberseglist=None,
                 bgmasklist=None, objmasklist=None,
                 psflist=None,
                 generateEllipticalMask_bool=True,
                 subtractBackground_bool=True):
        self.namelist = namelist
        self.datalist = datalist
        # Placeholder list with one None per epoch; None when no datalist
        # is available at all.
        try:
            Nonelist = [None for i in range(len(datalist))]
        except TypeError:
            # datalist is None (or not sized): no epochs are available
            Nonelist = None
        # BUGFIX: use `is None` instead of `== None`. Identity comparison
        # is the correct idiom and avoids ambiguous elementwise comparison
        # results when numpy arrays are passed for any of these arguments.
        if noiselist is None:
            noiselist = Nonelist
        self.noiselist = noiselist
        if wtlist is None:
            wtlist = Nonelist
        self.wtlist = wtlist
        if ellipmasklist is None:
            ellipmasklist = Nonelist
        self.ellipmasklist = ellipmasklist
        if seglist is None:
            seglist = Nonelist
        self.seglist = seglist
        if uberseglist is None:
            uberseglist = Nonelist
        self.uberseglist = uberseglist
        if wtuberseglist is None:
            wtuberseglist = Nonelist
        self.wtuberseglist = wtuberseglist
        if bgmasklist is None:
            bgmasklist = Nonelist
        self.bgmasklist = bgmasklist
        if objmasklist is None:
            objmasklist = Nonelist
        self.objmasklist = objmasklist
        if psflist is None:
            psflist = Nonelist
        self.psflist = psflist
        self.generateEllipticalMask_bool = generateEllipticalMask_bool
        self.subtractBackground_bool = subtractBackground_bool
        try:
            # Get number of epochs (i.e. number of observations for a single
            # galaxy, be they different exposures in a single band, or across
            # multiple bands)
            self.N_epochs = len(datalist)
            # Generate list of Image() instances
            self.Imagelist = []
            self.generateImagelist()
        except (TypeError, IndexError):
            # Best-effort fallback (previously a bare `except`): the inputs
            # are missing or inconsistently sized, so expose the placeholder
            # list instead of a list of Image() instances.
            self.Imagelist = Nonelist

    def generateImagelist(self):
        """
        Loop over all available epochs and create an Image() instance for each one.
        """
        for i in range(self.N_epochs):
            im = Image(name=self.namelist[i], datamap=self.datalist[i],
                       noisemap=self.noiselist[i], wtmap=self.wtlist[i],
                       ellipmask=self.ellipmasklist[i], segmask=self.seglist[i],
                       ubersegmask=self.uberseglist[i], wtubersegmask=self.wtuberseglist[i],
                       bgmask=self.bgmasklist[i], objmask=self.objmasklist[i],
                       psfmap=self.psflist[i])
            if self.generateEllipticalMask_bool:
                im.generateEllipticalMask(subtractBackground=self.subtractBackground_bool)
            self.Imagelist.append(im)
|
DrexelLenserREPO_NAMELenserPATH_START.@Lenser_extracted@Lenser-master@lenser_galaxy.py@.PATH_END.py
|
{
"filename": "paper.md",
"repo_name": "finagle29/dbsp_drp",
"repo_path": "dbsp_drp_extracted/dbsp_drp-main/paper/paper.md",
"type": "Markdown"
}
|
---
title: 'DBSP_DRP: A Python package for automated spectroscopic data reduction of DBSP data'
tags:
- Python
- astronomy
- data reduction
- spectroscopy
authors:
- name: Milan Sharma Mandigo-Stoba^[Present address: Department of Physics and Astronomy, University of California, Los Angeles.]
given-names: Milan Sharma
surname: Mandigo-Stoba
orcid: 0000-0003-1118-3132
affiliation: "1,2"
- name: Christoffer Fremling
orcid: 0000-0002-4223-103X
affiliation: 2
- name: Mansi M. Kasliwal
orcid: 0000-0002-5619-4938
affiliation: 2
affiliations:
- name: Schmidt Academy of Software Engineering, California Institute of Technology
index: 1
- name: Division of Physics, Mathematics and Astronomy, California Institute of Technology
index: 2
date: 26 October 2021
bibliography: paper.bib
nocite: |
@Lunnan2020
---
# Summary
<!--
A summary describing the high-level functionality and purpose of the software
for a diverse, non-specialist audience.
-->
In astronomy, the spectrum of light emitted from astrophysical sources is of
great use, allowing astronomers to classify objects and measure their
properties.
To measure the spectrum of a source, astronomers use spectrographs, in which
dispersive elements spatially separate the incoming light by wavelength, and
detectors, most commonly CCDs, image this dispersed light.
But to do science with the spectrum, the 2D image in pixel coordinates taken by
the CCD must be converted into a 1D spectrum of flux vs. wavelength.
This process of converting 2D CCD images into 1D spectra is called data
reduction.
To increase the signal-to-noise ratio, astronomers can take multiple exposures
of the same object and coadd their 1D spectra to reveal faint absorption lines
or increase the precision with which an important emission line can be measured.
Many spectrographs have multiple paths that light can go through, and multiple
detectors, each measuring a particular part of the spectrum, to increase the
wavelength range that can be captured in a single exposure, or to allow the
high resolution observation of distinct wavelength ranges.
If two detectors cover an overlapping region, caused by partial reflectance of
a dichroic (wavelength-dependent beam splitter), then the spectra from each
detector need to be spliced together, combining the light collected by each
detector.
DBSP_DRP is a python package that provides fully automated data reduction of
data taken by the Double Spectrograph (DBSP) at the 200-inch Hale Telescope at
Palomar Observatory [@Oke&Gunn1982].
The underlying data reduction functionality to extract 1D spectra, perform flux
calibration and correction for atmospheric absorption, and coadd spectra
together is provided by PypeIt [@Prochaska2020].
The new functionality that DBSP_DRP brings is in orchestrating the complex data
reduction process by making smart decisions so that no user input is required
after verifying the correctness of the metadata in the raw FITS files in a
table-like GUI.
Though the primary function of DBSP_DRP is to automatically reduce an entire
night of data without user input, it has the flexibility for astronomers to
fine-tune the data reduction with GUIs for manually identifying the faintest
objects, as well as exposing the full set of PypeIt parameters to be tweaked
for users with particular science needs.
DBSP_DRP also handles some of the occasional quirks specific to DBSP, such as
swapped FITS header cards, one or more extra null bytes appended to FITS files
(making them nonconformant with the FITS specification), and observation
coordinates that were never written to file.
Additionally, DBSP_DRP contains a quicklook script for making real-time
decisions during an observing run, and it can open a GUI displaying a minimally
reduced exposure in under 15 seconds.
Docker containers are available for ease of deploying DBSP_DRP in its quicklook
configuration (without some large atmospheric model files) or in its full
configuration.
# Statement of Need
<!--
A Statement of Need section that clearly illustrates the research purpose of
the software.
-->
Palomar Observatory, located near San Diego, CA, is a multinational observatory
with a broad user base.
Users come from large and small institutions, and their observing experience
ranges from novice to expert.
One responsibility for serving such a diverse user base is to provide software
data reduction pipelines for the most frequently used instruments, such as the
Palomar Double Spectrograph (DBSP).
Although DBSP was commissioned in 1982, it remains the workhorse instrument of
the 200” Hale Telescope.
It is used on 42% of the nights in a year, comprising nearly all of the
valuable “dark” (moonless) time.
In previous years, standard astronomical practice left the data reduction up to
the user.
However, attitudes in instrument-building have shifted since DBSP was built.
A pipeline is now considered an indispensable component of an astronomical
instrument.
In fact, the difference between a good pipeline and a great pipeline means the
difference between counting some of the photons vs. counting all of the photons.
Spectroscopy is a severe bottleneck in time-domain astronomy; currently less
than 10% of discoveries are spectroscopically classified.
Without a pipeline, data reduction is a difficult process and the standard
method without a pipeline is to use IRAF [@IRAF86;@IRAF93], a 35-year-old
program on which development and maintenance was discontinued in 2013 and whose
use is discouraged by many in the field (e.g. @Ogaz2018).
Needless to say, data reduction sans existing pipeline is extremely
time-consuming.
There is a clear need for a modern and stable automated data reduction pipeline
for DBSP.
During observing runs, one would like to be able to quickly inspect data as it
is taken, in order to ensure that it is of sufficient quality to do the desired
science with.
For objects whose brightness may have changed between a previous observation
and the current observing run, or for nights with highly variable cloud cover,
the observer may be unsure how long of an exposure is needed to produce quality
data.
For very faint objects, objects in crowded fields, or objects with uncertain
positions (e.g. due to high or uncertain motion across the sky), the observer
may not even be sure that the telescope is pointed at the right object!
A quicklook functionality, which can do a rudimentary reduction to correct for
instrumental signatures and subtract light from the sky, revealing the spectra
of the objects observed, can answer questions of exposure time and whether the
object observed is the right one.
DBSP_DRP is currently being used by the ZTF Bright Transient Survey
[@Fremling2020;@Perley2020], the ZTF Census of the Local Universe [@De2020],
and a program investigating ZTF Superluminous Supernovae
(Lunnan et al., 2020; Chen et al., in preparation).
@Ravi2021arXiv is the first (known) publication that used DBSP_DRP for data
reduction.
The development of DBSP_DRP also lays the groundwork towards a fully automated
pipeline for the Next Generation Palomar Spectrograph that is planned to be
deployed on the Palomar 200-inch Hale Telescope in 2023.
# Acknowledgements
M.S.M.-S. acknowledges funding from the Schmidt Academy of Software Engineering,
which is supported by the generosity of Eric and Wendy Schmidt by
recommendation of the Schmidt Futures program.
We thank the following members of the time domain astronomy group at Caltech
for beta-testing and providing valuable feedback during the development of this
pipeline: Andy Tzanidakis, Lin Yan, Aishwarya Dahiwale, Yuhan Yao, Yashvi
Sharma, and Igor Andreoni.
M.S.M.-S. is extremely grateful to the welcoming, friendly, and helpful team of
developers on the PypeIt team, without whom this package would not exist.
This research made use of Astropy,[^2] a community-developed core Python
package for Astronomy [@astropy:2013;@astropy:2018].
[^2]: http://www.astropy.org
# References
|
finagle29REPO_NAMEdbsp_drpPATH_START.@dbsp_drp_extracted@dbsp_drp-main@paper@paper.md@.PATH_END.py
|
{
"filename": "_line.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/graph_objs/layout/mapbox/layer/_line.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseLayoutHierarchyType as _BaseLayoutHierarchyType
import copy as _copy
class Line(_BaseLayoutHierarchyType):
    # class properties
    # --------------------
    _parent_path_str = "layout.mapbox.layer"
    _path_str = "layout.mapbox.layer.line"
    _valid_props = {"dash", "dashsrc", "width"}

    # dash
    # ----
    @property
    def dash(self):
        """
        Length of dashes and gaps (mapbox.layer.paint.line-dasharray).
        Only used when `type` is set to "line".

        The 'dash' property is an array that may be specified as a tuple,
        list, numpy array, or pandas Series

        Returns
        -------
        numpy.ndarray
        """
        return self["dash"]

    @dash.setter
    def dash(self, val):
        self["dash"] = val

    # dashsrc
    # -------
    @property
    def dashsrc(self):
        """
        Source reference on Chart Studio Cloud for the `dash` array.

        The 'dashsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["dashsrc"]

    @dashsrc.setter
    def dashsrc(self, val):
        self["dashsrc"] = val

    # width
    # -----
    @property
    def width(self):
        """
        Line width (mapbox.layer.paint.line-width).
        Only used when `type` is set to "line".

        The 'width' property is a number and may be specified as:
          - An int or float

        Returns
        -------
        int|float
        """
        return self["width"]

    @width.setter
    def width(self, val):
        self["width"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        dash
            Sets the length of dashes and gaps
            (mapbox.layer.paint.line-dasharray). Has an effect only
            when `type` is set to "line".
        dashsrc
            Sets the source reference on Chart Studio Cloud for
            dash .
        width
            Sets the line width (mapbox.layer.paint.line-width).
            Has an effect only when `type` is set to "line".
        """

    def __init__(self, arg=None, dash=None, dashsrc=None, width=None, **kwargs):
        """
        Construct a new Line object

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.layout.mapbox.layer.Line`
        dash
            Sets the length of dashes and gaps
            (mapbox.layer.paint.line-dasharray). Has an effect only
            when `type` is set to "line".
        dashsrc
            Sets the source reference on Chart Studio Cloud for
            dash .
        width
            Sets the line width (mapbox.layer.paint.line-width).
            Has an effect only when `type` is set to "line".

        Returns
        -------
        Line
        """
        super(Line, self).__init__("line")

        # Short-circuit construction when built internally as a child of an
        # existing figure hierarchy.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Validate arg
        # ------------
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.layout.mapbox.layer.Line
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.mapbox.layer.Line`"""
            )

        # Handle skip_invalid
        # -------------------
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate data dict with properties: an explicit keyword argument
        # wins over the corresponding entry popped from `arg`.
        for prop, explicit in (("dash", dash), ("dashsrc", dashsrc), ("width", width)):
            popped = arg.pop(prop, None)
            value = explicit if explicit is not None else popped
            if value is not None:
                self[prop] = value

        # Process unknown kwargs
        # ----------------------
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        # ------------------
        self._skip_invalid = False
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@graph_objs@layout@mapbox@layer@_line.py@.PATH_END.py
|
{
"filename": "_customdata.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/densitymapbox/_customdata.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `customdata` data array on `densitymapbox` traces."""

    def __init__(self, plotly_name="customdata", parent_name="densitymapbox", **kwargs):
        # Default to the "calc" edit type unless the caller overrides it.
        kwargs.setdefault("edit_type", "calc")
        super(CustomdataValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@densitymapbox@_customdata.py@.PATH_END.py
|
{
"filename": "test_comparison_basic.py",
"repo_name": "GalSim-developers/GalSim",
"repo_path": "GalSim_extracted/GalSim-main/devel/external/test_sersic_highn/test_comparison_basic.py",
"type": "Python"
}
|
# Copyright (c) 2012-2023 by the GalSim developers team on GitHub
# https://github.com/GalSim-developers
#
# This file is part of GalSim: The modular galaxy image simulation toolkit.
# https://github.com/GalSim-developers/GalSim
#
# GalSim is free software: redistribution and use in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions, and the disclaimer given in the accompanying LICENSE
# file.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions, and the disclaimer given in the documentation
# and/or other materials provided with the distribution.
#
import sys
import logging
import galsim
"""A simple Python test script to demonstrate use of the galsim.utilities.compare_dft_vs_photon_*
functions.
This script generates a model galaxy and PSF, and then compares the rendering of this object by both
photon shooting and DFT methods, by calling the GSObject `drawShoot()` and `draw()` methods
respectively.
There are two functions that do this in galsim.utilities:
i) galsim.utilities.compare_dft_vs_photon_object
ii) galsim.utilities.compare_dft_vs_photon_config
i) allows the object and optional convolving PSF to be specified directly as GSObject instances.
However, as these are not picklable, these tests can only run in single core mode.
ii) provides multi-core processing functionality, but requires that the object and optional
convolving PSF are specified via a `config` dictionary (see, e.g., examples/demo8.py).
The two methods don't provide identical results, because the `object` version uses only one random
generator sequence to generate all the photons, whereas the `config` version uses a number of
differently seeded random number generators, one for each image. One purpose of this script was
a quick sanity check of their overall consistency, as well as being a demonstration of these testing
utility functions.
"""
# Make the galaxy and PSF objects elliptical Sersic and Moffat, storing all param vals here
# in top level scope
galn = 3.3
galhlr = 0.9
psfbeta = 3.40
psffwhm = 0.85
g1gal = -0.23
g2gal = -0.17
g1psf = +0.03
g2psf = +0.01
# Set a pixel scale (e.g. in arcsec), and image size
dx = 0.27
imsize = 48
# Random seed
rseed = 111333555
# Value of wmult parameter
wmult = 4.
# Value of test tolerance parameters
tol_ellip = 3.e-5
tol_size = 1.e-4
n_photons_test= (int(1e6), int(3.e6), int(1.e7))
def test_comparison_object(np):
    """Run the single-core, object-based DFT vs photon-shooting comparison.

    Parameters
    ----------
    np : int
        Number of photons to shoot per trial.  (NOTE(review): this historical
        parameter name shadows the usual numpy alias; numpy is not imported
        in this script, so it is kept for interface compatibility.)
    """
    logging.basicConfig(level=logging.WARNING, stream=sys.stdout)
    logger = logging.getLogger("test_comparison_object")
    logger.info("Running basic tests of comparison scripts using objects")

    # Build a trial galaxy
    gal = galsim.Sersic(galn, half_light_radius=galhlr)
    gal.applyShear(g1=g1gal, g2=g2gal)
    # And an example PSF
    psf = galsim.Moffat(beta=psfbeta, fwhm=psffwhm)
    psf.applyShear(g1=g1psf, g2=g2psf)

    # Try a single core run
    # BUGFIX: the message used to say "using config file" (copy-pasted from
    # the config-based test); the print() call form also keeps this file
    # valid under both Python 2 and Python 3.
    print("Starting tests using objects with N_PHOTONS = " + str(np))
    res1 = galsim.utilities.compare_dft_vs_photon_object(
        gal, psf_object=psf, rng=galsim.BaseDeviate(rseed), size=imsize, pixel_scale=dx,
        abs_tol_ellip=tol_ellip, abs_tol_size=tol_size, n_photons_per_trial=np)
    print(res1)
    return
def test_comparison_config(np):
    """Run the (optionally multi-core) config-based DFT vs photon comparison.

    Parameters
    ----------
    np : int
        Number of photons to shoot per trial.
    """
    logging.basicConfig(level=logging.WARNING, stream=sys.stdout)
    logger = logging.getLogger("test_comparison_config")
    logger.info("Running basic tests of comparison scripts using config")

    # Set up a config dict to replicate the GSObject spec above
    config = {}
    config['gal'] = {
        "type": "Sersic",
        "n": galn,
        "half_light_radius": galhlr,
        "ellip": {
            "type": "G1G2",
            "g1": g1gal,
            "g2": g2gal
        }
    }
    config['psf'] = {
        "type": "Moffat",
        "beta": psfbeta,
        "fwhm": psffwhm,
        "ellip": {
            "type": "G1G2",
            "g1": g1psf,
            "g2": g2psf
        }
    }
    config['image'] = {
        'size': imsize,
        'pixel_scale': dx,
        'random_seed': rseed,
        'wmult': wmult
    }
    # Use an automatically-determined N core run setting
    # BUGFIX: print() call form instead of the Python-2-only print statement,
    # so this file remains valid under both Python 2 and Python 3.
    print("Starting tests using config file with N_PHOTONS = " + str(np))
    res8 = galsim.utilities.compare_dft_vs_photon_config(
        config, n_photons_per_trial=np, nproc=-1, logger=logger, abs_tol_ellip=tol_ellip,
        abs_tol_size=tol_size)
    print(res8)
    return
if __name__ == "__main__":
    # For each photon count, run the multi-core config-based comparison
    # first, then the slower single-core object-based one (see the module
    # docstring for why the two are not expected to agree exactly).
    for n_photons in n_photons_test:
        test_comparison_config(n_photons)
        test_comparison_object(n_photons)
|
GalSim-developersREPO_NAMEGalSimPATH_START.@GalSim_extracted@GalSim-main@devel@external@test_sersic_highn@test_comparison_basic.py@.PATH_END.py
|
{
"filename": "test_cassandra.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/tests/integration_tests/document_loaders/test_cassandra.py",
"type": "Python"
}
|
"""
Test of Cassandra document loader class `CassandraLoader`
"""
import os
from typing import Any, Iterator
import pytest
from langchain_core.documents import Document
from langchain_community.document_loaders.cassandra import CassandraLoader
CASSANDRA_DEFAULT_KEYSPACE = "docloader_test_keyspace"
CASSANDRA_TABLE = "docloader_test_table"
@pytest.fixture(autouse=True, scope="session")
def keyspace() -> Iterator[str]:
    """Session-scoped, autouse fixture for the Cassandra loader tests.

    Connects either via cassio auto-configuration (when Astra DB / contact
    point environment variables are set) or to a local Cassandra cluster,
    ensures the test keyspace exists, seeds a two-row table, yields the
    keyspace name, and drops the table on teardown.
    """
    import cassio
    from cassandra.cluster import Cluster
    from cassio.config import check_resolve_session, resolve_keyspace
    from cassio.table.tables import PlainCassandraTable

    # Prefer cassio's automatic configuration when any connection-related
    # environment variable is present (explicit contact points or Astra DB).
    if any(
        env_var in os.environ
        for env_var in [
            "CASSANDRA_CONTACT_POINTS",
            "ASTRA_DB_APPLICATION_TOKEN",
            "ASTRA_DB_INIT_STRING",
        ]
    ):
        cassio.init(auto=True)
        session = check_resolve_session()
    else:
        # Fall back to a local cluster with default contact points
        cluster = Cluster()
        session = cluster.connect()
    keyspace = resolve_keyspace() or CASSANDRA_DEFAULT_KEYSPACE
    cassio.init(session=session, keyspace=keyspace)

    session.execute(
        (
            f"CREATE KEYSPACE IF NOT EXISTS {keyspace} "
            f"WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}"
        )
    )

    # We use a cassio table by convenience to seed the DB
    table = PlainCassandraTable(
        table=CASSANDRA_TABLE, keyspace=keyspace, session=session
    )
    table.put(row_id="id1", body_blob="text1")
    table.put(row_id="id2", body_blob="text2")
    yield keyspace

    # Teardown: drop only the seeded table; the keyspace is left in place
    session.execute(f"DROP TABLE IF EXISTS {keyspace}.{CASSANDRA_TABLE}")
async def test_loader_table(keyspace: str) -> None:
    """Load rows via the `table=` code path; check sync and async results."""
    loader = CassandraLoader(table=CASSANDRA_TABLE)
    expected = [
        Document(
            page_content=f"Row(row_id='{rid}', body_blob='{blob}')",
            metadata={"table": CASSANDRA_TABLE, "keyspace": keyspace},
        )
        for rid, blob in (("id1", "text1"), ("id2", "text2"))
    ]
    assert loader.load() == expected
    assert await loader.aload() == expected
async def test_loader_query(keyspace: str) -> None:
    """Load rows via an explicit CQL `query=`; check sync and async results."""
    cql = f"SELECT body_blob FROM {keyspace}.{CASSANDRA_TABLE}"
    loader = CassandraLoader(query=cql)
    expected = [
        Document(page_content=f"Row(body_blob='{blob}')")
        for blob in ("text1", "text2")
    ]
    assert loader.load() == expected
    assert await loader.aload() == expected
async def test_loader_page_content_mapper(keyspace: str) -> None:
    """Check that a custom page_content_mapper controls Document content."""
    def row_to_text(row: Any) -> str:
        return str(row.body_blob)

    loader = CassandraLoader(table=CASSANDRA_TABLE, page_content_mapper=row_to_text)
    expected = [
        Document(
            page_content=blob,
            metadata={"table": CASSANDRA_TABLE, "keyspace": keyspace},
        )
        for blob in ("text1", "text2")
    ]
    assert loader.load() == expected
    assert await loader.aload() == expected
async def test_loader_metadata_mapper(keyspace: str) -> None:
    """Check that a custom metadata_mapper augments Document metadata."""
    def row_to_metadata(row: Any) -> dict:
        return {"id": row.row_id}

    loader = CassandraLoader(table=CASSANDRA_TABLE, metadata_mapper=row_to_metadata)
    expected = [
        Document(
            page_content=f"Row(row_id='{rid}', body_blob='{blob}')",
            metadata={
                "table": CASSANDRA_TABLE,
                "keyspace": keyspace,
                "id": rid,
            },
        )
        for rid, blob in (("id1", "text1"), ("id2", "text2"))
    ]
    assert loader.load() == expected
    assert await loader.aload() == expected
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@tests@integration_tests@document_loaders@test_cassandra.py@.PATH_END.py
|
{
"filename": "sp_utils.py",
"repo_name": "sophiasosafiscella/highfluencetiming",
"repo_path": "highfluencetiming_extracted/highfluencetiming-main/sp_utils.py",
"type": "Python"
}
|
import matplotlib.pyplot as plt
import matplotlib.transforms as mtransforms
from mpl_toolkits.mplot3d import Axes3D
import seaborn as sns
import pypulse as pyp
import numpy as np
import pandas as pd
import pypulse
import glob
import string
import os
import sys
from lmfit import Model, Parameters
#from scipy.optimize import curve_fit
from scipy.integrate import trapezoid
from scipy.signal import peak_widths, find_peaks
from tqdm import tqdm
def make_plots(dir):
    """Plot and save every single pulse from the first .npy file in `dir`.

    Each pulse is plotted, shown interactively, and saved to
    ``dir/single_pulses/<index>.png``.

    Parameters
    ----------
    dir : str
        Directory containing ``*.npy`` files of shape (n_pulses, n_bins).
        (NOTE(review): the name shadows the builtin `dir`; kept for
        interface compatibility.)
    """
    # Make plots of the single pulses (only the first file is processed,
    # matching the original [0:1] slice)
    for file in glob.glob(dir + "/*npy")[0:1]:
        data = np.load(file)
        print(data.shape)
        for sp_index in range(data.shape[0]):
            plt.plot(data[sp_index, :])
            plt.xlabel("Bins")
            plt.ylabel("Intensity")
            plt.title("Pulse " + str(sp_index) + " out of " + str(data.shape[0]))
            plt.savefig(dir + '/single_pulses/' + str(sp_index) + '.png')
            plt.show()
            # BUGFIX: was `plt.close` without parentheses -- a no-op that
            # let figures accumulate in memory instead of being closed.
            plt.close()
def count_sp(files):
    """Count the total number of single pulses across a list of archives.

    Parameters
    ----------
    files : sequence of str
        Paths to archive files readable by pypulse.

    Returns
    -------
    tuple of (int, int)
        (total number of sub-integrations, number of phase bins).  The bin
        count is taken from the archives; presumably all archives share the
        same number of bins -- TODO confirm.

    Raises
    ------
    ValueError
        If `files` is empty.  (Previously this path raised an opaque
        UnboundLocalError because N_bin was never assigned.)
    """
    n_sp: int = 0
    N_bin = None
    for file in tqdm(files):
        ar = pyp.Archive(file, verbose=False)
        n_sp += ar.getNsubint()
        N_bin = ar.getNbin()
    if N_bin is None:
        raise ValueError("count_sp() requires at least one archive file")
    return n_sp, N_bin
def calculate_rms(files, n_sp, n_chan):
    """Compute the off-pulse RMS for every single pulse and frequency channel.

    Parameters
    ----------
    files : sequence of str
        Paths to archive files readable by pypulse.
    n_sp : int
        Total number of single pulses across all files (rows of the output).
    n_chan : int
        Number of frequency channels (columns of the output).

    Returns
    -------
    np.ndarray of shape (n_sp, n_chan)
        Off-pulse noise RMS per pulse and channel; entries stay NaN for any
        rows beyond the pulses actually found in `files`.
    """
    # Off-pulse window: the first 100 phase bins
    opw = np.arange(0, 100)
    rms_values = np.full((n_sp, n_chan), np.nan)
    # Iterate over the files (the unused enumerate index was removed)
    n: int = 0
    for file in tqdm(files):
        ar = pyp.Archive(file, verbose=False)
        # Extract the data from the observation:
        # (n_subints x n_channels x n_phase_bins) -- in this order
        data = ar.getData()
        # Iterate over the sub-integrations
        for i in range(ar.getNsubint()):
            # Iterate over the frequency channels
            for j in range(n_chan):
                sp = pyp.SinglePulse(data[i, j, :], opw=opw)
                rms_values[n, j] = sp.getOffpulseNoise()
            n += 1
    return rms_values
def calculate_sp_snr(files, n_sp):
    """Compute the S/N of every single pulse after frequency scrunching.

    Parameters
    ----------
    files : sequence of str
        Paths to archive files readable by pypulse.
    n_sp : int
        Total number of single pulses across all files.

    Returns
    -------
    np.ndarray of shape (n_sp,)
        Signal-to-noise ratio per single pulse; entries stay NaN for any
        slots beyond the pulses actually found in `files`.
    """
    print("Calculating SP SNR")
    # Off-pulse window: the first 50 phase bins
    opw = np.arange(0, 50)
    snr_values = np.full(n_sp, np.nan)
    n: int = 0
    for file in tqdm(files):
        ar = pyp.Archive(file, verbose=False)
        ar.fscrunch()
        # (n_subints x n_phase_bins) after frequency scrunching
        data = ar.getData()
        # Iterate over the sub-integrations
        # (leftover per-pulse debug prints and dead commented-out NaN checks
        # were removed; they flooded stdout on every pulse)
        for i in range(ar.getNsubint()):
            sp = pyp.SinglePulse(data[i, :], opw=opw)
            snr_values[n] = sp.getSN()
            n += 1
    return snr_values
def get_average_pulse(pulses_files, nbins):
    """Build the mean pulse profile over a set of archive files.

    Each file is frequency-scrunched, its sub-integrations are averaged
    along axis 0, and the per-file profiles are then averaged together.
    Aborts the program (sys.exit) if any file contains NaNs.
    """
    profile = np.zeros(nbins)
    for idx, fname in tqdm(enumerate(pulses_files)):
        scrunched = pyp.Archive(fname, verbose=False).fscrunch().getData()
        if np.any(np.isnan(scrunched)):
            print(f"Found NaN in i={idx}, file = {fname}")
            print(scrunched)
            sys.exit()
        profile += np.average(scrunched, axis=0)
    return profile / len(pulses_files)
def find_energy_windows(template_data, window_factor, bins_factor: float, plot: bool = False):
    """Locate the pulse components in a template and return energy windows.

    Each detected peak contributes one [left, right] window spanning
    `window_factor` half-max widths on either side of the peak.  The margins
    are rescaled by `bins_factor` and rounded to integer bins.

    Returns
    -------
    np.ndarray of shape (n_peaks, 2), dtype int
    """
    # Detect the component peaks in the template
    peaks_pos, _props = find_peaks(template_data, distance=50, width=10, prominence=0.2)

    margins = np.zeros((len(peaks_pos), 2), dtype=float)
    for k, peak in enumerate(peaks_pos):
        half_width = peak_widths(template_data, np.array([peak]), rel_height=0.5)[0][0]
        margins[k] = (peak - window_factor * half_width,   # left margin
                      peak + window_factor * half_width)   # right margin

    if plot:
        sns.set_style("darkgrid")
        sns.set_context("paper")
        fig, ax = plt.subplots()
        x = np.array([*range(len(template_data))])
        ax.plot(x, template_data)
        ax.scatter(peaks_pos, template_data[peaks_pos], c="red", label='peaks')
        for left, right in margins:
            ax.fill_between(x, min(template_data), max(template_data),
                            where=((x < right) & (x > left)), color="C1", alpha=0.4)
        ax.set_xlabel("Bins")
        ax.set_ylabel("Intensity")
        plt.title("Pulse components' windows")
        plt.savefig("./figures/energy_windows.pdf")
        plt.show()
        plt.close()

    return np.rint(margins / bins_factor).astype(int)
def find_windows(template_file: str,       # name of the template fits_file
                 pulses_directory: str,    # directory containing the single pulses
                 results_dir: str,         # Directory with the clusters_toas
                 files: str,               # Names of the Archive files
                 window_percentage: float, # percentage of the window we'll use for the main peak window
                 windows_factor: float,    # scale factor for the energy windows
                 bscrunching_factor: float = 4,
                 plot=False):
    """Build the set of analysis windows for the single-pulse pipeline.

    Returns an integer array stacked row-wise as:
    row 0: off-pulse window, row 1: main-peak window,
    rows 2+: per-component energy windows (from find_energy_windows).
    """
    # find the peak of the template
    template = pypulse.Archive(template_file)
    # template.bscrunch(factor=bscrunching_factor)
    template_data = template.getData()
    template_peak_pos = np.argmax(template_data)
    offpulse = pypulse.SinglePulse(template_data, windowsize=int(template.getNbin() // 8)).calcOffpulseWindow()
    offpulsewindow = [min(offpulse), max(offpulse)]
    # find the average of the pulses (cached on disk after the first run)
    av_pulse_file = glob.glob(results_dir + "av_pulse_profile.npy")
    if len(av_pulse_file) == 0:
        print("Calculating average pulse...")
        average_pulse_data = get_average_pulse(files[0:100], nbins=512)
        np.save(results_dir + "av_pulse_profile.npy", average_pulse_data)
    else:
        average_pulse_data = np.load(results_dir + "av_pulse_profile.npy")
    av_pulse_peak_pos = np.argmax(average_pulse_data)
    print(f"Av pulse peak pos = {av_pulse_peak_pos}")
    # If the template has 2048 bins and the pulses have 512, we divide:
    bins_ratio = int(len(template_data) / len(average_pulse_data))
    if bins_ratio != 1:
        print("bins_ratio = " + str(bins_ratio))
        template_data = template_data[0:len(template_data):bins_ratio]
        template_peak_pos = round(template_peak_pos / bins_ratio)
    # BUG FIX: bins512 used to be created only inside the plotting branch,
    # so plot=False raised a NameError at the "width" computation below.
    # Define it unconditionally here.
    bins512 = range(len(average_pulse_data))
    # in case we want to plot
    if plot:
        fig, ax = plt.subplots()
        ax.plot(bins512, average_pulse_data, c="C0", label="Average pulse")
        ax.scatter(bins512, average_pulse_data, c="C0")
        ax.set_xlim([template_peak_pos - 50, template_peak_pos + 50])
        # BUG FIX: the two labels below quoted each other's peak positions
        ax.axvline(x=av_pulse_peak_pos, ls="--", c='C4', label="Average peak = " + str(av_pulse_peak_pos))
        ax.axvline(x=template_peak_pos, ls="--", c='C3', label="Template peak = " + str(template_peak_pos))
        plt.title("Comparison of template and average")
        plt.legend(loc="upper left")
        plt.show()
    # calculate the offset between the peaks and correct the template peak position
    offset = template_peak_pos - av_pulse_peak_pos
    template_peak_pos -= offset
    # Get the pulse window as a fraction of the pulse phase: 10 or 15%, or 12.5% (1/8) of the pulse phase
    width = int(len(bins512) / 100.0 * window_percentage)
    left_margin = int(template_peak_pos - int(width / 2))
    right_margin = int(template_peak_pos + int(width / 2))
    # find the energy windows
    energy_windows = find_energy_windows(template_data, windows_factor, bins_ratio, plot=False)
    if plot:
        sns.set_style("darkgrid")
        sns.set_context("paper", font_scale=1.4)
        fig, ax = plt.subplots(2, 1, sharex=True, sharey=True)
        plt.subplots_adjust(wspace=0, hspace=0)
        x = np.array([*bins512])
        ax[0].plot(x, template_data)
        ax[0].axvline(x=template_peak_pos, ls="--", c='k')
        ax[0].axvline(x=left_margin, ls=":", c="grey")
        ax[0].axvline(x=right_margin, ls=":", c="grey")
        ax[0].fill_between(x, min(template_data), max(template_data),
                           where=((x < right_margin) & (x > left_margin)), color="C1", alpha=0.4)
        ax[0].set_ylabel("Intensity")
        ax[0].title.set_text("Pulse Windows")
        peaks_pos, properties = find_peaks(template_data, distance=50, width=10, prominence=0.2)
        ax[1].plot(x, template_data)
        ax[1].scatter(peaks_pos, template_data[peaks_pos], c="red", label='peaks')
        for left, right in zip(energy_windows[:, 0], energy_windows[:, 1]):
            ax[1].fill_between(x, min(template_data), max(template_data),
                               where=((x < right) & (x > left)), color="C2", alpha=0.4)
        ax[1].set_xlabel("Bins")
        ax[1].set_ylabel("Intensity")
        for n, sub in enumerate(ax):
            sub.text(0.05, 0.8, string.ascii_uppercase[n], transform=sub.transAxes,
                     size=20)
        plt.savefig("./windows.pdf")
        plt.show()
        # BUG FIX: plt.close was referenced but never called
        plt.close()
    return np.vstack((offpulsewindow, [left_margin, right_margin], energy_windows))
def gaussian(x, amplitude, mean, stddev):
    """Un-normalised Gaussian profile (no baseline term)."""
    exponent = -((x - mean) ** 2 / (2 * stddev ** 2))
    return amplitude * np.exp(exponent)
# Gaussian model with a constant vertical offset H (this is the model
# function handed to lmfit in estimate_peak).
def gauss(x, a, x0, sigma, H):
    """Gaussian of amplitude ``a``, centre ``x0``, width ``sigma`` on baseline ``H``."""
    peak = a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))
    return H + peak
def estimate_peak(window_data, windows, baseline, window_index, plot=False):
    """Fit a single Gaussian (+ constant offset) to one pulse window.

    Parameters
    ----------
    window_data : array, intensity values inside the main-peak window.
        NOTE(review): modified IN PLACE below (``window_data -= baseline``);
        when the caller passes a view into the full pulse array (as
        get_params does), the underlying array is mutated — confirm this
        side effect is intended.
    windows : 2-D int array; windows[1, 0] is the left edge of the main
        window and is used to convert window-relative bins to absolute bins.
    baseline : scalar off-pulse level to subtract before fitting.
    window_index : pulse number, used only for the plot file name.
    plot : if True, save and show a diagnostic plot of the fit.

    Returns
    -------
    (peak_amp, peak_pos, peak_width) — peak_pos and peak_width are arrays
    from find_peaks/peak_widths (presumably length 1 on a successful fit —
    TODO confirm), or (nan, nan, 0.0) when the fitted curve has no peak.
    """
    # Remove the baseline
    window_data -= baseline
    # Find the index of the peak of the (observed) pulse
    peak_index = np.argmax(window_data)  # + windows[1, 0] # Index (in the window) of the biggest peak in the pulse window
    # Create bins in the pulse window
    x_data = np.arange(len(window_data))  # + windows[1, 0]
    # lmfit model built from the module-level gauss() function
    gmodel = Model(gauss)
    params = Parameters()
    # centre constrained to stay a few bins away from the window edges
    params.add("x0", value=peak_index, min=x_data[4], max=x_data[-4])
    params.add("a", value=0.01, min=0.0001)
    # print("peak index = " + str(peak_index))
    # print("max in window = " + str(np.max(window_data)))
    params.add("sigma", value=1.0, min=0.01, max=x_data[-1])
    params.add("H", value=0.0)
    # params.add("baseline", value=baseline)
    # params['baseline'].vary = False
    result = gmodel.fit(window_data, params, x=x_data)
    # factor > 1 would oversample the fitted curve; currently disabled
    factor: int = 1
    new_x = np.arange(x_data[0], x_data[-1], 1.0/factor)
    # evaluate the fitted Gaussian on the (possibly oversampled) grid
    new_y = gauss(new_x, a=result.params["a"].value,
                  x0=result.params["x0"].value,
                  sigma=result.params["sigma"].value,
                  H=result.params["H"].value)
    # print(result.params["a"].value, result.params["x0"].value, result.params["sigma"].value, result.params["H"].value)
    # peak_amp = np.max(new_y) - baseline
    # NOTE(review): baseline was already subtracted from window_data above,
    # so subtracting it again here removes it twice — confirm intended.
    peak_amp = np.max(window_data) - baseline
    # peak_pos = new_x[np.argmax(new_y)]
    # peak_width = peak_widths(window_data, np.array([np.argmax(window_data)]), rel_height=0.5)[0][0]
    # peak_idx = [np.argmax(new_y)]
    gauss_peak_idx, _ = find_peaks(new_y)
    # convert from window-relative to absolute bin position
    peak_pos = new_x[gauss_peak_idx] + windows[1, 0]
    results_FWHM = peak_widths(new_y, gauss_peak_idx, rel_height=0.5)
    peak_width = results_FWHM[0]
    # NOTE(review): this empty-peak check runs after gauss_peak_idx has
    # already been used above; that is safe only because indexing with an
    # empty array yields empty results rather than raising.
    if len(gauss_peak_idx) < 1:
        return np.nan, np.nan, 0.0
    if plot:
        plt.close()
        sns.set_style("darkgrid")
        sns.set_context("paper", font_scale=1.4)
        plt.plot(new_x[gauss_peak_idx] + windows[1, 0] , new_y[gauss_peak_idx], "x")
        plt.plot(x_data + windows[1, 0], window_data, c="#636EFA", label="Original")  # c="#f29ad8"
        plt.scatter(x_data + windows[1, 0], window_data, c="#636EFA")  # c="#f29ad8"
        plt.plot(new_x + windows[1, 0], new_y, c="#EF553B", label="Fit")  # c="#e305ad",
        plt.axvline(x=peak_pos, ls="--", c='k', label="Peak position")
        # plt.axvline(x=windows[1, 0], ls=":", c="grey")
        # plt.axvline(x=windows[1, 1], ls=":", c="grey")
        # ax.fill_between(new_x, 0, peak_amp,
        #                 where=(new_x < peak_pos + peak_width/20) &
        #                       (new_x > peak_pos - peak_width/20),
        #                 color='#B6E880', alpha=0.3, label="Width")  # color="#f9dd9a",
        # plt.axvline(x=[*results_FWHM[2]][0], c='red')
        # plt.axvline(x=[*results_FWHM[3]][0], c='red')
        # horizontal FWHM marker at the half-height returned by peak_widths
        plt.hlines([*results_FWHM[1]][0], [*results_FWHM[2]][0]/factor + windows[1, 0], [*results_FWHM[3]][0]/factor + windows[1, 0], color="C2", lw=2, label="FWHM")
        # textstr = '\n'.join((
        #     r'$\mathrm{Position}=%i$' % (peak_pos,),
        #     r'$\mathrm{Amplitude}=%.4f$' % (peak_amp,),
        #     r'$\mathrm{Width}=%.4f$' % (peak_width,)))
        #     ,r'baseline =%.4f' % (baseline,)))
        # these are matplotlib.patch.Patch properties
        props = dict(boxstyle='round', facecolor='white', alpha=0.5)
        # place a text box in upper left in axes coords
        # ax.text(0.70, 0.95, textstr, transform=ax.transAxes, fontsize=12,
        #         verticalalignment='top', bbox=props)
        plt.xlabel("Bins")
        plt.ylabel("Intensity")
        # ax.set_title("Pulse " + str(window_index))
        plt.legend(loc='upper left')
        # plt.savefig("./clusters_toas/windows/pulse_" + str(window_index) + ".png")
        plt.tight_layout()
        plt.savefig("./figures/fits/pulse_fit_" + str(window_index) + ".png")
        plt.show()
        plt.close()
    # print(peak_amp, peak_pos, peak_width)
    return peak_amp, peak_pos, peak_width
def get_energy(pulse_data, windows, baseline):
    """Sum the integrated intensity over all component energy windows.

    Rows 0 and 1 of ``windows`` are the off-pulse and main-peak windows;
    only rows 2+ are component energy windows and contribute here.

    NOTE(review): ``baseline`` is subtracted once per window rather than
    scaled by the window length (baseline * (right - left)) — confirm the
    intended baseline correction.
    """
    total: float = 0.0
    for left, right in windows[2:]:
        total += trapezoid(pulse_data[left: right], dx=1) - baseline
    return total
# (A numba @jit decorator was considered here but is not applied.)
def get_params(data_file, windows, results_dir, plot=False):
    """Fit every single pulse and return a per-pulse feature table.

    Reads the pickled pulse matrix from ``data_file``, fits a Gaussian to
    each pulse's main-peak window and integrates its component windows,
    then returns a DataFrame with columns Pos, Width, Amp and Energy.
    Rows with zero fitted width or any NaN feature are dropped.
    ``results_dir`` is currently unused but kept for interface stability.
    """
    pulses_df = pd.read_pickle(data_file)
    pulses_data = pulses_df.to_numpy()
    # Restrict to the main-pulse window (row 1 of ``windows``) for the fits.
    window_data = pulses_data[:, windows[1, 0]:windows[1, 1]]
    n_pulses = window_data.shape[0]
    amp = np.full(n_pulses, np.nan)
    pos = np.full(n_pulses, np.nan)
    width = np.full(n_pulses, np.nan)
    energy = np.full(n_pulses, np.nan)
    for i in tqdm(range(n_pulses)):
        # windows[0, 0] and windows[0, 1] are the left and right edges
        # of the off-pulse window used for the baseline estimate.
        baseline = np.average(pulses_data[i, windows[0, 0]:windows[0, 1]])
        amp[i], pos[i], width[i] = estimate_peak(window_data[i, :], windows,
                                                 np.average(window_data[i, 0:4]), i, plot=plot)
        energy[i] = get_energy(pulses_data[i, :], windows, baseline)
    features = pd.DataFrame(data={'Pos': pos, 'Width': width, 'Amp': amp, 'Energy': energy},
                            index=pulses_df.index)
    # Drop zero-width fits and any row with a NaN feature.
    return features.loc[features['Width'] != 0.0].dropna(how='any', ignore_index=True)
|
sophiasosafiscellaREPO_NAMEhighfluencetimingPATH_START.@highfluencetiming_extracted@highfluencetiming-main@sp_utils.py@.PATH_END.py
|
{
"filename": "_traceref.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram/error_y/_traceref.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TracerefValidator(_plotly_utils.basevalidators.IntegerValidator):
    """Integer validator for the ``histogram.error_y.traceref`` property."""

    def __init__(
        self, plotly_name="traceref", parent_name="histogram.error_y", **kwargs
    ):
        # Apply the schema defaults unless the caller overrides them.
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("min", 0)
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram@error_y@_traceref.py@.PATH_END.py
|
{
"filename": "furlanetto2017.py",
"repo_name": "mirochaj/ares",
"repo_path": "ares_extracted/ares-main/input/litdata/furlanetto2017.py",
"type": "Python"
}
|
from numpy import inf
# Calibration set!
# Parameter dictionaries consumed by the surrounding framework's "pq"
# (parameterized-quantity) machinery. Each pq_func_par*[0] entry below is a
# coefficient of the 'pl_evolN' power law in halo mass Mh and (1+z).

# Energy-regulated feedback model.
energy = \
{
 'pop_fstar': None,
 'pop_fstar_max': 0.1,  # fstar <= this value

 # SFE (through mass loading factor)
 'pop_sfr_model': 'mlf-func',
 'pop_mlf': 'pq[0]',
 'pq_func[0]': 'pl_evolN',
 'pq_func_var[0]': 'Mh',
 'pq_func_var2[0]': '1+z',

 ##
 # Steve's Equation 13.
 ##
 'pq_func_par0[0]': 1.,
 'pq_func_par1[0]': 10**11.5,
 'pq_func_par2[0]': -2./3.,
 'pq_func_par3[0]': 9.,
 'pq_func_par4[0]': -1.,

 'pop_lum_per_sfr': 1e-28,
}

# Short alias for the energy-regulated set.
ereg = energy

# Momentum-regulated variant: same structure, shallower mass dependence.
momentum = energy.copy()
momentum['pop_fstar_max'] = 0.2
momentum['pq_func_par0[0]'] = 5.  # actually not sure what Steve uses here.
momentum['pq_func_par2[0]'] = -1./3.
momentum['pq_func_par4[0]'] = -0.5

# Short alias for the momentum-regulated set.
mreg = momentum

# Fraction of accretion that is cold/shocked at the massive end.
fshock = \
{
 # Massive end
 'pop_fshock': 'pq[1]',
 'pq_func[1]': 'pl_evolN',
 'pq_func_var[1]': 'Mh',
 'pq_func_var2[1]': '1+z',
 'pq_val_ceil[1]': 1.0,  # fshock <= 1

 # Steve's Equation 6 (from Faucher-Giguere+ 2011)
 'pq_func_par0[1]': 0.47,
 'pq_func_par1[1]': 1e12,
 'pq_func_par2[1]': -0.25,
 'pq_func_par3[1]': 4.,
 'pq_func_par4[1]': 0.38,
}
|
mirochajREPO_NAMEaresPATH_START.@ares_extracted@ares-main@input@litdata@furlanetto2017.py@.PATH_END.py
|
{
"filename": "read_gwf.ipynb",
"repo_name": "ThibeauWouters/TurboPE-BNS",
"repo_path": "TurboPE-BNS_extracted/TurboPE-BNS-main/real_events/deprecated_runs/GW190425_NRTidalv2/read_gwf.ipynb",
"type": "Jupyter Notebook"
}
|
# Read GWF files
```python
import psutil
p = psutil.Process()
p.cpu_affinity([0])
import warnings
warnings.filterwarnings("ignore", "Wswiglal-redir-stdio")
import lal
from gwpy.timeseries import TimeSeries
from lalframe.utils.frtools import get_channels
import jax
import jax.numpy as jnp
import numpy as np
import matplotlib.pyplot as plt
from jimgw.single_event.detector import H1, L1, V1
```
2024-03-14 03:17:02.987096: W external/xla/xla/service/platform_util.cc:198] unable to create StreamExecutor for CUDA:1: failed initializing StreamExecutor for CUDA device ordinal 1: INTERNAL: failed call to cuDevicePrimaryCtxRetain: CUDA_ERROR_OUT_OF_MEMORY: out of memory; total memory reported: 84974239744
```python
data_path = "/home/thibeau.wouters/gw-datasets/GW190425/"
# New attempt
data_dict = {"L1":{"data": data_path + "L-L1_GWOSC_16KHZ_R1-1240213455-4096.gwf",
"psd": data_path + "glitch_median_PSD_forLI_L1_srate8192.txt",
"channel": "L1:GWOSC-16KHZ_R1_STRAIN"},
"V1":{"data": data_path + "V-V1_GWOSC_16KHZ_R1-1240213455-4096.gwf",
"psd": data_path + "glitch_median_PSD_forLI_V1_srate8192.txt",
"channel": "V1:GWOSC-16KHZ_R1_STRAIN"}
}
# Original data
data_dict = {"L1":{"data": data_path + "L-L1_HOFT_C01_T1700406_v3-1240211456-4096.gwf",
"psd": data_path + "glitch_median_PSD_forLI_L1_srate8192.txt",
"channel": "L1:DCS-CALIB_STRAIN_CLEAN_C01_T1700406_v3"},
"V1":{"data": data_path + "V-V1Online_T1700406_v3-1240214000-2000.gwf",
"psd": data_path + "glitch_median_PSD_forLI_V1_srate8192.txt",
"channel": "V1:Hrec_hoft_16384Hz_T1700406_v3"}
}
```
```python
gps = 1240215503.017147
trigger_time = gps
fmin = 20
fmax = 2048
minimum_frequency = fmin
maximum_frequency = fmax
T = 128
duration = T
post_trigger_duration = 2
epoch = duration - post_trigger_duration
f_ref = fmin
tukey_alpha = 2 / (T / 2)
print(tukey_alpha)
gps_start_pad = duration-2
gps_end_pad = 2
# L1.load_data_from_frame(trigger_time=trigger_time,
# gps_start_pad=duration-2,
# gps_end_pad=2,
# frame_file_path=data_dict["L1"]["data"],
# channel_name=data_dict["L1"]["channel"],
# f_min=fmin,
# f_max=fmax,
# tukey_alpha=tukey_alpha)
# V1.load_data_from_frame(trigger_time=trigger_time,
# gps_start_pad=duration-2,
# gps_end_pad=2,
# frame_file_path=data_dict["V1"]["data"],
# channel_name=data_dict["V1"]["channel"],
# f_min=fmin,
# f_max=fmax,
# tukey_alpha=tukey_alpha)
# L1.psd = L1.load_psd(L1.frequencies, data_dict["L1"]["psd"])
# V1.psd = V1.load_psd(V1.frequencies, data_dict["V1"]["psd"])
```
0.03125
## Playing with hdf5 files
```python
example_gwf_file = data_dict["L1"]["data"]
channel = data_dict["L1"]["channel"]
data_td = TimeSeries.read(example_gwf_file, channel,
start = trigger_time - gps_start_pad,
end = trigger_time + gps_end_pad)
print(data_td.value)
print(np.mean(data_td.value))
# data_td = TimeSeries.read(example_gwf_file, channel)
# print(data_td.value)
# print(np.mean(data_td.value))
```
[ 1.41329866e+265 1.41329866e+265 1.41329866e+265 ...
-2.00532910e+043 -3.89918427e+043 -1.36630243e+043]
nan
/home/thibeau.wouters/miniconda3/envs/jim/lib/python3.10/site-packages/numpy/core/_methods.py:118: RuntimeWarning: overflow encountered in reduce
ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
/home/thibeau.wouters/miniconda3/envs/jim/lib/python3.10/site-packages/numpy/core/_methods.py:118: RuntimeWarning: invalid value encountered in reduce
ret = umr_sum(arr, axis, dtype, out, keepdims, where=where)
```python
V1.data
```
Array([nan+nanj, nan+nanj, nan+nanj, ..., nan+nanj, nan+nanj,
nan+nanj], dtype=complex64)
```python
# Plot the two ifo data streams
fig, ax = plt.subplots(2, 1, figsize=(12, 8), sharex=True)
ax[0].plot(L1.frequencies, L1.data, label="L1")
ax[1].plot(V1.frequencies, V1.data, label="V1")
ax[0].set_ylabel("Strain")
ax[1].set_ylabel("Strain")
ax[1].set_xlabel("Frequency [Hz]")
ax[0].legend()
ax[1].legend()
plt.show()
```
/home/thibeau.wouters/miniconda3/envs/jim/lib/python3.10/site-packages/jax/_src/array.py:390: ComplexWarning: Casting complex values to real discards the imaginary part
return np.asarray(self._value, dtype=dtype)

## txt files?
```python
# txt_file = data_path + "L-L1_GWOSC_16KHZ_R1-1240213455-4096.txt"
# # Read the data
# data = np.loadtxt(txt_file)
# data
```
## hdf5 files
```python
import h5py
```
```python
h5_file = data_path + "V-V1_GWOSC_16KHZ_R1-1240213455-4096.hdf5"
with h5py.File(h5_file, "r") as f:
print(f.keys())
# Meta:
print(f['meta'].keys())
print(f['meta']['Description'][()])
print(f['meta']['DescriptionURL'][()])
print(f['meta']['Detector'][()])
print(f['meta']['Detector'][()])
duration = f['meta']['Duration'][()]
gps_start = f['meta']['GPSstart'][()]
print(f['meta']['Observatory'][()])
print(f['meta']['Type'][()])
print(f['meta']['UTCstart'][()])
# data = f['strain']['Strain'][()]
```
<KeysViewHDF5 ['meta', 'quality', 'strain']>
<KeysViewHDF5 ['Description', 'DescriptionURL', 'Detector', 'Duration', 'GPSstart', 'Observatory', 'Type', 'UTCstart']>
b'Strain data time series from Virgo'
b'http://www.gw-openscience.org/'
b'V1'
b'V1'
4096
1240213455
b'V'
b'StrainTimeSeries'
b'2019-04-25T07:43:57'
```python
data = TimeSeries.read(h5_file, format="hdf5.gwosc")
print(np.mean(data.value))
data = TimeSeries.read(h5_file, format="hdf5.gwosc",
start = trigger_time - gps_start_pad,
end = trigger_time + gps_end_pad)
print(np.mean(data.value))
```
9.29630999831302e-25
-1.0329245334379674e-23
```python
```
|
ThibeauWoutersREPO_NAMETurboPE-BNSPATH_START.@TurboPE-BNS_extracted@TurboPE-BNS-main@real_events@deprecated_runs@GW190425_NRTidalv2@read_gwf.ipynb@.PATH_END.py
|
{
"filename": "asciidata_test.py",
"repo_name": "spacetelescope/stsdas_stripped",
"repo_path": "stsdas_stripped_extracted/stsdas_stripped-master/stsdas/pkg/analysis/slitless/axe/axe_asciidata/asciidata_test.py",
"type": "Python"
}
|
"""
Unittest classes for the asciidata module
@author: Martin Kuemmel, Jonas Haase
@organization: Space Telescope - European Coordinating Facility (ST-ECF)
@license: Gnu Public Licence
@contact: mkuemmel@eso.org
@since: 2005/09/13
$LastChangedBy: jhaase $
$LastChangedDate: 2008-02-27 16:45:36 +0100 (Wed, 27 Feb 2008) $
$HeadURL: http://astropy.scipy.org/svn/astrolib/trunk/asciidata/Lib/asciidata_test.py $
"""
from __future__ import absolute_import
__version__ = "Version 1.1 $LastChangedRevision: 389 $"
import unittest
from . import asciidata, asciifunction
import os
class Test_AsciiData(unittest.TestCase):
"""
A test class for the asciidata module
"""
def setUp(self):
    """
    Automatic set up for the class

    Writes a small reference table (3 rows x 4 columns: float, float,
    str, int) to a temporary file in the current working directory and
    parses it into ``self.tdata``.
    setUp is called before each test function execution.
    """
    # define the data
    self.data = """ 105.2 323.4 star 20
102.4 529.0 galaxy 21
834.1 343.7 galaxy 23"""
    # define a test file
    # delete in case it just exists
    self.testfile = 'test_file.tmp'
    if os.path.isfile(self.testfile):
        os.unlink(self.testfile)
    # open the test file
    tfile = open(self.testfile, 'w')
    # fill data into the test file
    tfile.write(self.data)
    # close the test file
    tfile.close()
    # create the test instance
    self.tdata = asciifunction.open(self.testfile)
def tearDown(self):
    """
    Automatic clean up after each test

    Drops the per-test instance data and removes the temporary file.
    """
    del self.data
    del self.tdata
    if os.path.isfile(self.testfile):
        os.unlink(self.testfile)
def testSimplePrint(self):
    """
    Round-trip check against the input

    Converting the asciidata instance back to a string must reproduce
    the exact text it was built from.
    """
    self.assertEqual(str(self.tdata), self.data)
def testDimension(self):
    """
    Check the table geometry

    The parsed instance must report the 3 rows and 4 columns of the
    reference data.
    """
    self.assertEqual((self.tdata.nrows, self.tdata.ncols), (3, 4))
def testElements(self):
    """
    Check every individual cell of the table against the input data.
    """
    # expected values, one tuple per column
    expected = (
        (105.2, 102.4, 834.1),
        (323.4, 529.0, 343.7),
        (' star', 'galaxy', 'galaxy'),
        (20, 21, 23),
    )
    for col, values in enumerate(expected):
        for row, value in enumerate(values):
            self.assertEqual(self.tdata[col][row], value)
def testColumnType(self):
    """
    Check the auto-detected column types

    Each column must report the type implied by the reference data.
    """
    for col, expected in enumerate((float, float, str, int)):
        self.assertEqual(self.tdata[col].get_type(), expected)
def testValueInput(self):
    """
    Test the correct change of table values

    Go a representative number of table elements. Change the
    element content and check whether the correct number is
    read out.
    """
    # go over the first two (float) columns
    for c_index in range(2):
        # go over each row
        for r_index in range(self.tdata.nrows):
            # compute an arbitrary float
            number = c_index * r_index * 1.23456
            # insert the float
            self.tdata[c_index][r_index] = number
            # read and check the number
            self.assertEqual(self.tdata[c_index][r_index], number)
    # NOTE(review): the two loops below reuse the leftover loop variable
    # c_index (== 1 after the loop above) in their test values — presumably
    # intentional but worth confirming.
    # go over each row of the string column
    for r_index in range(self.tdata.nrows):
        # form a string
        string = str(c_index * r_index +1)
        # insert the string
        self.tdata[2][r_index] = string
        # read and check the string
        self.assertEqual(self.tdata[2][r_index], string)
    # go over each row of the integer column
    for r_index in range(self.tdata.nrows):
        # form an integer
        string = c_index * r_index
        # insert the integer
        self.tdata[3][r_index] = string
        # read and check the integer
        self.assertEqual(self.tdata[3][r_index], string)
def testNoneInput(self):
    """
    Test the insertion of None as table input

    Every cell must accept None and read it back unchanged.
    """
    for col in range(self.tdata.ncols):
        for row in range(self.tdata.nrows):
            self.tdata[col][row] = None
            self.assertEqual(self.tdata[col][row], None)
def testTypeConversion(self):
    """
    Test the automatic type conversion of table columns

    Inserting a value of a wider type must widen the whole column:
    float -> str, int -> float -> str.
    """
    # a float column turns into a string column
    self.assertEqual(self.tdata[0].get_type(), float)
    self.tdata[0][0] = 'tostring'
    self.assertEqual(self.tdata[0].get_type(), str)
    # an int column turns into a float column
    self.assertEqual(self.tdata[3].get_type(), int)
    self.tdata[3][1] = 1.5
    self.assertEqual(self.tdata[3].get_type(), float)
    # ... and the float column then turns into a string column
    self.assertEqual(self.tdata[3].get_type(), float)
    self.tdata[3][1] = 'change again'
    self.assertEqual(self.tdata[3].get_type(), str)
def testValueConversion(self):
    """
    Test the automatic type conversion of table entries

    A value inserted into a column of a wider type must be coerced
    to that column type.
    """
    # integer into a float column becomes a float
    self.tdata[0][0] = 1
    self.assertEqual(self.tdata[0][0], 1.0)
    # integer into a string column becomes a string
    self.tdata[2][0] = 1
    self.assertEqual(self.tdata[2][0].strip(), '1')
    # float into a string column becomes a string
    self.tdata[2][1] = 1.0
    self.assertEqual(self.tdata[2][1].strip(), '1.0')
def testColumnCreation(self):
    """
    Test the implicit creation of new columns

    Assigning to a fresh column name must create a column of the
    appropriate type; afterwards the column count has grown by three.
    """
    ncols = self.tdata.ncols
    # one spec per new column: (name, column type, seed value)
    for name, ctype, seed in (('new_float', float, 0.0),
                              ('new_int', int, 0),
                              ('new_string', str, 'a')):
        # the first assignment creates the column
        self.tdata[name][0] = seed
        # fill the column with typed values
        for row in range(self.tdata.nrows):
            self.tdata[name][row] = ctype(row)
        # the column type must match
        self.assertEqual(self.tdata[name].get_type(), ctype)
        # values and element types must match
        for row in range(self.tdata.nrows):
            self.assertEqual(self.tdata[name][row], ctype(row))
            self.assertEqual(type(self.tdata[name][row]), ctype)
    # three columns were added
    self.assertEqual(self.tdata.ncols, ncols + 3)
def testAppendColumn(self):
    """
    Test to append new columns

    Append columns via the 'append()' method of the class.
    Test the value insertion and the correct type definition.
    """
    # store the initial number of columns
    ncols = self.tdata.ncols
    # create a new column with integers
    self.tdata.append('testInt')
    self.assertEqual(self.tdata.ncols, ncols+1)
    for index in range(self.tdata.nrows):
        self.tdata['testInt'][index] = int(index)
    self.assertEqual(self.tdata['testInt'].get_type(), int)
    for index in range(self.tdata.nrows):
        self.assertEqual(self.tdata['testInt'][index], int(index))
        self.assertEqual(type(self.tdata['testInt'][index]), int)
    # create a new column with floats
    self.tdata.append('testFloat')
    self.assertEqual(self.tdata.ncols, ncols+2)
    for index in range(self.tdata.nrows):
        self.tdata['testFloat'][index] = float(index)
    self.assertEqual(self.tdata['testFloat'].get_type(), float)
    for index in range(self.tdata.nrows):
        # BUG FIX: the float values were previously compared against
        # int(index); compare against the float that was stored.
        self.assertEqual(self.tdata['testFloat'][index], float(index))
        self.assertEqual(type(self.tdata['testFloat'][index]), float)
    # create a new column with strings
    self.tdata.append('testString')
    self.assertEqual(self.tdata.ncols, ncols+3)
    for index in range(self.tdata.nrows):
        self.tdata['testString'][index] = 'a'
    self.assertEqual(self.tdata['testString'].get_type(), str)
    for index in range(self.tdata.nrows):
        self.assertEqual(self.tdata['testString'][index], 'a')
        self.assertEqual(type(self.tdata['testString'][index]), str)
def testDeleteColumn(self):
    """
    Test to delete columns

    Delete columns by name, by positive index and by negative index,
    checking the column count after each deletion.
    """
    ncols = self.tdata.ncols
    # create and fill a throw-away column
    self.tdata.append('testColumn')
    self.assertEqual(self.tdata.ncols, ncols + 1)
    for row in range(self.tdata.nrows):
        self.tdata['testColumn'][row] = float(row)
    # remove it by name
    del self.tdata['testColumn']
    self.assertEqual(self.tdata.ncols, ncols)
    # remove the first column, twice
    del self.tdata[0]
    self.assertEqual(self.tdata.ncols, ncols - 1)
    del self.tdata[0]
    self.assertEqual(self.tdata.ncols, ncols - 2)
    # the former third column has shifted to index 0
    self.assertEqual(self.tdata[0][1], 'galaxy')
    # remove the last column by negative index
    del self.tdata[-1]
    self.assertEqual(self.tdata.ncols, ncols - 3)
def testInsertRows(self):
    """
    Test to insert rows

    Insert rows at the end and at the beginning, monitoring the total
    row count and verifying that existing content shifts correctly.
    """
    nrows = self.tdata.nrows
    # two rows appended at the end
    self.tdata.insert(2, -1)
    self.assertEqual(self.tdata.nrows, nrows + 2)
    # two rows inserted at the front
    self.tdata.insert(2, 0)
    self.assertEqual(self.tdata.nrows, nrows + 4)
    # the original first row has moved down by two
    self.assertEqual(self.tdata[2][2], ' star')
def testDeleteRows(self):
    """
    Test to delete rows

    Delete the two leading rows and verify both the new row count and
    the content of the row that moved up.
    """
    nrows = self.tdata.nrows
    self.tdata.delete(0, 2)
    self.assertEqual(self.tdata.nrows, nrows - 2)
    # the former last row is now the first one
    self.assertEqual(self.tdata[2][0], 'galaxy')
def testStrip(self):
    """
    Test of strip, lstrip and rstrip

    Insert empty rows at the start and end of the table and verify
    that the strip methods remove them again.
    """
    nrows = self.tdata.nrows
    # pad with two trailing and three leading rows
    self.tdata.insert(2, -1)
    self.tdata.insert(3, 0)
    self.assertEqual(self.tdata.nrows, nrows + 5)
    # lstrip removes the three leading rows
    self.tdata.lstrip()
    self.assertEqual(self.tdata.nrows, nrows + 2)
    # rstrip removes the trailing rows
    self.tdata.rstrip()
    self.assertEqual(self.tdata.nrows, nrows)
    # strip handles both ends at once
    self.tdata.insert(3, -1)
    self.tdata.insert(4, 0)
    self.tdata.strip()
    self.assertEqual(self.tdata.nrows, nrows)
def testFindColumn(self):
    """
    Test to find the column number

    find() must map default column names as well as freshly created
    column names to the correct indices.
    """
    ncols = self.tdata.ncols
    # default names map to their positions
    self.assertEqual(self.tdata.find('column1'), 0)
    self.assertEqual(self.tdata.find('column3'), 2)
    # an appended column lands at the old column count
    self.tdata.append('new_column')
    self.assertEqual(self.tdata.find('new_column'), ncols)
    # an implicitly created column lands one further
    self.tdata['new_column2'][0] = 1.0
    self.assertEqual(self.tdata.find('new_column2'), ncols + 1)
def testWriteTo(self):
    """
    Test the writing to a different file

    Write the instance to a second file, read it back in, and compare
    the round-tripped content against the original input string.
    """
    # second test file; remove a stale copy first
    tfile = 'test_file2.tmp'
    if os.path.isfile(tfile):
        os.unlink(tfile)
    self.tdata.writeto(tfile)
    # parse the freshly written file
    ttable = asciifunction.open(tfile)
    # clean up before asserting so the file never lingers
    if os.path.isfile(tfile):
        os.unlink(tfile)
    self.assertEqual(str(ttable), self.data)
def testFlush(self):
    """
    Test writing a modified instance to a file

    Modify the instance, write it back to its
    original file, and test whether the content
    of this file is correct.
    """
    # delete the first row
    self.tdata.delete(0)
    # write the instance back to the original file
    self.tdata.flush()
    # read in the modified table from the old file
    ttable = asciifunction.open(self.testfile)
    # compare against the expected two remaining rows
    self.assertEqual(str(ttable), """ 102.4 529.0 galaxy 21
834.1 343.7 galaxy 23""")
def testNewNull(self):
    """
    Test providing a new Null string

    Give a new Null string for the instance.
    Set a few table elements to None.
    Check the correct writing of the None-entries
    with the new Null string.
    """
    # fill the second row with None entries
    for index in range(self.tdata.ncols):
        self.tdata[index][1] = None
    # change the null string
    self.tdata.newnull('<*>')
    # write the instance to a string, and compare
    # to the expected string with the new Null string
    self.assertEqual(str(self.tdata), """ 105.2 323.4 star 20
<*> <*> <*> <*>
834.1 343.7 galaxy 23""")
def testInfo(self):
    """
    Test the info on an instance

    The method tests the info method of the asciidata class.
    The info string must report the table dimensions and list
    every column name.

    The previous version compared ``info()`` against a second
    call of ``info()`` -- a tautology that could never fail --
    while the prepared reference string went unused (and its
    ``<type 'float'>`` reprs were Python-2 specific anyway).
    Check the essential content instead.
    """
    # generate the info string once
    info_string = self.tdata.info()
    # info() must be deterministic
    self.assertEqual(info_string, self.tdata.info())
    # the table dimensions must be reported
    self.assert_(info_string.find('Ncols: 4') > -1)
    self.assert_(info_string.find('Nrows: 3') > -1)
    # every default column name must be listed
    for col_name in ('column1', 'column2', 'column3', 'column4'):
        self.assert_(info_string.find(col_name) > -1)
def testColumnCopy(self):
    """
    Test to copy table columns

    Create deep copies of a float, an integer and a string
    column, overwrite the originals, and verify that each copy
    keeps both its number of rows and its content.
    """
    # --- float column ---
    float_copy = self.tdata[0].copy()
    for row in range(self.tdata.nrows):
        self.tdata[0][row] = 0.0
    # the copy keeps its length and original values
    self.assertEqual(float_copy.get_nrows(), 3)
    self.assertEqual(float_copy[0], 105.2)
    self.assertEqual(float_copy[1], 102.4)
    self.assertEqual(float_copy[2], 834.1)

    # --- integer column ---
    int_copy = self.tdata[3].copy()
    for row in range(self.tdata.nrows):
        self.tdata[3][row] = 0
    self.assertEqual(int_copy.get_nrows(), 3)
    self.assertEqual(int_copy[0], 20)
    self.assertEqual(int_copy[1], 21)
    self.assertEqual(int_copy[2], 23)

    # --- string column ---
    str_copy = self.tdata[2].copy()
    for row in range(self.tdata.nrows):
        self.tdata[2][row] = 'NN'
    self.assertEqual(str_copy.get_nrows(), 3)
    self.assertEqual(str_copy[0], ' star')
    self.assertEqual(str_copy[1], 'galaxy')
    self.assertEqual(str_copy[2], 'galaxy')
def testReformat(self):
    """
    Test changing column formats

    Change the format of some columns and check whether
    they are reformatted correctly.
    """
    # the correct string representation after reformatting
    reformat_data = """105.20 323.4 star 20
102.40 529.0 galaxy 21
834.10 343.7 galaxy 23"""
    # change the column formats (float, string and int column)
    self.tdata[0].reformat('%6.2f')
    self.tdata[2].reformat('%8s')
    self.tdata[3].reformat('%3i')
    # check the string against the reference string
    self.assertEqual(str(self.tdata), reformat_data)
def testRenameCol(self):
    """
    Test the renaming of columns

    Rename a column twice in a row and check that the renamed
    column is found at its old position each time.
    """
    # rename the third column and locate it by the new name
    self.tdata[2].rename('newname')
    self.assertEqual(self.tdata.find('newname'), 2)
    # rename it once more, this time addressing it by name
    self.tdata['newname'].rename('newnewname')
    self.assertEqual(self.tdata.find('newnewname'), 2)
def testGetFormat(self):
    """
    Test the retrieval of column formats

    Get the different column formats and check
    whether they are correct.
    """
    # expected format string for every column, in column order
    expected_formats = ('% 5.1f', '% 5.1f', '% 6s', '%5i')
    # compare each column format against its expectation
    for col, fmt in enumerate(expected_formats):
        self.assertEqual(self.tdata[col].get_format(), fmt)
def testNoneHeader(self):
    """
    Test the header of a table without comments

    A table loaded from a file without any comment lines
    must render an empty header string.
    """
    # the string representation of an absent header is empty
    self.assertEqual(str(self.tdata.header), '')
def testResetHeader(self):
    """
    Reset the header

    Append a header line, verify it is stored, then reset the
    header and check that the table again matches the original
    data string.
    """
    # put one line into the header and verify it renders
    self.tdata.header.append(' A new header entry!')
    self.assertEqual(str(self.tdata.header), '# A new header entry!\n')
    # wipe the header again
    self.tdata.header.reset()
    # without a header the table must equal the original string
    self.assertEqual(str(self.tdata), self.data)
def testAppendHeader(self):
    """
    Append something to the header

    Append a header line and check both the rendered header
    and the full string representation of the table.
    """
    header_line = ' A new header entry!'
    # add the line to the header
    self.tdata.header.append(header_line)
    # the header alone renders with the comment character
    self.assertEqual(str(self.tdata.header), '#' + header_line + '\n')
    # the full table is the rendered header plus the data
    expected = '#' + header_line + '\n' + self.data
    self.assertEqual(str(self.tdata), expected)
def testToHTML(self):
    """
    Write the instance as table to an HTML-file

    Export the table to HTML with a generated name, with an
    explicit file name, and with explicit row/cell attributes.
    """
    # export with a generated file name must succeed
    self.assert_(self.tdata.writetohtml())
    # repeat the export just to learn the generated name
    generated = self.tdata.writetohtml()
    if os.path.isfile(generated):
        os.unlink(generated)

    # export to an explicitly named file
    target = 'my_html.html'
    self.assert_(self.tdata.writetohtml(target))
    if os.path.isfile(target):
        os.unlink(target)

    # export with explicit <tr> and <td> attributes
    self.assert_(self.tdata.writetohtml(target,
                                        tr_attr='id="my_tr"',
                                        td_attr='id="my_td"'))
    if os.path.isfile(target):
        os.unlink(target)
def testToLatex(self):
    """
    Write the instance as table to a latex-file

    Export the table to LaTeX, once with a generated file name
    and once with an explicitly given file name.
    """
    # check whether you can write to a latex file
    self.assert_(self.tdata.writetolatex())
    # do it again, just to get the generated file name
    latex_file = self.tdata.writetolatex()
    # remove the generated file
    if os.path.isfile(latex_file):
        os.unlink(latex_file)
    # give an explicit file name as input
    latex_file = 'my_latex.tex'
    # check whether you can write to a dedicated file.
    # BUGFIX: the original called writetohtml() here and passed
    # the name of the already-deleted generated file, so the
    # dedicated-file LaTeX export was never exercised.
    self.assert_(self.tdata.writetolatex(latex_file))
    # remove the file
    if os.path.isfile(latex_file):
        os.unlink(latex_file)
class Test_AsciiDataII(unittest.TestCase):
    """
    A second test class for the asciidata module

    Covers reading tables from temporary files with various
    null markers, comment characters and delimiters, plus the
    header and iterator interfaces.
    """
    def tmpFileFromString(self, data):
        """
        Store a string into a temporary file

        The method creates a named temporary file and writes
        a string given on input into it.
        The file reference to the temporary file is returned
        for further use of it (keeping the reference alive
        keeps the file on disk).
        """
        import tempfile
        # create an open test file
        tfile = tempfile.NamedTemporaryFile()
        # fill data into the test file and flush
        tfile.write(data)
        tfile.flush()
        # return the file reference
        return tfile
    def testNullDefault(self):
        """
        Test the default for 'None' in the input

        As default there exist the strings '*', 'None',
        'Null' and 'NULL' as markers for entries with
        a None-value.
        """
        # define the data
        data = """ * 323.4 star 20
102.4 Null galaxy 21
834.1 343.7 NULL None"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance
        tdata = asciifunction.open(testfile)
        # test the various representations of the
        # None element such as '*', 'None', ...
        self.assertEqual(tdata[0][0], None)
        self.assertEqual(tdata[1][1], None)
        self.assertEqual(tdata[2][2], None)
        self.assertEqual(tdata[3][2], None)
        # check one of the column types
        self.assertEqual(tdata[0].get_type(), type(1.0))
    def testNullInput(self):
        """
        Test a non-default None value

        The string can be given which should be interpreted
        as a None entry in the table. This is tested
        by loading in a table with a non-default
        None value.
        """
        # define the data
        data = """ !! 323.4 star 20
102.4 123.5 galaxy !!
834.1 343.7 comet 25"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance, specifying
        # the null value
        tdata = asciifunction.open(testfile, null='!!')
        # test the correct loading of the None element
        self.assertEqual(tdata[0][0], None)
        self.assertEqual(tdata[3][1], None)
        # convert the element back to a string
        out_string = str(tdata)
        # find the first Null string
        first = out_string.find('!!')
        # check that it is found
        self.assert_(first > -1)
        # find the second null string
        second = out_string.find('!!', first + 1)
        # check that it is found
        self.assert_(second > -1)
        # look for a third null string
        third = out_string.find('!!', second + 1)
        # now it should not be in any more
        self.failIf(third > -1)
    def testCommentDefault(self):
        """
        Test the default comment string

        Test whether lines marked with the default
        comment string are recognized correctly.
        Also check whether comments at the beginning
        of the file are inserted into the header,
        and ignored within the file.
        """
        # define the data; the commented data row must be skipped
        data = """# This is a comment
#
 * 323.4 star 20
# 102.4 Null galaxy 21
834.1 343.7 NULL None"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance
        tdata = asciifunction.open(testfile)
        # check the number of columns
        # and the number of rows
        self.assertEqual(tdata.nrows, 2)
        self.assertEqual(tdata.ncols, 4)
        # test the various representations of the
        # None element such as '*', 'None', ...
        self.assertEqual(tdata[0][0], None)
        self.assertEqual(tdata[2][1], None)
        self.assertEqual(tdata[3][1], None)
        # check one of the column types
        self.assertEqual(tdata[0].get_type(), type(1.0))
        # check whether it is still in the header
        self.assertEqual(str(tdata.header), '# This is a comment\n#\n')
        # convert the instance back to a string
        out_string = str(tdata)
        # find the header in the string
        first = out_string.find('# This is a comment\n#\n')
        self.assertEqual(first, 0)
        # add something to the header
        tdata.header.append(' A new header entry!')
        # convert the instance back to a string
        out_string = str(tdata)
        # check the new, modified header in the string
        first = out_string.find('# This is a comment\n#\n# A new header entry!\n')
        self.assertEqual(first, 0)
        # check for another comment character
        second = out_string.find('#', 26)
        # now it should not be in any more
        self.failIf(second > -1)
    def testNewComment(self):
        """
        Test the changing of the comment string

        The comment string is changed to a different
        value. Then the table is converted to a string.
        The correct representation of the new comment
        in the string is checked.
        """
        # define the data
        data = """# This is a comment
#
 * 323.4 star 20
# 102.4 Null galaxy 21
834.1 343.7 NULL None"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance
        tdata = asciifunction.open(testfile)
        # check the number of rows
        self.assertEqual(tdata.nrows, 2)
        # change the comment string to '?'
        tdata.newcomment_char('?')
        # check whether it is still in the header
        self.assertEqual(str(tdata.header), '? This is a comment\n?\n')
        # convert the instance back to a string
        out_string = str(tdata)
        # find the header in the string
        first = out_string.find('? This is a comment\n?\n')
        self.assertEqual(first, 0)
        # add something to the header
        tdata.header.append('A new header entry!')
        # convert the instance back to a string
        out_string = str(tdata)
        # check the new, modified header in the string
        first = out_string.find('? This is a comment\n?\n?A new header entry!\n')
        self.assertEqual(first, 0)
        # check for another comment character
        second = out_string.find('?', 26)
        # now it should not be in any more
        self.failIf(second > -1)
    def testCommentInput(self):
        """
        Test an input comment string

        Test whether lines marked with a non-default
        comment string are recognized correctly.
        Also check whether comments at the beginning
        of the file are inserted into the header,
        and ignored within the file.
        """
        # define the data
        data = """@ This is a comment
@
 * 323.4 star 20
@ 102.4 Null galaxy 21
834.1 343.7 NULL None"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance with '@' as comment character
        tdata = asciifunction.open(testfile, comment_char='@')
        # check the number of columns
        # and the number of rows
        self.assertEqual(tdata.nrows, 2)
        self.assertEqual(tdata.ncols, 4)
        # test the various representations of the
        # None element such as '*', 'None', ...
        self.assertEqual(tdata[0][0], None)
        self.assertEqual(tdata[2][1], None)
        self.assertEqual(tdata[3][1], None)
        # check one of the column types
        self.assertEqual(tdata[0].get_type(), type(1.0))
        # check whether it is still in the header
        self.assertEqual(str(tdata.header), '@ This is a comment\n@\n')
        # convert the instance back to a string
        out_string = str(tdata)
        # find the header in the string
        first = out_string.find('@ This is a comment\n@\n')
        self.assertEqual(first, 0)
        # add something to the header
        tdata.header.append('A new header entry!')
        # convert the instance back to a string
        out_string = str(tdata)
        # check the new, modified header in the string
        first = out_string.find('@ This is a comment\n@\n@A new header entry!\n')
        self.assertEqual(first, 0)
        # check for another comment character
        second = out_string.find('@', 26)
        # now it should not be in any more
        self.failIf(second > -1)
    def testDelimiterInput(self):
        """
        Test a non-default delimiter

        Test whether data given with a non-default delimiter
        is loaded correctly. Also assure that the non-default
        delimiter is written out OK.
        """
        # define the data
        data = """# This is a comment
#
 * | 323.4 | star | 20
# 102.4 | Null |galaxy | 21
834.1 |343.7 |NULL | None"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance with '|' as delimiter
        tdata = asciifunction.open(testfile, delimiter='|')
        # check the number of columns
        # and the number of rows
        self.assertEqual(tdata.nrows, 2)
        self.assertEqual(tdata.ncols, 4)
        # check one of the column types
        self.assertEqual(tdata[0].get_type(), type(1.0))
        # check whether it is still in the header
        self.assertEqual(str(tdata.header), '# This is a comment\n#\n')
        # convert the instance back to a string
        out_string = str(tdata)
        # find the header in the string
        first = out_string.find('# This is a comment\n#\n')
        self.assertEqual(first, 0)
        # add something to the header
        tdata.header.append('A new header entry!')
        # convert the instance back to a string
        out_string = str(tdata)
        # the delimiter must show up in the output string
        first = out_string.find('|')
        self.assert_(first > -1)
    def testNewDelimiter(self):
        """
        Test the change of the delimiter

        Read in a table, change the delimiter.
        Then check whether the delimiter appears
        as often as necessary.
        """
        # define the data
        data = """# This is a comment
#
 * 323.4 star 20
102.4 Null galaxy 21
834.1 343.7 NULL None"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance
        tdata = asciifunction.open(testfile)
        # assign a new delimiter
        tdata.newdelimiter('?')
        # convert the instance back to a string
        out_string = str(tdata)
        # count how often the new delimiter is in the
        # string (3 rows x 3 separators = 9)
        tot_num = out_string.count('?')
        # check the number
        self.assertEqual(tot_num, 9)
    def testIterator(self):
        """
        Test the iterator over the columns and elements
        """
        # define the data
        data = """# This is a comment
#
1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance
        tdata = asciifunction.open(testfile)
        # initialize the sum
        # (NOTE: the name shadows the builtin sum())
        sum = 0.0
        # iterate over columns
        for columns in tdata:
            # iterate over column elements
            for elem in columns:
                sum += elem
        # check the number: 4 columns x 3 rows x 1.0
        self.assertEqual(sum, 12.0)
    def testHeader(self):
        """
        Test the header methods
        """
        # define the data
        data = """# This is a comment
#
1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance
        tdata = asciifunction.open(testfile)
        # check the length of the header
        self.assertEqual(len(tdata.header), 2)
        # check the first header line
        self.assertEqual(tdata.header[0], ' This is a comment\n')
        tdata.header.reset()
        # after a reset the header is empty
        self.assertEqual(len(tdata.header), 0)
        # put a new, empty header item
        tdata.header.append('')
        # replace the empty item with a 'full' item
        tdata.header[0] = ' Now with some content!!'
        str_table = str(tdata)
        first = str_table.find(' Now with some content!!')
        # the entry follows right after the comment character
        self.assertEqual(first, 1)
        # delete the only header entry
        del tdata.header[0]
        # check the length of the header
        self.assertEqual(len(tdata.header), 0)
    def testHeaderIterator(self):
        """
        Test the header iterator
        """
        # define the data
        data = """# This is the first comment
# This is the second comment
# This is the third comment
1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0
1.0 1.0 1.0 1.0"""
        # derive the test file object
        tfile = self.tmpFileFromString(data)
        # derive the name of the test file object
        testfile = tfile.name
        # create the test instance
        tdata = asciifunction.open(testfile)
        # check the length of the header
        self.assertEqual(len(tdata.header), 3)
        # iterate over the header
        for aline in tdata.header:
            # check for non-zero entry
            self.assert_(len(aline) > 0)
class Test_NullData(unittest.TestCase):
    """
    A test class for tables created from null entries
    """
    def setUp(self):
        """
        Automatic set up for the class

        Create a 4x5 table of null entries and fill it with
        integer, float and string values.
        setUp is called before each test function execution.
        """
        # create a table with only null entries
        self.tdata = asciifunction.create(4, 5)
        # fill in some values
        for ii in range(self.tdata.nrows):
            # integers
            self.tdata[0][ii] = ii
            # floats
            self.tdata[1][ii] = float(ii)
            # a string
            self.tdata[2][ii] = str(ii) + 'a'
            # another float
            self.tdata[3][ii] = float(ii) * float(ii)
    def testBasics(self):
        """
        Basic tests on the nulldata-table

        Check the dimension of the table and the
        automatically derived column types.
        """
        # check the number of columns and rows
        self.assertEqual(self.tdata.ncols, 4)
        self.assertEqual(self.tdata.nrows, 5)
        # check the column types
        self.assertEqual(self.tdata[0].get_type(), type(1))
        self.assertEqual(self.tdata[1].get_type(), type(1.0))
        self.assertEqual(self.tdata[2].get_type(), type('test'))
        self.assertEqual(self.tdata[3].get_type(), type(1.0))
    def testToSEx(self):
        """
        Check the transformation to SExtractor format

        Rename the columns, convert the table, write it to a
        file and verify the names survive the round trip.
        """
        import tempfile
        # create an open test file
        tfile = tempfile.NamedTemporaryFile()
        # change some column names
        self.tdata[0].rename('Seq')
        self.tdata[1].rename('Float')
        self.tdata[2].rename('String')
        self.tdata[3].rename('Float2')
        # change the format to SExtractor style
        self.tdata.toSExtractor()
        # write the object to a file
        self.tdata.writeto(tfile.name)
        # read in the file to a new object
        adata = asciifunction.open(tfile.name)
        # check that each renamed column is
        # found at the correct position
        cindex = adata.find('Seq')
        self.assertEqual(cindex, 0)
        cindex = adata.find('Float')
        self.assertEqual(cindex, 1)
        cindex = adata.find('String')
        self.assertEqual(cindex, 2)
        cindex = adata.find('Float2')
        self.assertEqual(cindex, 3)
    def testWriteTo(self):
        """
        Check the options for 'writeto()'

        Write the table with column info enabled and verify
        the column names survive the round trip.
        """
        import tempfile
        # create an open test file
        tfile = tempfile.NamedTemporaryFile()
        # change some column names
        self.tdata[0].rename('Seq')
        self.tdata[1].rename('Float')
        self.tdata[2].rename('String')
        self.tdata[3].rename('Float2')
        # write the object to a file, including column info
        self.tdata.writeto(tfile.name, colInfo=1)
        # read in the file to a new object
        adata = asciifunction.open(tfile.name)
        # check that each renamed column is
        # found at the correct position
        cindex = adata.find('Seq')
        self.assertEqual(cindex, 0)
        cindex = adata.find('Float')
        self.assertEqual(cindex, 1)
        cindex = adata.find('String')
        self.assertEqual(cindex, 2)
        cindex = adata.find('Float2')
        self.assertEqual(cindex, 3)
class Test_StrangeInput(unittest.TestCase):
    """
    A test class for unusual input values
    """
    def setUp(self):
        """
        Automatic set up for the class

        Create a minimal 1x1 table with a null entry.
        setUp is called before each test function execution.
        """
        # a single-cell table filled with a null entry
        self.tdata = asciifunction.create(1, 1)
    def testBasics(self):
        """
        Check format detection for exponential input

        Fill in a number given in exponential notation and
        verify the column format is derived accordingly.
        """
        # store a value written in exponential notation
        self.tdata[0][0] = '3E6'
        # the column format must reflect the exponential style
        self.assertEqual(self.tdata[0].get_format(), '% 6.0e')
class Test_SExData(unittest.TestCase):
    """
    A test class for tables created in SExtractor format
    """
    def setUp(self):
        """
        Automatic set up for the class

        Create a 4x5 SExtractor-style table and fill it with
        integer, float and string values.
        setUp is called before each test function execution.
        """
        self.tdata = asciifunction.createSEx(4, 5)
        # fill every row with deterministic values
        for row in range(self.tdata.nrows):
            self.tdata[0][row] = row                      # integers
            self.tdata[1][row] = float(row)               # floats
            self.tdata[2][row] = str(row) + 'a'           # strings
            self.tdata[3][row] = float(row) * float(row)  # more floats
    def testBasics(self):
        """
        Basic tests on the SExtractor table

        Check the dimensions of the table as well as the
        automatically derived column types.
        """
        self.assertEqual(self.tdata.ncols, 4)
        self.assertEqual(self.tdata.nrows, 5)
        # every column must carry the type of the stored data
        expected_types = (type(1), type(1.0), type('test'), type(1.0))
        for col, col_type in enumerate(expected_types):
            self.assertEqual(self.tdata[col].get_type(), col_type)
    def testOutFormat(self):
        """
        Test the write out format

        Rename the columns, write the table to a file, read it
        back and verify the names survived the round trip.
        """
        import tempfile
        out_file = tempfile.NamedTemporaryFile()
        # give every column an individual name
        new_names = ('Seq', 'Float', 'String', 'Float2')
        for col, name in enumerate(new_names):
            self.tdata[col].rename(name)
        # write the object to a file and load it again
        self.tdata.writeto(out_file.name)
        reread = asciifunction.open(out_file.name)
        # every renamed column must be found at its position
        for col, name in enumerate(new_names):
            self.assertEqual(reread.find(name), col)
class Test_AsciiFits(unittest.TestCase):
    """
    A test class for all fits related methods
    """
    def setUp(self):
        """
        Automatic set up for the class

        Write a small reference table into a named temporary
        file and open it as the test instance.
        setUp is called before each test function execution.
        """
        import tempfile
        # define the data
        data = """ 105.2 323.4 star 20
102.4 529.0 galaxy 21
834.1 343.7 galaxy 23"""
        # create an open test file and fill in the data
        self.tfile = tempfile.NamedTemporaryFile()
        self.tfile.write(data)
        self.tfile.flush()
        # create the test instance
        self.tdata = asciifunction.open(self.tfile.name)
    def testBasics(self):
        """
        Test the export to a FITS table

        Write the AsciiData instance to a FITS file, open it
        with astropy and verify the structure and content.
        """
        from astropy.io import fits as pyfits
        # write the AsciiData instance to a fits file
        fits_name = self.tdata.writetofits()
        self.assert_(os.path.isfile(fits_name))
        # open the fits file and check the HDU count
        fits = pyfits.open(fits_name)
        self.assertEqual(len(fits), 2)
        # extract the table data
        table = fits[1].data
        # four columns, three rows
        self.assertEqual(len(table.names), 4)
        self.assertEqual(len(table.field(0)), 3)
        # spot-check the numeric columns
        self.assertAlmostEqual(table.field(0)[0], 105.2, 4)
        self.assertAlmostEqual(table.field(0)[2], 834.1, 4)
        self.assertAlmostEqual(table.field(1)[1], 529.0, 4)
        # spot-check the string and integer columns
        self.assertEqual(table.field(2)[0].strip(), 'star')
        self.assertEqual(table.field(2)[1], 'galaxy')
        self.assertEqual(table.field(3)[0], 20)
        self.assertEqual(table.field(3)[1], 21)
        fits.close()
class Test_AsciiSort(unittest.TestCase):
    """
    A test class for the sorting
    """
    def setUp(self):
        """
        Automatic set up for the class

        The method creates a named temporary file, writes the
        reference table into it and opens it as the test
        instance. setUp is called before each test function
        execution.
        """
        import tempfile
        # define the data
        data = """1.0 20.0 15.0 aa
13.0 10.0 7.0 bb
1.0 30.0 7.0 cc
26.0 10.0 1.0 dd"""
        # define the number of rows
        self.NROWS_1 = 10000
        self.NROWS_2 = 1000
        # create an open test file
        self.tfile = tempfile.NamedTemporaryFile()
        # fill data into the test file and flush
        self.tfile.write(data)
        self.tfile.flush()
        # create the test instance
        self.tdata = asciifunction.open(self.tfile.name)
    def testThroughoutAscending(self):
        """
        Test ascending sort of a long table with randoms
        """
        import random
        # create an empty table
        self.tdata = asciifunction.create(2, self.NROWS_1)
        # fill the table with randoms
        for index in range(self.NROWS_1):
            self.tdata[0][index] = random.random()
        # execute the sorting command
        self.tdata.sort(0, descending=0, ordered=0)
        # go along the column, check the sorting
        for index in range(1, self.tdata.nrows):
            self.assert_(self.tdata[0][index] >= self.tdata[0][index - 1])
    def testThroughoutDescending(self):
        """
        Test descending sort of a long table with randoms
        """
        import random
        # create an empty table
        self.tdata = asciifunction.create(2, self.NROWS_1)
        # fill the table with randoms
        for index in range(self.NROWS_1):
            self.tdata[0][index] = random.random()
        # execute the sorting command
        self.tdata.sort(0, 1, ordered=0)
        # go along the column, check the sorting
        for index in range(1, self.tdata.nrows):
            self.assert_(self.tdata[0][index] <= self.tdata[0][index - 1])
    def testAscendingSort(self):
        """
        Test for sorting in ascending order
        """
        # execute the sorting command
        self.tdata.sort(0)
        # go along the column, check the sorting
        for index in range(1, self.tdata.nrows):
            self.assert_(self.tdata[0][index] >= self.tdata[0][index - 1])
    def testDescendingSort(self):
        """
        Test for sorting in descending order
        """
        # execute the sorting command
        self.tdata.sort(0, 1)
        # go along the column, check the sorting
        for index in range(1, self.tdata.nrows):
            self.assert_(self.tdata[0][index] <= self.tdata[0][index - 1])
    def testColnameSort(self):
        """
        Test for sorting with a column name
        """
        # execute the sorting command
        self.tdata.sort('column2')
        # go along the column, check the sorting
        for index in range(1, self.tdata.nrows):
            self.assert_(self.tdata['column2'][index] >= self.tdata['column2'][index - 1])
    def testCharAscSort(self):
        """
        Test for ascending sort on a string column
        """
        # execute the sorting command
        self.tdata.sort('column3')
        # go along the column, check the sorting
        for index in range(1, self.tdata.nrows):
            self.assert_(self.tdata['column3'][index] >= self.tdata['column3'][index - 1])
    def testCharDesSort(self):
        """
        Test for descending sort on a string column
        """
        # execute the sorting command
        self.tdata.sort('column3', 1)
        # go along the column, check the sorting
        for index in range(1, self.tdata.nrows):
            self.assert_(self.tdata['column3'][index] <= self.tdata['column3'][index - 1])
    def testCorrAscSort(self):
        """
        Test for ascending sort on two columns
        """
        # execute the sorting command on the secondary column
        self.tdata.sort(1, 0, 1)
        # execute the sorting command on the primary column
        self.tdata.sort(0, 0, 1)
        # go along the column
        for index in range(1, self.tdata.nrows):
            value1 = self.tdata[0][index]
            value2 = self.tdata[0][index - 1]
            # check the sorting on the primary column
            self.assert_(value1 >= value2)
            # in case of equal values in the primary column
            if self.tdata[0][index] == self.tdata[0][index - 1]:
                value1 = self.tdata[1][index]
                value2 = self.tdata[1][index - 1]
                # check the sorting on the secondary column
                self.assert_(value1 >= value2)
    def testThroughCorrAscSort(self):
        """
        Test for ascending sort on two columns of randoms
        """
        import random
        # create an empty table
        self.tdata = asciifunction.create(2, self.NROWS_2)
        # fill the table with randoms
        for index in range(self.NROWS_2):
            # the second column is filled with
            # all random variable
            self.tdata[1][index] = random.random()
            # in the first column, two rows have
            # the identical random variable
            if not index % 2:
                old = random.random()
            self.tdata[0][index] = old
        # execute the sorting command on the secondary column
        self.tdata.sort(1, 0, 1)
        # execute the sorting command on the primary column
        self.tdata.sort(0, 0, 1)
        # go along the column
        for index in range(1, self.tdata.nrows):
            value1 = self.tdata[0][index]
            value2 = self.tdata[0][index - 1]
            # check the sorting on the primary column
            self.assert_(value1 >= value2)
            # in case of equal values in the primary column
            if self.tdata[0][index] == self.tdata[0][index - 1]:
                value1 = self.tdata[1][index]
                value2 = self.tdata[1][index - 1]
                # check the sorting on the secondary column
                self.assert_(value1 >= value2)
    def testCorrDesSort(self):
        """
        Test for descending sort on two columns
        """
        # execute the sorting command on the secondary column
        self.tdata.sort(2, 1, 1)
        # execute the sorting command on the primary column
        self.tdata.sort(1, 1, 1)
        # go along the column
        for index in range(1, self.tdata.nrows):
            value1 = self.tdata[1][index]
            value2 = self.tdata[1][index - 1]
            # check the sorting on the primary column
            self.assert_(value1 <= value2)
            # in case of equal values in the primary column
            if self.tdata[1][index] == self.tdata[1][index - 1]:
                value1 = self.tdata[2][index]
                value2 = self.tdata[2][index - 1]
                # check the sorting on the secondary column
                self.assert_(value1 <= value2)
    def testThroughCorrDesSort(self):
        """
        Test for descending sort on two columns of randoms
        """
        import random
        # create an empty table
        self.tdata = asciifunction.create(2, self.NROWS_2)
        # fill the table with randoms
        for index in range(self.NROWS_2):
            # the second column is filled with
            # all random variable
            self.tdata[1][index] = random.random()
            # in the first column, two rows have
            # the identical random variable
            if not index % 2:
                old = random.random()
            self.tdata[0][index] = old
        # execute the sorting command on the secondary column
        self.tdata.sort(1, 1, 1)
        # execute the sorting command on the primary column
        self.tdata.sort(0, 1, 1)
        # go along the column
        for index in range(1, self.tdata.nrows):
            value1 = self.tdata[0][index]
            value2 = self.tdata[0][index - 1]
            # check the sorting on the primary column
            self.assert_(value1 <= value2)
            # in case of equal values in the primary column
            if self.tdata[0][index] == self.tdata[0][index - 1]:
                value1 = self.tdata[1][index]
                value2 = self.tdata[1][index - 1]
                # check the sorting on the secondary column
                self.assert_(value1 <= value2)
class Test_AsciiStrip(unittest.TestCase):
    """
    A test class for the stripping functions
    """
    def setUp(self):
        """
        Automatic set up for the class

        Write a reference table (including an all-Null row)
        into a named temporary file and open it as the test
        instance. setUp is called before each test method.
        """
        import tempfile
        # define the data
        data = """2.0 2.0 2.0 2.0
Null Null Null Null
13.0 10.0 7.0 2.3
1.0 30.0 7.0 2.3
1.0 1.0 1.0 1.0"""
        # create an open test file and fill in the data
        self.tfile = tempfile.NamedTemporaryFile()
        self.tfile.write(data)
        self.tfile.flush()
        # create the test instance
        self.tdata = asciifunction.open(self.tfile.name)
    def testRstrip(self):
        """
        Strip rows from the end of the table
        """
        start_nrows = self.tdata.nrows
        # the last row consists of 1.0 only and is stripped
        self.tdata.rstrip(1.0)
        self.assertEqual(self.tdata.nrows, start_nrows - 1)
        # 30.0 does not fill the (new) last row: no change
        self.tdata.rstrip(30.0)
        self.assertEqual(self.tdata.nrows, start_nrows - 1)
        # append two empty rows ...
        self.tdata.insert(2, self.tdata.nrows)
        self.assertEqual(self.tdata.nrows, start_nrows + 1)
        # ... and strip them off again (default: Null rows)
        self.tdata.rstrip()
        self.assertEqual(self.tdata.nrows, start_nrows - 1)
    def testLstrip(self):
        """
        Strip rows from the start of the table
        """
        start_nrows = self.tdata.nrows
        # the first row consists of 2.0 only and is stripped
        self.tdata.lstrip(2.0)
        self.assertEqual(self.tdata.nrows, start_nrows - 1)
        # the default strips the now-leading Null row
        self.tdata.lstrip()
        self.assertEqual(self.tdata.nrows, start_nrows - 2)
if __name__ == '__main__':
    # Run every test class in sequence with verbose output.  Iterating over
    # the classes removes the sixfold makeSuite/TextTestRunner boilerplate
    # that was previously repeated once per class.
    for test_class in (Test_AsciiData, Test_AsciiDataII, Test_NullData,
                       Test_AsciiFits, Test_AsciiSort, Test_AsciiStrip):
        suite = unittest.makeSuite(test_class)
        unittest.TextTestRunner(verbosity=2).run(suite)
|
spacetelescopeREPO_NAMEstsdas_strippedPATH_START.@stsdas_stripped_extracted@stsdas_stripped-master@stsdas@pkg@analysis@slitless@axe@axe_asciidata@asciidata_test.py@.PATH_END.py
|
{
"filename": "_anglesrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatterpolar/marker/_anglesrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class AnglesrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``anglesrc`` property of ``scatterpolar.marker``."""

    def __init__(
        self, plotly_name="anglesrc", parent_name="scatterpolar.marker", **kwargs
    ):
        # Default the edit type to "none" unless the caller overrides it.
        edit_type = kwargs.pop("edit_type", "none")
        super().__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatterpolar@marker@_anglesrc.py@.PATH_END.py
|
{
"filename": "boxedfi.py",
"repo_name": "amusecode/amuse",
"repo_path": "amuse_extracted/amuse-main/examples/publications/2013MNRAS.429..895P/disk_script/boxedfi.py",
"type": "Python"
}
|
from amuse.units import nbody_system
from amuse.units import units
import amuse.datamodel as core
from amuse.community.fi.interface import Fi
from amuse.ext.gasplummer import MakePlummerGasModel
class BoxedFi(Fi):
    """Fi SPH interface that removes particles leaving the periodic box.

    Particles crossing 90% of the half box size are moved from the
    simulation to ``self.escapers``; ``evolve_model`` keeps restarting the
    underlying code until the out-of-box stopping condition no longer fires.
    NOTE(review): this module uses Python 2 ``print`` statements.
    """
    def __init__(self, *args, **kargs):
        Fi.__init__(self, *args, **kargs)
        # Storage for particles removed from the simulation volume.
        self.escapers=core.Particles(0)
    def evolve_model(self, *args, **kargs):
        self.stopping_conditions.out_of_box_detection.enable()
        # Trigger the stopping condition at 90% of the half box size.
        outofbox=0.9*self.parameters.periodic_box_size/2
        self.parameters.stopping_conditions_out_of_box_size = outofbox
        # Fi.evolve_model(self,*args,**kargs)
        self.overridden().evolve_model(*args,**kargs)
        while self.stopping_conditions.out_of_box_detection.is_set():
            # Select every particle outside the sphere of radius `outofbox`.
            escapers=self.particles.select_array(
                lambda x,y,z: (x**2+y**2+z**2 > outofbox**2), ["x","y","z"])
            print "***", len(escapers)
            if len(escapers)>0:
                # Park the escapers and drop them from the live particle set.
                self.escapers.add_particles(escapers)
                self.particles.remove_particles(escapers)
            # Fi.evolve_model(self,*args, **kargs)
            self.overridden().evolve_model(*args,**kargs)
if __name__=="__main__":
    # Demonstration run: evolve a gaseous Plummer sphere inside a periodic
    # box, discarding particles that drift outside 90% of a 10 pc radius.
    Ngas=1000
    # N-body <-> SI conversion for a 100 MSun, 1 pc system.
    conv = nbody_system.nbody_to_si(100 | units.MSun, 1 | units.parsec)
    dt=conv.to_si(1|nbody_system.time)/100
    print dt.in_(units.Myr)
    parts=MakePlummerGasModel(Ngas,convert_nbody=conv).result
    parts.h_smooth=0 | units.parsec
    outofbox=0.9*10. | units.parsec
    # Remove initial particles that already lie outside the escape radius.
    escapers=parts.select_array(
        lambda x,y,z: (x**2+y**2+z**2 > outofbox**2), ["x","y","z"])
    print "**",len(escapers),outofbox.in_(units.parsec)
    parts.remove_particles(escapers)
    print len(parts)
    sph=BoxedFi(convert_nbody=conv,use_gl=True)
    sph.parameters.periodic_box_size=20. | units.parsec
    sph.parameters.timestep=dt
    sph.parameters.self_gravity_flag=False
    sph.gas_particles.add_particles(parts)
    sph.start_viewer()
    sph.evolve_model(dt*1000)
    print len(sph.gas_particles)
    print len(sph.particles)
|
amusecodeREPO_NAMEamusePATH_START.@amuse_extracted@amuse-main@examples@publications@2013MNRAS.429..895P@disk_script@boxedfi.py@.PATH_END.py
|
{
"filename": "_fontconfig_pattern.py",
"repo_name": "matplotlib/matplotlib",
"repo_path": "matplotlib_extracted/matplotlib-main/lib/matplotlib/_fontconfig_pattern.py",
"type": "Python"
}
|
"""
A module for parsing and generating `fontconfig patterns`_.
.. _fontconfig patterns:
https://www.freedesktop.org/software/fontconfig/fontconfig-user.html
"""
# This class logically belongs in `matplotlib.font_manager`, but placing it
# there would have created cyclical dependency problems, because it also needs
# to be available from `matplotlib.rcsetup` (for parsing matplotlibrc files).
from functools import lru_cache, partial
import re
from pyparsing import (
Group, Optional, ParseException, Regex, StringEnd, Suppress, ZeroOrMore, oneOf)
# Characters that must be backslash-escaped inside a family name, and the
# pre-built substitution callables that add/remove that escaping.
_family_punc = r'\\\-:,'
_family_unescape = partial(re.compile(r'\\(?=[%s])' % _family_punc).sub, '')
_family_escape = partial(re.compile(r'(?=[%s])' % _family_punc).sub, r'\\')
# Same idea for property values, which have a slightly different set of
# reserved characters.
_value_punc = r'\\=_:,'
_value_unescape = partial(re.compile(r'\\(?=[%s])' % _value_punc).sub, '')
_value_escape = partial(re.compile(r'(?=[%s])' % _value_punc).sub, r'\\')
# Bare fontconfig constants and the (property, value) pair each one expands
# to when it appears in a pattern without an explicit "name=" prefix.
_CONSTANTS = {
    'thin':           ('weight', 'light'),
    'extralight':     ('weight', 'light'),
    'ultralight':     ('weight', 'light'),
    'light':          ('weight', 'light'),
    'book':           ('weight', 'book'),
    'regular':        ('weight', 'regular'),
    'normal':         ('weight', 'normal'),
    'medium':         ('weight', 'medium'),
    'demibold':       ('weight', 'demibold'),
    'semibold':       ('weight', 'semibold'),
    'bold':           ('weight', 'bold'),
    'extrabold':      ('weight', 'extra bold'),
    'black':          ('weight', 'black'),
    'heavy':          ('weight', 'heavy'),
    'roman':          ('slant', 'normal'),
    'italic':         ('slant', 'italic'),
    'oblique':        ('slant', 'oblique'),
    'ultracondensed': ('width', 'ultra-condensed'),
    'extracondensed': ('width', 'extra-condensed'),
    'condensed':      ('width', 'condensed'),
    'semicondensed':  ('width', 'semi-condensed'),
    'expanded':       ('width', 'expanded'),
    'extraexpanded':  ('width', 'extra-expanded'),
    'ultraexpanded':  ('width', 'ultra-expanded'),
}
@lru_cache  # Build the grammar once; the parser instance is a singleton.
def _make_fontconfig_parser():
    """Return the (cached) pyparsing grammar for fontconfig patterns."""
    def listed(item):
        # One or more *item*s joined by commas (the commas are suppressed).
        return item + ZeroOrMore(Suppress(",") + item)
    name = Regex(r"[a-z]+")
    size = Regex(r"([0-9]+\.?[0-9]*|\.[0-9]+)")
    family = Regex(fr"([^{_family_punc}]|(\\[{_family_punc}]))*")
    value = Regex(fr"([^{_value_punc}]|(\\[{_value_punc}]))*")
    # A property is either "name=value[,value...]" or a bare known constant.
    prop = Group((name + Suppress("=") + listed(value)) | oneOf(_CONSTANTS))
    return (
        Optional(listed(family)("families"))
        + Optional("-" + listed(size)("sizes"))
        + ZeroOrMore(":" + prop("properties*"))
        + StringEnd()
    )
# `parse_fontconfig_pattern` is a bottleneck during the tests because it is
# repeatedly called when the rcParams are reset (to validate the default
# fonts).  In practice, the cache size doesn't grow beyond a few dozen entries
# during the test suite.
@lru_cache
def parse_fontconfig_pattern(pattern):
    """
    Parse a fontconfig *pattern* into a dict that can initialize a
    `.font_manager.FontProperties` object.
    """
    grammar = _make_fontconfig_parser()
    try:
        result = grammar.parseString(pattern)
    except ParseException as err:
        # explain becomes a plain method on pyparsing 3 (err.explain(0)).
        raise ValueError("\n" + ParseException.explain(err, 0)) from None
    grammar.resetCache()
    props = {}
    if "families" in result:
        props["family"] = [_family_unescape(f) for f in result["families"]]
    if "sizes" in result:
        props["size"] = list(result["sizes"])
    for prop in result.get("properties", []):
        if len(prop) == 1:
            # A bare constant, e.g. "bold" -> ("weight", "bold").
            prop = _CONSTANTS[prop[0]]
        key, *values = prop
        props.setdefault(key, []).extend(_value_unescape(v) for v in values)
    return props
def generate_fontconfig_pattern(d):
    """Convert a `.FontProperties` to a fontconfig pattern string."""
    # Families come first, without a leading keyword; every other entry is
    # emitted as ":key=value", skipping keys whose value is None.
    families = ",".join(_family_escape(f) for f in d.get_family())
    parts = []
    for key in ["style", "variant", "weight", "stretch", "file", "size"]:
        val = getattr(d, f"get_{key}")()
        if val is not None:
            parts.append(f":{key}={_value_escape(str(val))}")
    return families + "".join(parts)
|
matplotlibREPO_NAMEmatplotlibPATH_START.@matplotlib_extracted@matplotlib-main@lib@matplotlib@_fontconfig_pattern.py@.PATH_END.py
|
{
"filename": "verifyPtc.py",
"repo_name": "lsst/cp_verify",
"repo_path": "cp_verify_extracted/cp_verify-main/python/lsst/cp/verify/verifyPtc.py",
"type": "Python"
}
|
# This file is part of cp_verify.
#
# Developed for the LSST Data Management System.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the COPYRIGHT file at the top-level directory of this distribution
# for details of code ownership.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import lsst.pex.config as pexConfig
from scipy.optimize import least_squares
from .verifyCalib import CpVerifyCalibConfig, CpVerifyCalibTask, CpVerifyCalibConnections
__all__ = ['CpVerifyPtcConfig', 'CpVerifyPtcTask']
class CpVerifyPtcConfig(CpVerifyCalibConfig,
                        pipelineConnections=CpVerifyCalibConnections):
    """Inherits from base CpVerifyCalibConfig.

    Thresholds for the PTC verification criteria (DMTN-101 tests 16.x);
    the a00 bounds are split per CCD vendor (ITL/E2V).
    """
    gainThreshold = pexConfig.Field(
        dtype=float,
        doc="Maximum percentage difference between PTC gain and nominal amplifier gain.",
        default=5.0,
    )
    noiseThreshold = pexConfig.Field(
        dtype=float,
        doc="Maximum percentage difference between PTC readout noise and nominal "
        "amplifier readout noise.",
        default=5.0,
    )
    turnoffThreshold = pexConfig.Field(
        dtype=float,
        doc="Minimun full well requirement (in electrons). To be compared with the "
        "reported PTC turnoff per amplifier.",
        default=90000,
    )
    # Per-vendor bounds on the a00 brighter-fatter coefficient (Astier+19),
    # motivated by measurements on data (DM-30171).
    a00MinITL = pexConfig.Field(
        dtype=float,
        doc="Minimum a00 (c.f., Astier+19) for ITL CCDs.",
        default=-4.56e-6,
    )
    a00MaxITL = pexConfig.Field(
        dtype=float,
        doc="Maximum a00 (c.f., Astier+19) for ITL CCDs.",
        default=6.91e-7,
    )
    a00MinE2V = pexConfig.Field(
        dtype=float,
        doc="Minimum a00 (c.f., Astier+19) for E2V CCDs.",
        default=-3.52e-6,
    )
    a00MaxE2V = pexConfig.Field(
        dtype=float,
        doc="Maximum a00 (c.f., Astier+19) for E2V CCDs.",
        default=-2.61e-6,
    )
    def setDefaults(self):
        # Label all verification statistics with the "PTC" stage name.
        super().setDefaults()
        self.stageName = 'PTC'
def linearModel(x, m, b):
    """Evaluate a straight line at *x*.

    Parameters
    ----------
    x : scalar or `numpy.ndarray`
        Abscissa value(s).
    m : `float`
        Slope.
    b : `float`
        Intercept.

    Returns
    -------
    scalar or `numpy.ndarray`
        The value ``m*x + b``.
    """
    return b + m * x
def modelResidual(p, x, y):
    """Residual between data *y* and the line with parameters *p* at *x*.

    Parameters
    ----------
    p : sequence of `float`
        Line parameters ``(slope, intercept)`` forwarded to `linearModel`.
    x, y : scalar or `numpy.ndarray`
        Abscissa and measured ordinate values.

    Returns
    -------
    scalar or `numpy.ndarray`
        The difference ``y - linearModel(x, *p)``.
    """
    predicted = linearModel(x, *p)
    return y - predicted
class CpVerifyPtcTask(CpVerifyCalibTask):
    """PTC verification sub-class, implementing the verify method.
    """
    ConfigClass = CpVerifyPtcConfig
    _DefaultName = 'cpVerifyPtc'
    def detectorStatistics(self, inputCalib, camera=None):
        """Calculate detector level statistics from the calibration.
        Parameters
        ----------
        inputCalib : `lsst.ip.isr.IsrCalib`
            The calibration to verify.
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera to get detectors from.
        Returns
        -------
        outputStatistics : `dict` [`str`, scalar]
            A dictionary of the statistics measured and their values.
        """
        # No detector-level statistics are measured for PTC verification.
        return {}
    def amplifierStatistics(self, inputCalib, camera=None):
        """Calculate amplifier level statistics from the calibration.
        Parameters
        ----------
        inputCalib : `lsst.ip.isr.IsrCalib`
            The calibration to verify.
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera to get detectors from.
        Returns
        -------
        outputStatistics : `dict` [`str`, scalar]
            A dictionary of the statistics measured and their values.
        """
        calibMetadata = inputCalib.getMetadata().toDict()
        detId = calibMetadata['DETECTOR']
        detector = camera[detId]
        ptcFitType = calibMetadata['PTC_FIT_TYPE']
        outputStatistics = {amp.getName(): {} for amp in detector}
        for amp in detector:
            ampName = amp.getName()
            calibGain = inputCalib.gain[ampName]
            # Record fitted vs nominal gain/noise plus the PTC turnoff.
            outputStatistics[ampName]['PTC_GAIN'] = float(calibGain)
            outputStatistics[ampName]['AMP_GAIN'] = amp.getGain()
            outputStatistics[ampName]['PTC_NOISE'] = float(inputCalib.noise[ampName])
            outputStatistics[ampName]['AMP_NOISE'] = amp.getReadNoise()
            outputStatistics[ampName]['PTC_TURNOFF'] = float(inputCalib.ptcTurnoff[ampName])
            outputStatistics[ampName]['PTC_FIT_TYPE'] = ptcFitType
            outputStatistics[ampName]['PTC_ROW_MEAN_VARIANCE'] = inputCalib.rowMeanVariance[ampName].tolist()
            outputStatistics[ampName]['PTC_MAX_RAW_MEANS'] = float(np.nanmax(inputCalib.rawMeans[ampName]))
            # To plot Covs[ij] vs flux
            rawFlux = inputCalib.rawMeans[ampName].tolist()
            outputStatistics[ampName]['PTC_RAW_MEANS'] = rawFlux
            rawVars = inputCalib.rawVars[ampName].tolist()
            outputStatistics[ampName]['PTC_RAW_VARIANCE'] = rawVars
            finalFlux = inputCalib.finalMeans[ampName].tolist()
            outputStatistics[ampName]['PTC_FINAL_MEANS'] = finalFlux
            finalVars = inputCalib.finalVars[ampName].tolist()
            outputStatistics[ampName]['PTC_FINAL_VARIANCE'] = finalVars
            mask = inputCalib.expIdMask[ampName].tolist()
            outputStatistics[ampName]['PTC_EXP_ID_MASK'] = mask
            covs = inputCalib.covariances[ampName]
            outputStatistics[ampName]['PTC_COV_10'] = covs[:, 1, 0].tolist()
            outputStatistics[ampName]['PTC_COV_01'] = covs[:, 0, 1].tolist()
            outputStatistics[ampName]['PTC_COV_11'] = covs[:, 1, 1].tolist()
            outputStatistics[ampName]['PTC_COV_20'] = covs[:, 2, 0].tolist()
            outputStatistics[ampName]['PTC_COV_02'] = covs[:, 0, 2].tolist()
            # Calculate and save the slopes and offsets from Covs[ij] vs flux
            keys = ['PTC_COV_10', 'PTC_COV_01', 'PTC_COV_11', 'PTC_COV_20',
                    'PTC_COV_02']
            maskedFlux = np.array(rawFlux)[mask]
            for key in keys:
                maskedCov = np.array(outputStatistics[ampName][key])[mask]
                # Robust (Cauchy-loss) linear fit of covariance vs flux.
                linearFit = least_squares(modelResidual, [1., 0.0],
                                          args=(np.array(maskedFlux), np.array(maskedCov)),
                                          loss='cauchy')
                slopeKey = key + '_FIT_SLOPE'
                offsetKey = key + '_FIT_OFFSET'
                successKey = key + '_FIT_SUCCESS'
                outputStatistics[ampName][slopeKey] = float(linearFit.x[0])
                outputStatistics[ampName][offsetKey] = float(linearFit.x[1])
                outputStatistics[ampName][successKey] = linearFit.success
            # The a00 coefficient lives in different places depending on
            # which PTC model was fit.
            if ptcFitType == 'EXPAPPROXIMATION':
                outputStatistics[ampName]['PTC_BFE_A00'] = float(inputCalib.ptcFitPars[ampName][0])
            if ptcFitType == 'FULLCOVARIANCE':
                outputStatistics[ampName]['PTC_BFE_A00'] = float(inputCalib.aMatrix[ampName][0][0])
            # Test from eo_pipe: github.com/lsst-camera-dh/eo-pipe;
            # ptcPlotTask.py
            # Slope of [variance of means of rows](electrons^2)
            # vs [2*signal(electrons)/numCols]
            numCols = amp.getBBox().width
            mask = inputCalib.expIdMask[ampName]
            rowMeanVar = inputCalib.rowMeanVariance[ampName][mask]*calibGain**2
            signal = inputCalib.rawMeans[ampName][mask]*calibGain
            try:
                # Guards against an empty mask (both sums == 0 -> 0/0).
                slope = sum(rowMeanVar) / sum(2.*signal/numCols)
            except ZeroDivisionError:
                slope = np.nan
            outputStatistics[ampName]['PTC_ROW_MEAN_VARIANCE_SLOPE'] = float(slope)
        return outputStatistics
    def verify(self, calib, statisticsDict, camera=None):
        """Verify that the calibration meets the verification criteria.
        Parameters
        ----------
        calib : `lsst.ip.isr.IsrCalib`
            The calibration to verify.
        statisticsDict : `dict` [`str`, `dict` [`str`, scalar]],
            Dictionary of measured statistics.  The inner dictionary
            should have keys that are statistic names (`str`) with
            values that are some sort of scalar (`int` or `float` are
            the mostly likely types).
        camera : `lsst.afw.cameraGeom.Camera`, optional
            Input camera to get detectors from.
        Returns
        -------
        outputStatistics : `dict` [`str`, `dict` [`str`, `bool`]]
            A dictionary indexed by the amplifier name, containing
            dictionaries of the verification criteria.
        success : `bool`
            A boolean indicating whether all tests have passed.
        """
        verifyStats = {}
        success = True
        calibMetadata = calib.getMetadata().toDict()
        detId = calibMetadata['DETECTOR']
        detector = camera[detId]
        ptcFitType = calibMetadata['PTC_FIT_TYPE']
        # 'DET_SER' is of the form 'ITL-3800C-229'
        detVendor = calibMetadata['DET_SER'].split('-')[0]
        for amp in detector:
            verify = {}
            ampName = amp.getName()
            calibGain = calib.gain[ampName]
            # Percentage deviations of fitted gain/noise from nominal.
            diffGain = (np.abs(calibGain - amp.getGain()) / amp.getGain())*100
            diffNoise = (np.abs(calib.noise[ampName] - amp.getReadNoise()) / amp.getReadNoise())*100
            # DMTN-101: 16.1 and 16.2
            # The fractional relative difference between the fitted PTC and the
            # nominal amplifier gain and readout noise values should be less
            # than a certain threshold (default: 5%).
            verify['PTC_GAIN'] = bool(diffGain < self.config.gainThreshold)
            verify['PTC_NOISE'] = bool(diffNoise < self.config.noiseThreshold)
            # Check that the noises measured in cpPtcExtract do not evolve
            # as a function of flux.
            # We check that the reduced chi squared statistic between the
            # noises and the mean of the noises less than 1.25 sigmas
            # NOTE(review): a textbook reduced chi-squared divides by the
            # variance (std**2), not the standard deviation -- confirm intent.
            mask = calib.expIdMask[ampName]
            noiseList = calib.noiseList[ampName][mask]
            expectedNoiseList = np.zeros_like(noiseList) + np.mean(noiseList)
            chiSquared = np.sum((noiseList - expectedNoiseList)**2 / np.std(noiseList))
            reducedChiSquared = chiSquared / len(noiseList)
            verify['NOISE_SIGNAL_INDEPENDENCE'] = bool(reducedChiSquared < 1.25)
            # DMTN-101: 16.3
            # Check that the measured PTC turnoff is at least greater than the
            # full-well requirement of 90k e-.
            turnoffCut = self.config.turnoffThreshold
            verify['PTC_TURNOFF'] = bool(calib.ptcTurnoff[ampName]*calibGain > turnoffCut)
            # DMTN-101: 16.4
            # Check the a00 value (brighter-fatter effect).
            # This is a purely electrostatic parameter that should not change
            # unless voltages are changed (e.g., parallel, bias voltages).
            # Check that the fitted a00 parameter per CCD vendor is within a
            # range motivated by measurements on data (DM-30171).
            if ptcFitType in ['EXPAPPROXIMATION', 'FULLCOVARIANCE']:
                # a00 is a fit parameter from these models.
                if ptcFitType == 'EXPAPPROXIMATION':
                    a00 = calib.ptcFitPars[ampName][0]
                else:
                    a00 = calib.aMatrix[ampName][0][0]
                if detVendor == 'ITL':
                    a00Max = self.config.a00MaxITL
                    a00Min = self.config.a00MinITL
                    verify['PTC_BFE_A00'] = bool(a00 > a00Min and a00 < a00Max)
                elif detVendor == 'E2V':
                    a00Max = self.config.a00MaxE2V
                    a00Min = self.config.a00MinE2V
                    verify['PTC_BFE_A00'] = bool(a00 > a00Min and a00 < a00Max)
                else:
                    raise RuntimeError(f"Detector type {detVendor} not one of 'ITL' or 'E2V'")
            # Overall success among all tests for this amp.
            verify['SUCCESS'] = bool(np.all(list(verify.values())))
            if verify['SUCCESS'] is False:
                success = False
            verifyStats[ampName] = verify
        return {'AMP': verifyStats}, bool(success)
    def repackStats(self, statisticsDict, dimensions):
        # docstring inherited
        rows = {}
        rowList = []
        # No matrix-valued results are produced for PTC verification.
        matrixRowList = None
        if self.config.useIsrStatistics:
            mjd = statisticsDict["ISR"]["MJD"]
        else:
            mjd = np.nan
        # Common columns shared by every output row.
        rowBase = {
            "instrument": dimensions["instrument"],
            "detector": dimensions["detector"],
            "mjd": mjd,
        }
        # AMP results:
        for ampName, stats in statisticsDict["AMP"].items():
            rows[ampName] = {}
            rows[ampName].update(rowBase)
            rows[ampName]["amplifier"] = ampName
            for key, value in stats.items():
                rows[ampName][f"{self.config.stageName}_{key}"] = value
        # VERIFY results
        for ampName, stats in statisticsDict["VERIFY"]["AMP"].items():
            for key, value in stats.items():
                rows[ampName][f"{self.config.stageName}_VERIFY_{key}"] = value
        # pack final list
        for ampName, stats in rows.items():
            rowList.append(stats)
        return rowList, matrixRowList
|
lsstREPO_NAMEcp_verifyPATH_START.@cp_verify_extracted@cp_verify-main@python@lsst@cp@verify@verifyPtc.py@.PATH_END.py
|
{
"filename": "test_summarymetrics.py",
"repo_name": "lsst/rubin_sim",
"repo_path": "rubin_sim_extracted/rubin_sim-main/tests/maf/test_summarymetrics.py",
"type": "Python"
}
|
import unittest
import healpy as hp
import numpy as np
import rubin_sim.maf.metrics as metrics
class TestSummaryMetrics(unittest.TestCase):
    """Unit tests for the MAF summary metrics (identity, fONv/fOArea,
    normalize, zeropoint, and total power)."""
    def test_identity_metric(self):
        """Test identity metric."""
        dv = np.arange(0, 10, 0.5)
        # Wrap the values in the structured-array form the metrics expect.
        dv = np.array(list(zip(dv)), dtype=[("testdata", "float")])
        testmetric = metrics.IdentityMetric("testdata")
        np.testing.assert_equal(testmetric.run(dv), dv["testdata"])
    def testf_o_nv(self):
        """
        Test the fONv metric.
        """
        nside = 128
        npix = hp.nside2npix(nside)
        names = ["metricdata"]
        types = [int]
        data = np.zeros(npix, dtype=list(zip(names, types)))
        # Uniform sky: every pixel gets one visit more than the benchmark.
        data["metricdata"] += 826
        metric = metrics.FONv(col="ack", nside=nside, n_visit=825, asky=18000.0)
        slice_point = {"sid": 0}
        result = metric.run(data, slice_point)
        # result is recarray with 'min' and 'median' number of visits
        # over the Asky area.
        # All pixels had 826 visits, so that is min and median here.
        min_nvis = result["value"][np.where(result["name"] == "MinNvis")]
        median_nvis = result["value"][np.where(result["name"] == "MedianNvis")]
        self.assertEqual(min_nvis, 826)
        self.assertEqual(median_nvis, 826)
        # Now update so that 13k of sky is 826, rest 0.
        deginsph = 41253
        npix_nk = int(npix * (13000.0 / deginsph))
        data["metricdata"] = 0
        data["metricdata"][:npix_nk] = 826
        result = metric.run(data, slice_point)
        min_nvis = result["value"][np.where(result["name"] == "MinNvis")]
        median_nvis = result["value"][np.where(result["name"] == "MedianNvis")]
        self.assertEqual(min_nvis, 0)
        self.assertEqual(median_nvis, 826)
    def testf_o_area(self):
        """Test fOArea metric."""
        nside = 128
        npix = hp.nside2npix(nside)
        names = ["metricdata"]
        types = [int]
        data = np.zeros(npix, dtype=list(zip(names, types)))
        data["metricdata"] += 826
        metric = metrics.FOArea(col="ack", nside=nside, n_visit=825, asky=18000.0)
        slice_point = {"sid": 0}
        result = metric.run(data, slice_point)
        # fOArea returns the area with at least Nvisits.
        # Full sphere in square degrees: 4*pi*(180/pi)^2 = 129600/pi.
        deginsph = 129600.0 / np.pi
        np.testing.assert_almost_equal(result, deginsph)
        # Zero out half the sky; the qualifying area should halve too.
        data["metricdata"][: data.size // 2] = 0
        result = metric.run(data, slice_point)
        np.testing.assert_almost_equal(result, deginsph / 2.0)
    def test_normalize_metric(self):
        """Test normalize metric."""
        data = np.ones(10, dtype=list(zip(["testcol"], ["float"])))
        metric = metrics.NormalizeMetric(col="testcol", norm_val=5.5)
        result = metric.run(data)
        np.testing.assert_equal(result, np.ones(10, float) / 5.5)
    def test_zeropoint_metric(self):
        """Test zeropoint metric."""
        data = np.ones(10, dtype=list(zip(["testcol"], ["float"])))
        metric = metrics.ZeropointMetric(col="testcol", zp=5.5)
        result = metric.run(data)
        np.testing.assert_equal(result, np.ones(10, float) + 5.5)
    def test_total_power_metric(self):
        # A constant map has no power at l > 0, so total power is 0.
        nside = 128
        data = np.ones(12 * nside**2, dtype=list(zip(["testcol"], ["float"])))
        metric = metrics.TotalPowerMetric(col="testcol")
        result = metric.run(data)
        np.testing.assert_equal(result, 0.0)
# Allow running this test module directly (outside a test runner).
if __name__ == "__main__":
    unittest.main()
|
lsstREPO_NAMErubin_simPATH_START.@rubin_sim_extracted@rubin_sim-main@tests@maf@test_summarymetrics.py@.PATH_END.py
|
{
"filename": "tornadoserver.py",
"repo_name": "miguelzuma/hi_class_public",
"repo_path": "hi_class_public_extracted/hi_class_public-master/RealSpaceInterface/tornadoserver.py",
"type": "Python"
}
|
from Calc2D.CalculationClass import Calculation
import time
import numpy as np
from concurrent.futures import ThreadPoolExecutor
from tornado.ioloop import IOLoop
from tornado import gen
import tornado.web
import tornado.websocket
import os
import os.path
import json
import unicodedata
import logging
import base64
import traceback
import sys
import config
# Shared thread pool used to off-load blocking CLASS computations so the
# Tornado IO loop stays responsive.
pool = ThreadPoolExecutor(max_workers=config.MAX_THREADPOOL_WORKERS)
def generate_redshifts(redshift_config):
    """Build one descending, duplicate-free redshift array.

    Each entry of *redshift_config* is a dict with keys ``"from"``,
    ``"to"``, ``"points"`` and ``"log"``; linear segments are sampled with
    ``np.linspace`` and logarithmic ones with ``np.logspace``.
    """
    logging.info(redshift_config)
    segments = []
    for spec in redshift_config:
        if spec["log"]:
            segments.append(
                np.logspace(np.log10(spec["from"]), np.log10(spec["to"]), spec["points"]))
        else:
            segments.append(np.linspace(spec["from"], spec["to"], spec["points"]))
    # np.unique sorts ascending and drops duplicates; flip for descending z.
    return np.flip(np.unique(np.concatenate(segments)), axis=0)
# Load available colormaps
def get_colormaps(path=config.COLORMAP_PATH):
    """Scan ``static/<path>`` for colormap images, grouped by category.

    Returns a pair ``(categories, maps)`` where ``categories`` is the list
    of directory names and ``maps[i]`` lists the ``{'label', 'src'}`` dicts
    for the i-th category.  NOTE: the default argument is evaluated once at
    import time; directories not listed in ``order`` raise a ``KeyError``.
    """
    categories = []
    maps = []
    # Fixed display order of the category directories.
    order = {'Default': 1, 'Uniform': 2, 'Diverging': 3, 'Miscellaneous': 4}
    cmap_directories = list(sorted(
        os.listdir(os.path.join("static", path)),
        key=lambda d: order[d]
    ))
    for directory in cmap_directories:
        categories.append(directory)
        maps_for_category = []
        for cmap in os.listdir(os.path.join("static", path, directory)):
            maps_for_category.append({
                # Strip the file extension for the display label.
                'label': cmap[:cmap.rfind(".")],
                'src': os.path.join(os.path.join(config.COLORMAP_PATH, directory, cmap)),
            })
        maps.append(maps_for_category)
    return categories, maps
class SimulationHandler(tornado.web.RequestHandler):
    """Serves the main page with the available colormaps pre-rendered."""
    def get(self):
        categories, colormaps = get_colormaps()
        self.render('RSI.html', categories=categories, colormaps=colormaps)
class DataConnection(tornado.websocket.WebSocketHandler):
    """WebSocket endpoint driving one `Calculation` per connected client.

    Protocol: the client sends JSON messages with a ``type`` of "Initial",
    "Cosmo" or "Start"; results are streamed back as JSON messages whose
    ``type`` field identifies the payload.  Heavy work is pushed onto the
    module-level thread pool.  NOTE(review): this module is Python 2 only
    (``dict.iteritems``; base64 payloads placed directly into JSON).
    """
    def open(self):
        logging.info("Client connected!")
        # One calculation object per websocket connection.
        self.calc = Calculation(kbins=config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
        # Send list of `k` values only once
        logging.info("Sending k range to client");
        self.write_message(json.dumps({
            "type": "krange",
            "k": self.calc.krange.tolist()
        }))
    def on_close(self):
        logging.info("Connection was closed")
    @gen.coroutine
    def on_message(self, message):
        """Dispatch a client request by its ``type`` field."""
        message = json.loads(message)
        param_type = message['type']
        logging.debug("Received message from client: {}".format(message))
        params = message['params']
        if param_type == "Initial":
            # Configure grid size/resolution and generate the initial state.
            initialDataType = str(params['initialDataType'])
            size = params["xScale"]
            resolution = int(params["resolution"])
            self.calc.resolution = resolution
            self.calc.size = size
            logging.info("Size: {} x {} Mpc^2, resolution: {} x {}".format(size, size, resolution, resolution))
            SIlimit = params['SILimit']
            if SIlimit == "None":
                SIlimit = None
            sigma = float(params['sigma'])
            SI_ns = params['n_s']
            # Scale-invariant spectra use a fixed amplitude; otherwise 1.
            if initialDataType == "SI":
                A_s = 2.214 * 10**(-9)
            else:
                A_s = 1
            redshift = generate_redshifts(params["redshift"])
            self.calc.redshift = redshift
            self.write_message(
                json.dumps({
                    'type': 'redshift',
                    'redshift': redshift.tolist()
                }))
            logging.info("Submitting initial state generation to ThreadPoolExecutor")
            yield pool.submit(self.set_initial_condition, sigma, initialDataType,
                              SIlimit, SI_ns, A_s)
            self.send_initial_state()
            self.write_message(json.dumps({'type': 'success', 'sort': 'Initial'}))
        elif param_type == "Cosmo":
            # Run the (slow) Boltzmann calculation off the IO loop.
            logging.info("Received cosmological parameters")
            cosmological_parameters = params
            logging.info("Submitting calculation to ThreadPoolExecutor")
            messages = yield pool.submit(self.set_cosmological_parameters, cosmological_parameters)
            for message in messages:
                self.write_message(json.dumps(message))
        elif param_type == "Start":
            # Stream one frame per redshift to the client.
            logging.info("Starting propagation...")
            try:
                for redindex, z in enumerate(self.calc.redshift):
                    self.send_frame(redindex)
                self.write_message(json.dumps({'type': 'success', 'sort': 'Data'}))
            except Exception as e:
                logging.exception(e)
                self.send_exception(e)
    def send_frame(self, redindex):
        """Send the real-space and transfer-function data for one redshift."""
        # `extrema`: (minimum, maximum) of (real space) data
        Valuenew, FValuenew, extrema = self.calc.getData(redindex)
        logging.info("Sending data for redshift = {}".format(self.calc.redshift[redindex]))
        # Create data to be displayed in transfer function window
        TransferData, _ = self.calc.getTransferData(redindex)
        self.write_message(json.dumps({'type': 'extrema', 'extrema': extrema}))
        progress = float(redindex) / len(self.calc.redshift)
        # Arrays are shipped as base64-encoded float32 buffers.
        real = {quantity: base64.b64encode(data.astype(np.float32)) for quantity, data in Valuenew.iteritems()}
        transfer = {quantity: base64.b64encode(data.astype(np.float32)) for quantity, data in TransferData.iteritems()}
        self.write_message(
            json.dumps({
                'type': 'data',
                'progress': progress,
                'real': real,
                'fourier': [],
                'transfer': transfer,
            }))
    def send_initial_state(self):
        """Send the freshly generated initial field plus placeholder transfer data."""
        Value, FValue, extrema = self.calc.getInitialData()
        # Placeholder transfer function (all ones) before any calculation.
        TransferData = np.ones(config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
        krange = np.zeros(config.TRANSFER_FUNCTION_CLIENT_SAMPLES)
        logging.info("Sending initial data to client.")
        self.write_message({
            "type": "resolution",
            "value": self.calc.resolution
        })
        extremastring = json.dumps({'type': 'extrema', 'extrema': extrema})
        datastring = json.dumps({
            'type': 'data',
            'real': base64.b64encode(Value.astype(np.float32)),
            'fourier': [],
            'transfer': base64.b64encode(TransferData.astype(np.float32)),
            'k': krange.tolist()
        })
        self.write_message(extremastring)
        self.write_message(datastring)
    def set_initial_condition(self, sigma, initialDataType, SIlimit, SI_ns, A_s):
        """Generate the initial conditions; runs on the worker thread pool."""
        try:
            self.calc.setInitialConditions(
                sigma=sigma,
                initialDataType=initialDataType,
                SIlimit=SIlimit,
                SI_ns=SI_ns,
                A=A_s
            )
        except Exception as e:
            # Report failures to the client instead of dying silently in the pool.
            logging.exception(e)
            self.send_exception(e)
    def send_exception(self, e):
        """Forward the current traceback to the client as an 'exception' message."""
        self.write_message(json.dumps({'type': 'exception', 'exception': traceback.format_exc()}))
    def set_cosmological_parameters(self, cosmologicalParameters):
        """Run the cosmology calculation; returns the result messages to send."""
        try:
            messages = []
            logging.info("Starting calculation...")
            self.calc.setCosmologialParameters(cosmologicalParameters=cosmologicalParameters)
            logging.info("Finished calculation!")
            messages.append({'type': 'success', 'sort': 'Cosmo'})
            messages.append({
                'type': 'Cl',
                'l': self.calc.tCl.l.tolist(),
                'tCl': self.calc.tCl.tCl.tolist()
            })
            messages.append({
                'type': 'mPk',
                'kh': self.calc.mPk.kh.tolist(),
                'Pkh': self.calc.mPk.Pkh.tolist()
            })
            # Locate the animation frame closest to (but not beyond) decoupling.
            z_of_decoupling = self.calc.z_dec
            frame_of_decoupling = np.argmin(np.abs(z_of_decoupling - self.calc.redshift))
            if self.calc.redshift[frame_of_decoupling] > z_of_decoupling:
                frame_of_decoupling -= 1
            messages.append({
                'type': 'decoupling',
                'frame': frame_of_decoupling,
                'z': z_of_decoupling})
        except Exception as e:
            logging.exception(e)
            self.send_exception(e)
        else:
            return messages
def main():
    """Configure logging, build the Tornado application and start serving."""
    logging.getLogger().setLevel(logging.DEBUG)
    application = tornado.web.Application(
        [
            (r"/", SimulationHandler),
            (r"/datasocket", DataConnection),
        ],
        template_path=os.path.join(os.path.dirname(__file__), "templates"),
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        debug=True,
    )
    # Port from config unless overridden on the command line.
    PORT = config.PORT if len(sys.argv) == 1 else int(sys.argv[1])
    application.listen(PORT)
    logging.info("Application launched on http://localhost:{}".format(PORT))
    # NOTE(review): `instance().current()` is redundant — `IOLoop.current()`
    # alone would do; behavior is the same.
    IOLoop.instance().current().start()
if __name__ == '__main__':
    main()
|
miguelzumaREPO_NAMEhi_class_publicPATH_START.@hi_class_public_extracted@hi_class_public-master@RealSpaceInterface@tornadoserver.py@.PATH_END.py
|
{
"filename": "inspect_nbody_ebf.ipynb",
"repo_name": "jan-rybizki/Galaxia_wrap",
"repo_path": "Galaxia_wrap_extracted/Galaxia_wrap-master/notebook/inspect_nbody_ebf.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import ebf
%pylab inline
```
Populating the interactive namespace from numpy and matplotlib
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/LSMC/MCssmc.ebf','/')
file
```
{'age': array([1.350000e+01, 1.349865e+01, 1.349730e+01, ..., 4.050000e-03,
2.700000e-03, 1.350000e-03]),
'alpha': array([0., 0., 0., ..., 0., 0., 0.]),
'feh': array([-1.92466563, -1.92338636, -1.88093037, ..., -0.49224673,
-0.48972049, -0.41753954]),
'id': array(1),
'mass': array([30000., 30000., 30000., ..., 30000., 30000., 30000.]),
'pos3': array([[ 15.85870753, -36.1688855 , -42.44482933],
[ 14.99876907, -35.26202132, -41.95136902],
[ 15.43341757, -36.06935015, -42.59099824],
...,
[ 14.71648538, -36.44159281, -42.49284659],
[ 15.30407388, -35.87169223, -42.291255 ],
[ 15.486919 , -36.96493374, -41.85361171]]),
'vel3': array([[ -38.53164829, -189.19462929, 133.04099093],
[ -31.88108163, -188.00032315, 118.73106812],
[ -15.83870308, -208.47262733, 134.75970851],
...,
[ -35.51727405, -188.34719039, 137.17249523],
[ -14.67915507, -199.4587696 , 149.33073664],
[ -29.46010085, -185.22466913, 135.75181268]])}
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/cluster/cluster_listASCC_101.ebf','/')
file['mass']
```
array([9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244, 9.2000244,
9.2000244, 9.2000244, 9.2000244, 9.2000244])
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/flat_sfr/flat_sfr_selection_down_to_5.ebf','/')
file
```
{'age': array([13.5 , 13.4865, 13.473 , 13.4595, 13.446 , 13.4325, 13.419 ,
13.4055, 13.392 , 13.3785, 13.365 , 13.3515, 13.338 , 13.3245,
13.311 , 13.2975, 13.284 , 13.2705, 13.257 , 13.2435, 13.23 ,
13.2165, 13.203 , 13.1895, 13.176 , 13.1625, 13.149 , 13.1355,
13.122 , 13.1085, 13.095 , 13.0815, 13.068 , 13.0545, 13.041 ,
13.0275, 13.014 , 13.0005, 12.987 , 12.9735, 12.96 , 12.9465,
12.933 , 12.9195, 12.906 , 12.8925, 12.879 , 12.8655, 12.852 ,
12.8385, 12.825 , 12.8115, 12.798 , 12.7845, 12.771 , 12.7575,
12.744 , 12.7305, 12.717 , 12.7035, 12.69 , 12.6765, 12.663 ,
12.6495, 12.636 , 12.6225, 12.609 , 12.5955, 12.582 , 12.5685,
12.555 , 12.5415, 12.528 , 12.5145, 12.501 , 12.4875, 12.474 ,
12.4605, 12.447 , 12.4335, 12.42 , 12.4065, 12.393 , 12.3795,
12.366 , 12.3525, 12.339 , 12.3255, 12.312 , 12.2985, 12.285 ,
12.2715, 12.258 , 12.2445, 12.231 , 12.2175, 12.204 , 12.1905,
12.177 , 12.1635, 12.15 , 12.1365, 12.123 , 12.1095, 12.096 ,
12.0825, 12.069 , 12.0555, 12.042 , 12.0285, 12.015 , 12.0015,
11.988 , 11.9745, 11.961 , 11.9475, 11.934 , 11.9205, 11.907 ,
11.8935, 11.88 , 11.8665, 11.853 , 11.8395, 11.826 , 11.8125,
11.799 , 11.7855, 11.772 , 11.7585, 11.745 , 11.7315, 11.718 ,
11.7045, 11.691 , 11.6775, 11.664 , 11.6505, 11.637 , 11.6235,
11.61 , 11.5965, 11.583 , 11.5695, 11.556 , 11.5425, 11.529 ,
11.5155, 11.502 , 11.4885, 11.475 , 11.4615, 11.448 , 11.4345,
11.421 , 11.4075, 11.394 , 11.3805, 11.367 , 11.3535, 11.34 ,
11.3265, 11.313 , 11.2995, 11.286 , 11.2725, 11.259 , 11.2455,
11.232 , 11.2185, 11.205 , 11.1915, 11.178 , 11.1645, 11.151 ,
11.1375, 11.124 , 11.1105, 11.097 , 11.0835, 11.07 , 11.0565,
11.043 , 11.0295, 11.016 , 11.0025, 10.989 , 10.9755, 10.962 ,
10.9485, 10.935 , 10.9215, 10.908 , 10.8945, 10.881 , 10.8675,
10.854 , 10.8405, 10.827 , 10.8135, 10.8 , 10.7865, 10.773 ,
10.7595, 10.746 , 10.7325, 10.719 , 10.7055, 10.692 , 10.6785,
10.665 , 10.6515, 10.638 , 10.6245, 10.611 , 10.5975, 10.584 ,
10.5705, 10.557 , 10.5435, 10.53 , 10.5165, 10.503 , 10.4895,
10.476 , 10.4625, 10.449 , 10.4355, 10.422 , 10.4085, 10.395 ,
10.3815, 10.368 , 10.3545, 10.341 , 10.3275, 10.314 , 10.3005,
10.287 , 10.2735, 10.26 , 10.2465, 10.233 , 10.2195, 10.206 ,
10.1925, 10.179 , 10.1655, 10.152 , 10.1385, 10.125 , 10.1115,
10.098 , 10.0845, 10.071 , 10.0575, 10.044 , 10.0305, 10.017 ,
10.0035, 9.99 , 9.9765, 9.963 , 9.9495, 9.936 , 9.9225,
9.909 , 9.8955, 9.882 , 9.8685, 9.855 , 9.8415, 9.828 ,
9.8145, 9.801 , 9.7875, 9.774 , 9.7605, 9.747 , 9.7335,
9.72 , 9.7065, 9.693 , 9.6795, 9.666 , 9.6525, 9.639 ,
9.6255, 9.612 , 9.5985, 9.585 , 9.5715, 9.558 , 9.5445,
9.531 , 9.5175, 9.504 , 9.4905, 9.477 , 9.4635, 9.45 ,
9.4365, 9.423 , 9.4095, 9.396 , 9.3825, 9.369 , 9.3555,
9.342 , 9.3285, 9.315 , 9.3015, 9.288 , 9.2745, 9.261 ,
9.2475, 9.234 , 9.2205, 9.207 , 9.1935, 9.18 , 9.1665,
9.153 , 9.1395, 9.126 , 9.1125, 9.099 , 9.0855, 9.072 ,
9.0585, 9.045 , 9.0315, 9.018 , 9.0045, 8.991 , 8.9775,
8.964 , 8.9505, 8.937 , 8.9235, 8.91 , 8.8965, 8.883 ,
8.8695, 8.856 , 8.8425, 8.829 , 8.8155, 8.802 , 8.7885,
8.775 , 8.7615, 8.748 , 8.7345, 8.721 , 8.7075, 8.694 ,
8.6805, 8.667 , 8.6535, 8.64 , 8.6265, 8.613 , 8.5995,
8.586 , 8.5725, 8.559 , 8.5455, 8.532 , 8.5185, 8.505 ,
8.4915, 8.478 , 8.4645, 8.451 , 8.4375, 8.424 , 8.4105,
8.397 , 8.3835, 8.37 , 8.3565, 8.343 , 8.3295, 8.316 ,
8.3025, 8.289 , 8.2755, 8.262 , 8.2485, 8.235 , 8.2215,
8.208 , 8.1945, 8.181 , 8.1675, 8.154 , 8.1405, 8.127 ,
8.1135, 8.1 , 8.0865, 8.073 , 8.0595, 8.046 , 8.0325,
8.019 , 8.0055, 7.992 , 7.9785, 7.965 , 7.9515, 7.938 ,
7.9245, 7.911 , 7.8975, 7.884 , 7.8705, 7.857 , 7.8435,
7.83 , 7.8165, 7.803 , 7.7895, 7.776 , 7.7625, 7.749 ,
7.7355, 7.722 , 7.7085, 7.695 , 7.6815, 7.668 , 7.6545,
7.641 , 7.6275, 7.614 , 7.6005, 7.587 , 7.5735, 7.56 ,
7.5465, 7.533 , 7.5195, 7.506 , 7.4925, 7.479 , 7.4655,
7.452 , 7.4385, 7.425 , 7.4115, 7.398 , 7.3845, 7.371 ,
7.3575, 7.344 , 7.3305, 7.317 , 7.3035, 7.29 , 7.2765,
7.263 , 7.2495, 7.236 , 7.2225, 7.209 , 7.1955, 7.182 ,
7.1685, 7.155 , 7.1415, 7.128 , 7.1145, 7.101 , 7.0875,
7.074 , 7.0605, 7.047 , 7.0335, 7.02 , 7.0065, 6.993 ,
6.9795, 6.966 , 6.9525, 6.939 , 6.9255, 6.912 , 6.8985,
6.885 , 6.8715, 6.858 , 6.8445, 6.831 , 6.8175, 6.804 ,
6.7905, 6.777 , 6.7635, 6.75 , 6.7365, 6.723 , 6.7095,
6.696 , 6.6825, 6.669 , 6.6555, 6.642 , 6.6285, 6.615 ,
6.6015, 6.588 , 6.5745, 6.561 , 6.5475, 6.534 , 6.5205,
6.507 , 6.4935, 6.48 , 6.4665, 6.453 , 6.4395, 6.426 ,
6.4125, 6.399 , 6.3855, 6.372 , 6.3585, 6.345 , 6.3315,
6.318 , 6.3045, 6.291 , 6.2775, 6.264 , 6.2505, 6.237 ,
6.2235, 6.21 , 6.1965, 6.183 , 6.1695, 6.156 , 6.1425,
6.129 , 6.1155, 6.102 , 6.0885, 6.075 , 6.0615, 6.048 ,
6.0345, 6.021 , 6.0075, 5.994 , 5.9805, 5.967 , 5.9535,
5.94 , 5.9265, 5.913 , 5.8995, 5.886 , 5.8725, 5.859 ,
5.8455, 5.832 , 5.8185, 5.805 , 5.7915, 5.778 , 5.7645,
5.751 , 5.7375, 5.724 , 5.7105, 5.697 , 5.6835, 5.67 ,
5.6565, 5.643 , 5.6295, 5.616 , 5.6025, 5.589 , 5.5755,
5.562 , 5.5485, 5.535 , 5.5215, 5.508 , 5.4945, 5.481 ,
5.4675, 5.454 , 5.4405, 5.427 , 5.4135, 5.4 , 5.3865,
5.373 , 5.3595, 5.346 , 5.3325, 5.319 , 5.3055, 5.292 ,
5.2785, 5.265 , 5.2515, 5.238 , 5.2245, 5.211 , 5.1975,
5.184 , 5.1705, 5.157 , 5.1435, 5.13 , 5.1165, 5.103 ,
5.0895, 5.076 , 5.0625, 5.049 , 5.0355, 5.022 , 5.0085,
4.995 , 4.9815, 4.968 , 4.9545, 4.941 , 4.9275, 4.914 ,
4.9005, 4.887 , 4.8735, 4.86 , 4.8465, 4.833 , 4.8195,
4.806 , 4.7925, 4.779 , 4.7655, 4.752 , 4.7385, 4.725 ,
4.7115, 4.698 , 4.6845, 4.671 , 4.6575, 4.644 , 4.6305,
4.617 , 4.6035, 4.59 , 4.5765, 4.563 , 4.5495, 4.536 ,
4.5225, 4.509 , 4.4955, 4.482 , 4.4685, 4.455 , 4.4415,
4.428 , 4.4145, 4.401 , 4.3875, 4.374 , 4.3605, 4.347 ,
4.3335, 4.32 , 4.3065, 4.293 , 4.2795, 4.266 , 4.2525,
4.239 , 4.2255, 4.212 , 4.1985, 4.185 , 4.1715, 4.158 ,
4.1445, 4.131 , 4.1175, 4.104 , 4.0905, 4.077 , 4.0635,
4.05 , 4.0365, 4.023 , 4.0095, 3.996 , 3.9825, 3.969 ,
3.9555, 3.942 , 3.9285, 3.915 , 3.9015, 3.888 , 3.8745,
3.861 , 3.8475, 3.834 , 3.8205, 3.807 , 3.7935, 3.78 ,
3.7665, 3.753 , 3.7395, 3.726 , 3.7125, 3.699 , 3.6855,
3.672 , 3.6585, 3.645 , 3.6315, 3.618 , 3.6045, 3.591 ,
3.5775, 3.564 , 3.5505, 3.537 , 3.5235, 3.51 , 3.4965,
3.483 , 3.4695, 3.456 , 3.4425, 3.429 , 3.4155, 3.402 ,
3.3885, 3.375 , 3.3615, 3.348 , 3.3345, 3.321 , 3.3075,
3.294 , 3.2805, 3.267 , 3.2535, 3.24 , 3.2265, 3.213 ,
3.1995, 3.186 , 3.1725, 3.159 , 3.1455, 3.132 , 3.1185,
3.105 , 3.0915, 3.078 , 3.0645, 3.051 , 3.0375, 3.024 ,
3.0105, 2.997 , 2.9835, 2.97 , 2.9565, 2.943 , 2.9295,
2.916 , 2.9025, 2.889 , 2.8755, 2.862 , 2.8485, 2.835 ,
2.8215, 2.808 , 2.7945, 2.781 , 2.7675, 2.754 , 2.7405,
2.727 , 2.7135, 2.7 , 2.6865, 2.673 , 2.6595, 2.646 ,
2.6325, 2.619 , 2.6055, 2.592 , 2.5785, 2.565 , 2.5515,
2.538 , 2.5245, 2.511 , 2.4975, 2.484 , 2.4705, 2.457 ,
2.4435, 2.43 , 2.4165, 2.403 , 2.3895, 2.376 , 2.3625,
2.349 , 2.3355, 2.322 , 2.3085, 2.295 , 2.2815, 2.268 ,
2.2545, 2.241 , 2.2275, 2.214 , 2.2005, 2.187 , 2.1735,
2.16 , 2.1465, 2.133 , 2.1195, 2.106 , 2.0925, 2.079 ,
2.0655, 2.052 , 2.0385, 2.025 , 2.0115, 1.998 , 1.9845,
1.971 , 1.9575, 1.944 , 1.9305, 1.917 , 1.9035, 1.89 ,
1.8765, 1.863 , 1.8495, 1.836 , 1.8225, 1.809 , 1.7955,
1.782 , 1.7685, 1.755 , 1.7415, 1.728 , 1.7145, 1.701 ,
1.6875, 1.674 , 1.6605, 1.647 , 1.6335, 1.62 , 1.6065,
1.593 , 1.5795, 1.566 , 1.5525, 1.539 , 1.5255, 1.512 ,
1.4985, 1.485 , 1.4715, 1.458 , 1.4445, 1.431 , 1.4175,
1.404 , 1.3905, 1.377 , 1.3635, 1.35 , 1.3365, 1.323 ,
1.3095, 1.296 , 1.2825, 1.269 , 1.2555, 1.242 , 1.2285,
1.215 , 1.2015, 1.188 , 1.1745, 1.161 , 1.1475, 1.134 ,
1.1205, 1.107 , 1.0935, 1.08 , 1.0665, 1.053 , 1.0395,
1.026 , 1.0125, 0.999 , 0.9855, 0.972 , 0.9585, 0.945 ,
0.9315, 0.918 , 0.9045, 0.891 , 0.8775, 0.864 , 0.8505,
0.837 , 0.8235, 0.81 , 0.7965, 0.783 , 0.7695, 0.756 ,
0.7425, 0.729 , 0.7155, 0.702 , 0.6885, 0.675 , 0.6615,
0.648 , 0.6345, 0.621 , 0.6075, 0.594 , 0.5805, 0.567 ,
0.5535, 0.54 , 0.5265, 0.513 , 0.4995, 0.486 , 0.4725,
0.459 , 0.4455, 0.432 , 0.4185, 0.405 , 0.3915, 0.378 ,
0.3645, 0.351 , 0.3375, 0.324 , 0.3105, 0.297 , 0.2835,
0.27 , 0.2565, 0.243 , 0.2295, 0.216 , 0.2025, 0.189 ,
0.1755, 0.162 , 0.1485, 0.135 , 0.1215, 0.108 , 0.0945,
0.081 , 0.0675, 0.054 , 0.0405, 0.027 , 0.0135]),
'alpha': array([2.8 , 2.7988989 , 2.7977978 , 2.7966967 , 2.7955956 ,
2.79449449, 2.79339339, 2.79229229, 2.79119119, 2.79009009,
2.78898899, 2.78788789, 2.78678679, 2.78568569, 2.78458458,
2.78348348, 2.78238238, 2.78128128, 2.78018018, 2.77907908,
2.77797798, 2.77687688, 2.77577578, 2.77467467, 2.77357357,
2.77247247, 2.77137137, 2.77027027, 2.76916917, 2.76806807,
2.76696697, 2.76586587, 2.76476476, 2.76366366, 2.76256256,
2.76146146, 2.76036036, 2.75925926, 2.75815816, 2.75705706,
2.75595596, 2.75485485, 2.75375375, 2.75265265, 2.75155155,
2.75045045, 2.74934935, 2.74824825, 2.74714715, 2.74604605,
2.74494494, 2.74384384, 2.74274274, 2.74164164, 2.74054054,
2.73943944, 2.73833834, 2.73723724, 2.73613614, 2.73503504,
2.73393393, 2.73283283, 2.73173173, 2.73063063, 2.72952953,
2.72842843, 2.72732733, 2.72622623, 2.72512513, 2.72402402,
2.72292292, 2.72182182, 2.72072072, 2.71961962, 2.71851852,
2.71741742, 2.71631632, 2.71521522, 2.71411411, 2.71301301,
2.71191191, 2.71081081, 2.70970971, 2.70860861, 2.70750751,
2.70640641, 2.70530531, 2.7042042 , 2.7031031 , 2.702002 ,
2.7009009 , 2.6997998 , 2.6986987 , 2.6975976 , 2.6964965 ,
2.6953954 , 2.69429429, 2.69319319, 2.69209209, 2.69099099,
2.68988989, 2.68878879, 2.68768769, 2.68658659, 2.68548549,
2.68438438, 2.68328328, 2.68218218, 2.68108108, 2.67997998,
2.67887888, 2.67777778, 2.67667668, 2.67557558, 2.67447447,
2.67337337, 2.67227227, 2.67117117, 2.67007007, 2.66896897,
2.66786787, 2.66676677, 2.66566567, 2.66456456, 2.66346346,
2.66236236, 2.66126126, 2.66016016, 2.65905906, 2.65795796,
2.65685686, 2.65575576, 2.65465465, 2.65355355, 2.65245245,
2.65135135, 2.65025025, 2.64914915, 2.64804805, 2.64694695,
2.64584585, 2.64474474, 2.64364364, 2.64254254, 2.64144144,
2.64034034, 2.63923924, 2.63813814, 2.63703704, 2.63593594,
2.63483483, 2.63373373, 2.63263263, 2.63153153, 2.63043043,
2.62932933, 2.62822823, 2.62712713, 2.62602603, 2.62492492,
2.62382382, 2.62272272, 2.62162162, 2.62052052, 2.61941942,
2.61831832, 2.61721722, 2.61611612, 2.61501502, 2.61391391,
2.61281281, 2.61171171, 2.61061061, 2.60950951, 2.60840841,
2.60730731, 2.60620621, 2.60510511, 2.604004 , 2.6029029 ,
2.6018018 , 2.6007007 , 2.5995996 , 2.5984985 , 2.5973974 ,
2.5962963 , 2.5951952 , 2.59409409, 2.59299299, 2.59189189,
2.59079079, 2.58968969, 2.58858859, 2.58748749, 2.58638639,
2.58528529, 2.58418418, 2.58308308, 2.58198198, 2.58088088,
2.57977978, 2.57867868, 2.57757758, 2.57647648, 2.57537538,
2.57427427, 2.57317317, 2.57207207, 2.57097097, 2.56986987,
2.56876877, 2.56766767, 2.56656657, 2.56546547, 2.56436436,
2.56326326, 2.56216216, 2.56106106, 2.55995996, 2.55885886,
2.55775776, 2.55665666, 2.55555556, 2.55445445, 2.55335335,
2.55225225, 2.55115115, 2.55005005, 2.54894895, 2.54784785,
2.54674675, 2.54564565, 2.54454454, 2.54344344, 2.54234234,
2.54124124, 2.54014014, 2.53903904, 2.53793794, 2.53683684,
2.53573574, 2.53463463, 2.53353353, 2.53243243, 2.53133133,
2.53023023, 2.52912913, 2.52802803, 2.52692693, 2.52582583,
2.52472472, 2.52362362, 2.52252252, 2.52142142, 2.52032032,
2.51921922, 2.51811812, 2.51701702, 2.51591592, 2.51481481,
2.51371371, 2.51261261, 2.51151151, 2.51041041, 2.50930931,
2.50820821, 2.50710711, 2.50600601, 2.5049049 , 2.5038038 ,
2.5027027 , 2.5016016 , 2.5005005 , 2.4993994 , 2.4982983 ,
2.4971972 , 2.4960961 , 2.49499499, 2.49389389, 2.49279279,
2.49169169, 2.49059059, 2.48948949, 2.48838839, 2.48728729,
2.48618619, 2.48508509, 2.48398398, 2.48288288, 2.48178178,
2.48068068, 2.47957958, 2.47847848, 2.47737738, 2.47627628,
2.47517518, 2.47407407, 2.47297297, 2.47187187, 2.47077077,
2.46966967, 2.46856857, 2.46746747, 2.46636637, 2.46526527,
2.46416416, 2.46306306, 2.46196196, 2.46086086, 2.45975976,
2.45865866, 2.45755756, 2.45645646, 2.45535536, 2.45425425,
2.45315315, 2.45205205, 2.45095095, 2.44984985, 2.44874875,
2.44764765, 2.44654655, 2.44544545, 2.44434434, 2.44324324,
2.44214214, 2.44104104, 2.43993994, 2.43883884, 2.43773774,
2.43663664, 2.43553554, 2.43443443, 2.43333333, 2.43223223,
2.43113113, 2.43003003, 2.42892893, 2.42782783, 2.42672673,
2.42562563, 2.42452452, 2.42342342, 2.42232232, 2.42122122,
2.42012012, 2.41901902, 2.41791792, 2.41681682, 2.41571572,
2.41461461, 2.41351351, 2.41241241, 2.41131131, 2.41021021,
2.40910911, 2.40800801, 2.40690691, 2.40580581, 2.4047047 ,
2.4036036 , 2.4025025 , 2.4014014 , 2.4003003 , 2.3991992 ,
2.3980981 , 2.396997 , 2.3958959 , 2.39479479, 2.39369369,
2.39259259, 2.39149149, 2.39039039, 2.38928929, 2.38818819,
2.38708709, 2.38598599, 2.38488488, 2.38378378, 2.38268268,
2.38158158, 2.38048048, 2.37937938, 2.37827828, 2.37717718,
2.37607608, 2.37497497, 2.37387387, 2.37277277, 2.37167167,
2.37057057, 2.36946947, 2.36836837, 2.36726727, 2.36616617,
2.36506507, 2.36396396, 2.36286286, 2.36176176, 2.36066066,
2.35955956, 2.35845846, 2.35735736, 2.35625626, 2.35515516,
2.35405405, 2.35295295, 2.35185185, 2.35075075, 2.34964965,
2.34854855, 2.34744745, 2.34634635, 2.34524525, 2.34414414,
2.34304304, 2.34194194, 2.34084084, 2.33973974, 2.33863864,
2.33753754, 2.33643644, 2.33533534, 2.33423423, 2.33313313,
2.33203203, 2.33093093, 2.32982983, 2.32872873, 2.32762763,
2.32652653, 2.32542543, 2.32432432, 2.32322322, 2.32212212,
2.32102102, 2.31991992, 2.31881882, 2.31771772, 2.31661662,
2.31551552, 2.31441441, 2.31331331, 2.31221221, 2.31111111,
2.31001001, 2.30890891, 2.30780781, 2.30670671, 2.30560561,
2.3045045 , 2.3034034 , 2.3023023 , 2.3012012 , 2.3001001 ,
2.298999 , 2.2978979 , 2.2967968 , 2.2956957 , 2.29459459,
2.29349349, 2.29239239, 2.29129129, 2.29019019, 2.28908909,
2.28798799, 2.28688689, 2.28578579, 2.28468468, 2.28358358,
2.28248248, 2.28138138, 2.28028028, 2.27917918, 2.27807808,
2.27697698, 2.27587588, 2.27477477, 2.27367367, 2.27257257,
2.27147147, 2.27037037, 2.26926927, 2.26816817, 2.26706707,
2.26596597, 2.26486486, 2.26376376, 2.26266266, 2.26156156,
2.26046046, 2.25935936, 2.25825826, 2.25715716, 2.25605606,
2.25495495, 2.25385385, 2.25275275, 2.25165165, 2.25055055,
2.24944945, 2.24834835, 2.24724725, 2.24614615, 2.24504505,
2.24394394, 2.24284284, 2.24174174, 2.24064064, 2.23953954,
2.23843844, 2.23733734, 2.23623624, 2.23513514, 2.23403403,
2.23293293, 2.23183183, 2.23073073, 2.22962963, 2.22852853,
2.22742743, 2.22632633, 2.22522523, 2.22412412, 2.22302302,
2.22192192, 2.22082082, 2.21971972, 2.21861862, 2.21751752,
2.21641642, 2.21531532, 2.21421421, 2.21311311, 2.21201201,
2.21091091, 2.20980981, 2.20870871, 2.20760761, 2.20650651,
2.20540541, 2.2043043 , 2.2032032 , 2.2021021 , 2.201001 ,
2.1998999 , 2.1987988 , 2.1976977 , 2.1965966 , 2.1954955 ,
2.19439439, 2.19329329, 2.19219219, 2.19109109, 2.18998999,
2.18888889, 2.18778779, 2.18668669, 2.18558559, 2.18448448,
2.18338338, 2.18228228, 2.18118118, 2.18008008, 2.17897898,
2.17787788, 2.17677678, 2.17567568, 2.17457457, 2.17347347,
2.17237237, 2.17127127, 2.17017017, 2.16906907, 2.16796797,
2.16686687, 2.16576577, 2.16466466, 2.16356356, 2.16246246,
2.16136136, 2.16026026, 2.15915916, 2.15805806, 2.15695696,
2.15585586, 2.15475475, 2.15365365, 2.15255255, 2.15145145,
2.15035035, 2.14924925, 2.14814815, 2.14704705, 2.14594595,
2.14484484, 2.14374374, 2.14264264, 2.14154154, 2.14044044,
2.13933934, 2.13823824, 2.13713714, 2.13603604, 2.13493493,
2.13383383, 2.13273273, 2.13163163, 2.13053053, 2.12942943,
2.12832833, 2.12722723, 2.12612613, 2.12502503, 2.12392392,
2.12282282, 2.12172172, 2.12062062, 2.11951952, 2.11841842,
2.11731732, 2.11621622, 2.11511512, 2.11401401, 2.11291291,
2.11181181, 2.11071071, 2.10960961, 2.10850851, 2.10740741,
2.10630631, 2.10520521, 2.1041041 , 2.103003 , 2.1019019 ,
2.1008008 , 2.0996997 , 2.0985986 , 2.0974975 , 2.0963964 ,
2.0952953 , 2.09419419, 2.09309309, 2.09199199, 2.09089089,
2.08978979, 2.08868869, 2.08758759, 2.08648649, 2.08538539,
2.08428428, 2.08318318, 2.08208208, 2.08098098, 2.07987988,
2.07877878, 2.07767768, 2.07657658, 2.07547548, 2.07437437,
2.07327327, 2.07217217, 2.07107107, 2.06996997, 2.06886887,
2.06776777, 2.06666667, 2.06556557, 2.06446446, 2.06336336,
2.06226226, 2.06116116, 2.06006006, 2.05895896, 2.05785786,
2.05675676, 2.05565566, 2.05455455, 2.05345345, 2.05235235,
2.05125125, 2.05015015, 2.04904905, 2.04794795, 2.04684685,
2.04574575, 2.04464464, 2.04354354, 2.04244244, 2.04134134,
2.04024024, 2.03913914, 2.03803804, 2.03693694, 2.03583584,
2.03473473, 2.03363363, 2.03253253, 2.03143143, 2.03033033,
2.02922923, 2.02812813, 2.02702703, 2.02592593, 2.02482482,
2.02372372, 2.02262262, 2.02152152, 2.02042042, 2.01931932,
2.01821822, 2.01711712, 2.01601602, 2.01491491, 2.01381381,
2.01271271, 2.01161161, 2.01051051, 2.00940941, 2.00830831,
2.00720721, 2.00610611, 2.00500501, 2.0039039 , 2.0028028 ,
2.0017017 , 2.0006006 , 1.9994995 , 1.9983984 , 1.9972973 ,
1.9961962 , 1.9950951 , 1.99399399, 1.99289289, 1.99179179,
1.99069069, 1.98958959, 1.98848849, 1.98738739, 1.98628629,
1.98518519, 1.98408408, 1.98298298, 1.98188188, 1.98078078,
1.97967968, 1.97857858, 1.97747748, 1.97637638, 1.97527528,
1.97417417, 1.97307307, 1.97197197, 1.97087087, 1.96976977,
1.96866867, 1.96756757, 1.96646647, 1.96536537, 1.96426426,
1.96316316, 1.96206206, 1.96096096, 1.95985986, 1.95875876,
1.95765766, 1.95655656, 1.95545546, 1.95435435, 1.95325325,
1.95215215, 1.95105105, 1.94994995, 1.94884885, 1.94774775,
1.94664665, 1.94554555, 1.94444444, 1.94334334, 1.94224224,
1.94114114, 1.94004004, 1.93893894, 1.93783784, 1.93673674,
1.93563564, 1.93453453, 1.93343343, 1.93233233, 1.93123123,
1.93013013, 1.92902903, 1.92792793, 1.92682683, 1.92572573,
1.92462462, 1.92352352, 1.92242242, 1.92132132, 1.92022022,
1.91911912, 1.91801802, 1.91691692, 1.91581582, 1.91471471,
1.91361361, 1.91251251, 1.91141141, 1.91031031, 1.90920921,
1.90810811, 1.90700701, 1.90590591, 1.9048048 , 1.9037037 ,
1.9026026 , 1.9015015 , 1.9004004 , 1.8992993 , 1.8981982 ,
1.8970971 , 1.895996 , 1.89489489, 1.89379379, 1.89269269,
1.89159159, 1.89049049, 1.88938939, 1.88828829, 1.88718719,
1.88608609, 1.88498498, 1.88388388, 1.88278278, 1.88168168,
1.88058058, 1.87947948, 1.87837838, 1.87727728, 1.87617618,
1.87507508, 1.87397397, 1.87287287, 1.87177177, 1.87067067,
1.86956957, 1.86846847, 1.86736737, 1.86626627, 1.86516517,
1.86406406, 1.86296296, 1.86186186, 1.86076076, 1.85965966,
1.85855856, 1.85745746, 1.85635636, 1.85525526, 1.85415415,
1.85305305, 1.85195195, 1.85085085, 1.84974975, 1.84864865,
1.84754755, 1.84644645, 1.84534535, 1.84424424, 1.84314314,
1.84204204, 1.84094094, 1.83983984, 1.83873874, 1.83763764,
1.83653654, 1.83543544, 1.83433433, 1.83323323, 1.83213213,
1.83103103, 1.82992993, 1.82882883, 1.82772773, 1.82662663,
1.82552553, 1.82442442, 1.82332332, 1.82222222, 1.82112112,
1.82002002, 1.81891892, 1.81781782, 1.81671672, 1.81561562,
1.81451451, 1.81341341, 1.81231231, 1.81121121, 1.81011011,
1.80900901, 1.80790791, 1.80680681, 1.80570571, 1.8046046 ,
1.8035035 , 1.8024024 , 1.8013013 , 1.8002002 , 1.7990991 ,
1.797998 , 1.7968969 , 1.7957958 , 1.79469469, 1.79359359,
1.79249249, 1.79139139, 1.79029029, 1.78918919, 1.78808809,
1.78698699, 1.78588589, 1.78478478, 1.78368368, 1.78258258,
1.78148148, 1.78038038, 1.77927928, 1.77817818, 1.77707708,
1.77597598, 1.77487487, 1.77377377, 1.77267267, 1.77157157,
1.77047047, 1.76936937, 1.76826827, 1.76716717, 1.76606607,
1.76496496, 1.76386386, 1.76276276, 1.76166166, 1.76056056,
1.75945946, 1.75835836, 1.75725726, 1.75615616, 1.75505506,
1.75395395, 1.75285285, 1.75175175, 1.75065065, 1.74954955,
1.74844845, 1.74734735, 1.74624625, 1.74514515, 1.74404404,
1.74294294, 1.74184184, 1.74074074, 1.73963964, 1.73853854,
1.73743744, 1.73633634, 1.73523524, 1.73413413, 1.73303303,
1.73193193, 1.73083083, 1.72972973, 1.72862863, 1.72752753,
1.72642643, 1.72532533, 1.72422422, 1.72312312, 1.72202202,
1.72092092, 1.71981982, 1.71871872, 1.71761762, 1.71651652,
1.71541542, 1.71431431, 1.71321321, 1.71211211, 1.71101101,
1.70990991, 1.70880881, 1.70770771, 1.70660661, 1.70550551,
1.7044044 , 1.7033033 , 1.7022022 , 1.7011011 , 1.7 ]),
'feh': array([-1.8 , -1.7988989 , -1.7977978 , -1.7966967 , -1.7955956 ,
-1.79449449, -1.79339339, -1.79229229, -1.79119119, -1.79009009,
-1.78898899, -1.78788789, -1.78678679, -1.78568569, -1.78458458,
-1.78348348, -1.78238238, -1.78128128, -1.78018018, -1.77907908,
-1.77797798, -1.77687688, -1.77577578, -1.77467467, -1.77357357,
-1.77247247, -1.77137137, -1.77027027, -1.76916917, -1.76806807,
-1.76696697, -1.76586587, -1.76476476, -1.76366366, -1.76256256,
-1.76146146, -1.76036036, -1.75925926, -1.75815816, -1.75705706,
-1.75595596, -1.75485485, -1.75375375, -1.75265265, -1.75155155,
-1.75045045, -1.74934935, -1.74824825, -1.74714715, -1.74604605,
-1.74494494, -1.74384384, -1.74274274, -1.74164164, -1.74054054,
-1.73943944, -1.73833834, -1.73723724, -1.73613614, -1.73503504,
-1.73393393, -1.73283283, -1.73173173, -1.73063063, -1.72952953,
-1.72842843, -1.72732733, -1.72622623, -1.72512513, -1.72402402,
-1.72292292, -1.72182182, -1.72072072, -1.71961962, -1.71851852,
-1.71741742, -1.71631632, -1.71521522, -1.71411411, -1.71301301,
-1.71191191, -1.71081081, -1.70970971, -1.70860861, -1.70750751,
-1.70640641, -1.70530531, -1.7042042 , -1.7031031 , -1.702002 ,
-1.7009009 , -1.6997998 , -1.6986987 , -1.6975976 , -1.6964965 ,
-1.6953954 , -1.69429429, -1.69319319, -1.69209209, -1.69099099,
-1.68988989, -1.68878879, -1.68768769, -1.68658659, -1.68548549,
-1.68438438, -1.68328328, -1.68218218, -1.68108108, -1.67997998,
-1.67887888, -1.67777778, -1.67667668, -1.67557558, -1.67447447,
-1.67337337, -1.67227227, -1.67117117, -1.67007007, -1.66896897,
-1.66786787, -1.66676677, -1.66566567, -1.66456456, -1.66346346,
-1.66236236, -1.66126126, -1.66016016, -1.65905906, -1.65795796,
-1.65685686, -1.65575576, -1.65465465, -1.65355355, -1.65245245,
-1.65135135, -1.65025025, -1.64914915, -1.64804805, -1.64694695,
-1.64584585, -1.64474474, -1.64364364, -1.64254254, -1.64144144,
-1.64034034, -1.63923924, -1.63813814, -1.63703704, -1.63593594,
-1.63483483, -1.63373373, -1.63263263, -1.63153153, -1.63043043,
-1.62932933, -1.62822823, -1.62712713, -1.62602603, -1.62492492,
-1.62382382, -1.62272272, -1.62162162, -1.62052052, -1.61941942,
-1.61831832, -1.61721722, -1.61611612, -1.61501502, -1.61391391,
-1.61281281, -1.61171171, -1.61061061, -1.60950951, -1.60840841,
-1.60730731, -1.60620621, -1.60510511, -1.604004 , -1.6029029 ,
-1.6018018 , -1.6007007 , -1.5995996 , -1.5984985 , -1.5973974 ,
-1.5962963 , -1.5951952 , -1.59409409, -1.59299299, -1.59189189,
-1.59079079, -1.58968969, -1.58858859, -1.58748749, -1.58638639,
-1.58528529, -1.58418418, -1.58308308, -1.58198198, -1.58088088,
-1.57977978, -1.57867868, -1.57757758, -1.57647648, -1.57537538,
-1.57427427, -1.57317317, -1.57207207, -1.57097097, -1.56986987,
-1.56876877, -1.56766767, -1.56656657, -1.56546547, -1.56436436,
-1.56326326, -1.56216216, -1.56106106, -1.55995996, -1.55885886,
-1.55775776, -1.55665666, -1.55555556, -1.55445445, -1.55335335,
-1.55225225, -1.55115115, -1.55005005, -1.54894895, -1.54784785,
-1.54674675, -1.54564565, -1.54454454, -1.54344344, -1.54234234,
-1.54124124, -1.54014014, -1.53903904, -1.53793794, -1.53683684,
-1.53573574, -1.53463463, -1.53353353, -1.53243243, -1.53133133,
-1.53023023, -1.52912913, -1.52802803, -1.52692693, -1.52582583,
-1.52472472, -1.52362362, -1.52252252, -1.52142142, -1.52032032,
-1.51921922, -1.51811812, -1.51701702, -1.51591592, -1.51481481,
-1.51371371, -1.51261261, -1.51151151, -1.51041041, -1.50930931,
-1.50820821, -1.50710711, -1.50600601, -1.5049049 , -1.5038038 ,
-1.5027027 , -1.5016016 , -1.5005005 , -1.4993994 , -1.4982983 ,
-1.4971972 , -1.4960961 , -1.49499499, -1.49389389, -1.49279279,
-1.49169169, -1.49059059, -1.48948949, -1.48838839, -1.48728729,
-1.48618619, -1.48508509, -1.48398398, -1.48288288, -1.48178178,
-1.48068068, -1.47957958, -1.47847848, -1.47737738, -1.47627628,
-1.47517518, -1.47407407, -1.47297297, -1.47187187, -1.47077077,
-1.46966967, -1.46856857, -1.46746747, -1.46636637, -1.46526527,
-1.46416416, -1.46306306, -1.46196196, -1.46086086, -1.45975976,
-1.45865866, -1.45755756, -1.45645646, -1.45535536, -1.45425425,
-1.45315315, -1.45205205, -1.45095095, -1.44984985, -1.44874875,
-1.44764765, -1.44654655, -1.44544545, -1.44434434, -1.44324324,
-1.44214214, -1.44104104, -1.43993994, -1.43883884, -1.43773774,
-1.43663664, -1.43553554, -1.43443443, -1.43333333, -1.43223223,
-1.43113113, -1.43003003, -1.42892893, -1.42782783, -1.42672673,
-1.42562563, -1.42452452, -1.42342342, -1.42232232, -1.42122122,
-1.42012012, -1.41901902, -1.41791792, -1.41681682, -1.41571572,
-1.41461461, -1.41351351, -1.41241241, -1.41131131, -1.41021021,
-1.40910911, -1.40800801, -1.40690691, -1.40580581, -1.4047047 ,
-1.4036036 , -1.4025025 , -1.4014014 , -1.4003003 , -1.3991992 ,
-1.3980981 , -1.396997 , -1.3958959 , -1.39479479, -1.39369369,
-1.39259259, -1.39149149, -1.39039039, -1.38928929, -1.38818819,
-1.38708709, -1.38598599, -1.38488488, -1.38378378, -1.38268268,
-1.38158158, -1.38048048, -1.37937938, -1.37827828, -1.37717718,
-1.37607608, -1.37497497, -1.37387387, -1.37277277, -1.37167167,
-1.37057057, -1.36946947, -1.36836837, -1.36726727, -1.36616617,
-1.36506507, -1.36396396, -1.36286286, -1.36176176, -1.36066066,
-1.35955956, -1.35845846, -1.35735736, -1.35625626, -1.35515516,
-1.35405405, -1.35295295, -1.35185185, -1.35075075, -1.34964965,
-1.34854855, -1.34744745, -1.34634635, -1.34524525, -1.34414414,
-1.34304304, -1.34194194, -1.34084084, -1.33973974, -1.33863864,
-1.33753754, -1.33643644, -1.33533534, -1.33423423, -1.33313313,
-1.33203203, -1.33093093, -1.32982983, -1.32872873, -1.32762763,
-1.32652653, -1.32542543, -1.32432432, -1.32322322, -1.32212212,
-1.32102102, -1.31991992, -1.31881882, -1.31771772, -1.31661662,
-1.31551552, -1.31441441, -1.31331331, -1.31221221, -1.31111111,
-1.31001001, -1.30890891, -1.30780781, -1.30670671, -1.30560561,
-1.3045045 , -1.3034034 , -1.3023023 , -1.3012012 , -1.3001001 ,
-1.298999 , -1.2978979 , -1.2967968 , -1.2956957 , -1.29459459,
-1.29349349, -1.29239239, -1.29129129, -1.29019019, -1.28908909,
-1.28798799, -1.28688689, -1.28578579, -1.28468468, -1.28358358,
-1.28248248, -1.28138138, -1.28028028, -1.27917918, -1.27807808,
-1.27697698, -1.27587588, -1.27477477, -1.27367367, -1.27257257,
-1.27147147, -1.27037037, -1.26926927, -1.26816817, -1.26706707,
-1.26596597, -1.26486486, -1.26376376, -1.26266266, -1.26156156,
-1.26046046, -1.25935936, -1.25825826, -1.25715716, -1.25605606,
-1.25495495, -1.25385385, -1.25275275, -1.25165165, -1.25055055,
-1.24944945, -1.24834835, -1.24724725, -1.24614615, -1.24504505,
-1.24394394, -1.24284284, -1.24174174, -1.24064064, -1.23953954,
-1.23843844, -1.23733734, -1.23623624, -1.23513514, -1.23403403,
-1.23293293, -1.23183183, -1.23073073, -1.22962963, -1.22852853,
-1.22742743, -1.22632633, -1.22522523, -1.22412412, -1.22302302,
-1.22192192, -1.22082082, -1.21971972, -1.21861862, -1.21751752,
-1.21641642, -1.21531532, -1.21421421, -1.21311311, -1.21201201,
-1.21091091, -1.20980981, -1.20870871, -1.20760761, -1.20650651,
-1.20540541, -1.2043043 , -1.2032032 , -1.2021021 , -1.201001 ,
-1.1998999 , -1.1987988 , -1.1976977 , -1.1965966 , -1.1954955 ,
-1.19439439, -1.19329329, -1.19219219, -1.19109109, -1.18998999,
-1.18888889, -1.18778779, -1.18668669, -1.18558559, -1.18448448,
-1.18338338, -1.18228228, -1.18118118, -1.18008008, -1.17897898,
-1.17787788, -1.17677678, -1.17567568, -1.17457457, -1.17347347,
-1.17237237, -1.17127127, -1.17017017, -1.16906907, -1.16796797,
-1.16686687, -1.16576577, -1.16466466, -1.16356356, -1.16246246,
-1.16136136, -1.16026026, -1.15915916, -1.15805806, -1.15695696,
-1.15585586, -1.15475475, -1.15365365, -1.15255255, -1.15145145,
-1.15035035, -1.14924925, -1.14814815, -1.14704705, -1.14594595,
-1.14484484, -1.14374374, -1.14264264, -1.14154154, -1.14044044,
-1.13933934, -1.13823824, -1.13713714, -1.13603604, -1.13493493,
-1.13383383, -1.13273273, -1.13163163, -1.13053053, -1.12942943,
-1.12832833, -1.12722723, -1.12612613, -1.12502503, -1.12392392,
-1.12282282, -1.12172172, -1.12062062, -1.11951952, -1.11841842,
-1.11731732, -1.11621622, -1.11511512, -1.11401401, -1.11291291,
-1.11181181, -1.11071071, -1.10960961, -1.10850851, -1.10740741,
-1.10630631, -1.10520521, -1.1041041 , -1.103003 , -1.1019019 ,
-1.1008008 , -1.0996997 , -1.0985986 , -1.0974975 , -1.0963964 ,
-1.0952953 , -1.09419419, -1.09309309, -1.09199199, -1.09089089,
-1.08978979, -1.08868869, -1.08758759, -1.08648649, -1.08538539,
-1.08428428, -1.08318318, -1.08208208, -1.08098098, -1.07987988,
-1.07877878, -1.07767768, -1.07657658, -1.07547548, -1.07437437,
-1.07327327, -1.07217217, -1.07107107, -1.06996997, -1.06886887,
-1.06776777, -1.06666667, -1.06556557, -1.06446446, -1.06336336,
-1.06226226, -1.06116116, -1.06006006, -1.05895896, -1.05785786,
-1.05675676, -1.05565566, -1.05455455, -1.05345345, -1.05235235,
-1.05125125, -1.05015015, -1.04904905, -1.04794795, -1.04684685,
-1.04574575, -1.04464464, -1.04354354, -1.04244244, -1.04134134,
-1.04024024, -1.03913914, -1.03803804, -1.03693694, -1.03583584,
-1.03473473, -1.03363363, -1.03253253, -1.03143143, -1.03033033,
-1.02922923, -1.02812813, -1.02702703, -1.02592593, -1.02482482,
-1.02372372, -1.02262262, -1.02152152, -1.02042042, -1.01931932,
-1.01821822, -1.01711712, -1.01601602, -1.01491491, -1.01381381,
-1.01271271, -1.01161161, -1.01051051, -1.00940941, -1.00830831,
-1.00720721, -1.00610611, -1.00500501, -1.0039039 , -1.0028028 ,
-1.0017017 , -1.0006006 , -0.9994995 , -0.9983984 , -0.9972973 ,
-0.9961962 , -0.9950951 , -0.99399399, -0.99289289, -0.99179179,
-0.99069069, -0.98958959, -0.98848849, -0.98738739, -0.98628629,
-0.98518519, -0.98408408, -0.98298298, -0.98188188, -0.98078078,
-0.97967968, -0.97857858, -0.97747748, -0.97637638, -0.97527528,
-0.97417417, -0.97307307, -0.97197197, -0.97087087, -0.96976977,
-0.96866867, -0.96756757, -0.96646647, -0.96536537, -0.96426426,
-0.96316316, -0.96206206, -0.96096096, -0.95985986, -0.95875876,
-0.95765766, -0.95655656, -0.95545546, -0.95435435, -0.95325325,
-0.95215215, -0.95105105, -0.94994995, -0.94884885, -0.94774775,
-0.94664665, -0.94554555, -0.94444444, -0.94334334, -0.94224224,
-0.94114114, -0.94004004, -0.93893894, -0.93783784, -0.93673674,
-0.93563564, -0.93453453, -0.93343343, -0.93233233, -0.93123123,
-0.93013013, -0.92902903, -0.92792793, -0.92682683, -0.92572573,
-0.92462462, -0.92352352, -0.92242242, -0.92132132, -0.92022022,
-0.91911912, -0.91801802, -0.91691692, -0.91581582, -0.91471471,
-0.91361361, -0.91251251, -0.91141141, -0.91031031, -0.90920921,
-0.90810811, -0.90700701, -0.90590591, -0.9048048 , -0.9037037 ,
-0.9026026 , -0.9015015 , -0.9004004 , -0.8992993 , -0.8981982 ,
-0.8970971 , -0.895996 , -0.89489489, -0.89379379, -0.89269269,
-0.89159159, -0.89049049, -0.88938939, -0.88828829, -0.88718719,
-0.88608609, -0.88498498, -0.88388388, -0.88278278, -0.88168168,
-0.88058058, -0.87947948, -0.87837838, -0.87727728, -0.87617618,
-0.87507508, -0.87397397, -0.87287287, -0.87177177, -0.87067067,
-0.86956957, -0.86846847, -0.86736737, -0.86626627, -0.86516517,
-0.86406406, -0.86296296, -0.86186186, -0.86076076, -0.85965966,
-0.85855856, -0.85745746, -0.85635636, -0.85525526, -0.85415415,
-0.85305305, -0.85195195, -0.85085085, -0.84974975, -0.84864865,
-0.84754755, -0.84644645, -0.84534535, -0.84424424, -0.84314314,
-0.84204204, -0.84094094, -0.83983984, -0.83873874, -0.83763764,
-0.83653654, -0.83543544, -0.83433433, -0.83323323, -0.83213213,
-0.83103103, -0.82992993, -0.82882883, -0.82772773, -0.82662663,
-0.82552553, -0.82442442, -0.82332332, -0.82222222, -0.82112112,
-0.82002002, -0.81891892, -0.81781782, -0.81671672, -0.81561562,
-0.81451451, -0.81341341, -0.81231231, -0.81121121, -0.81011011,
-0.80900901, -0.80790791, -0.80680681, -0.80570571, -0.8046046 ,
-0.8035035 , -0.8024024 , -0.8013013 , -0.8002002 , -0.7990991 ,
-0.797998 , -0.7968969 , -0.7957958 , -0.79469469, -0.79359359,
-0.79249249, -0.79139139, -0.79029029, -0.78918919, -0.78808809,
-0.78698699, -0.78588589, -0.78478478, -0.78368368, -0.78258258,
-0.78148148, -0.78038038, -0.77927928, -0.77817818, -0.77707708,
-0.77597598, -0.77487487, -0.77377377, -0.77267267, -0.77157157,
-0.77047047, -0.76936937, -0.76826827, -0.76716717, -0.76606607,
-0.76496496, -0.76386386, -0.76276276, -0.76166166, -0.76056056,
-0.75945946, -0.75835836, -0.75725726, -0.75615616, -0.75505506,
-0.75395395, -0.75285285, -0.75175175, -0.75065065, -0.74954955,
-0.74844845, -0.74734735, -0.74624625, -0.74514515, -0.74404404,
-0.74294294, -0.74184184, -0.74074074, -0.73963964, -0.73853854,
-0.73743744, -0.73633634, -0.73523524, -0.73413413, -0.73303303,
-0.73193193, -0.73083083, -0.72972973, -0.72862863, -0.72752753,
-0.72642643, -0.72532533, -0.72422422, -0.72312312, -0.72202202,
-0.72092092, -0.71981982, -0.71871872, -0.71761762, -0.71651652,
-0.71541542, -0.71431431, -0.71321321, -0.71211211, -0.71101101,
-0.70990991, -0.70880881, -0.70770771, -0.70660661, -0.70550551,
-0.7044044 , -0.7033033 , -0.7022022 , -0.7011011 , -0.7 ]),
'id': array(1),
'mass': array([0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001,
0.001]),
'pos3': array([[-8. , 0. , 0.015],
[-8. , 0. , 0.015],
[-8. , 0. , 0.015],
...,
[-8. , 0. , 0.015],
[-8. , 0. , 0.015],
[-8. , 0. , 0.015]]),
'vel3': array([[ 11.1 , 239.08, 7.25],
[ 11.1 , 239.08, 7.25],
[ 11.1 , 239.08, 7.25],
...,
[ 11.1 , 239.08, 7.25],
[ 11.1 , 239.08, 7.25],
[ 11.1 , 239.08, 7.25]])}
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/LSMC/SMC_LMC.ebf','/')
file
```
{'age': array([1.350000e+01, 1.349865e+01, 1.349730e+01, ..., 4.050000e-03,
2.700000e-03, 1.350000e-03]),
'alpha': array([0., 0., 0., ..., 0., 0., 0.]),
'feh': array([-1.99167861, -1.93086057, -1.92814588, ..., -0.49064699,
-0.47341946, -0.45378013]),
'id': array(1),
'mass': array([53000., 53000., 53000., ..., 53000., 53000., 53000.]),
'pos3': array([[ 15.33545452, -36.20410216, -42.49457273],
[ 15.29980912, -36.00641168, -40.9126705 ],
[ 15.30658581, -36.77713091, -42.34716786],
...,
[ 15.52463484, -37.818011 , -42.71545747],
[ 15.39652357, -36.91277086, -42.14263451],
[ 15.55819806, -36.95022933, -42.9576046 ]]),
'vel3': array([[ -30.27567249, -195.93860278, 132.71019931],
[ -30.27567249, -195.93860278, 132.71019931],
[ -30.27567249, -195.93860278, 132.71019931],
...,
[ -30.27567249, -195.93860278, 132.71019931],
[ -30.27567249, -195.93860278, 132.71019931],
[ -30.27567249, -195.93860278, 132.71019931]])}
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/bj/sat0000.ebf','/')
file
```
{'age': array([12.6332, 11.4259, 12.6456, ..., 12.2225, 11.7246, 12.9941],
dtype=float32),
'alpha': array([0.281504 , 0.06217445, 0.285498 , ..., 0.181849 , 0.1010419 ,
0.2998405 ], dtype=float32),
'feh': array([-1.87098, -1.64581, -1.87503, ..., -1.76944, -1.68616, -3.99988],
dtype=float32),
'id': array([0], dtype=int32),
'mass': array([46.227158 , 8.630884 , 6.1402965, ..., 3.475836 , 3.4672792,
3.42881 ], dtype=float32),
'pos3': array([[-81.92041 , 154.84988 , -89.64631 ],
[-82.3133 , 155.28146 , -89.57967 ],
[-82.28089 , 155.18294 , -89.04917 ],
...,
[-81.99606 , 154.36931 , -89.69989 ],
[-82.41355 , 154.49896 , -89.213425],
[-81.18186 , 154.93681 , -89.75164 ]], dtype=float32),
'vel3': array([[ 27.788857 , -12.21555 , 55.965973 ],
[ 42.948017 , 8.397151 , 68.3472 ],
[ 33.88551 , 9.441175 , 65.91827 ],
...,
[ 43.73566 , -15.185085 , 55.043983 ],
[ 26.042114 , -7.028326 , 68.0241 ],
[ 27.867823 , 0.1820468, 56.755703 ]], dtype=float32)}
```python
pos = file["pos3"]
```
```python
vel = file["vel3"]
```
```python
ps = np.concatenate((pos,vel),axis = 1)
```
```python
np.savetxt("sat0.dat",ps,fmt='%.6f')
np.savetxt("sat0_pos.dat",pos,fmt='%.6f')
```
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/bj/sat0000_d3n64_den.ebf','/')
file["h_cubic"]
```
array([0.05839317, 0.10330353, 0.14811762, ..., 0.11479929, 0.13655213,
0.34863684], dtype=float32)
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/bj/sat0000_d6n64_den.ebf','/')
print("h0: ",min(file["h_cubic"][:,0]),max(file["h_cubic"][:,0]))
print("h1: ",min(file["h_cubic"][:,1]),max(file["h_cubic"][:,1]))
```
h0: 0.47667506 1.5975987
h1: 2.2416234 14.521095
```python
measure = np.sqrt(np.divide(file["h_cubic"][:,0],file["h_cubic"][:,1]))
plt.hist(measure)
```
(array([9.260e+02, 2.531e+03, 3.784e+03, 5.047e+03, 5.583e+03, 4.310e+03,
8.660e+02, 7.000e+00, 0.000e+00, 1.000e+00]),
array([0.20215598, 0.26540995, 0.32866395, 0.3919179 , 0.45517188,
0.5184259 , 0.5816798 , 0.6449338 , 0.7081878 , 0.77144176,
0.83469576], dtype=float32),
<a list of 10 Patch objects>)

```python
t = np.genfromtxt('../../../Programme/EnbidG/Examples/sat0/sat0.dat_ph3.est',skip_header=1)
print('h0: ',min(t[:,1]),max(t[:,1]))
print('h1: ',min(t[:,2]),max(t[:,2]))
measure = np.sqrt(np.divide(t[:,1],t[:,2]))
plt.hist(measure)
```
h0: 0.079951 0.507349
h1: 5.26297 36.3217
(array([1.550e+02, 9.870e+02, 2.621e+03, 5.441e+03, 7.878e+03, 4.940e+03,
8.470e+02, 1.550e+02, 2.900e+01, 2.000e+00]),
array([0.05927978, 0.07398158, 0.08868337, 0.10338517, 0.11808697,
0.13278877, 0.14749057, 0.16219237, 0.17689417, 0.19159596,
0.20629776]),
<a list of 10 Patch objects>)

```python
## OLD
t = np.genfromtxt('../../../Programme/EnbidG/Examples/sat0/sat0.dat_ph3.est',skip_header=1)
print('h0: ',min(t[:,1]),max(t[:,1]))
print('h1: ',min(t[:,2]),max(t[:,2]))
```
h0: 0.079951 0.507349
h1: 5.26297 36.3217
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/LSMC/MCslmc.ebf','/')
file
```
{'age': array([1.3500000e+01, 1.3499865e+01, 1.3499730e+01, ..., 4.0500000e-04,
2.7000000e-04, 1.3500000e-04]),
'alpha': array([0., 0., 0., ..., 0., 0., 0.]),
'feh': array([-2.92777242, -2.86082556, -2.78943982, ..., 1.32784252,
1.35303748, 1.365485 ]),
'id': array(1),
'mass': array([50000., 50000., 50000., ..., 50000., 50000., 50000.]),
'pos3': array([[ 11.20553734, -28.35582758, -34.40470097],
[ 11.01574738, -28.46678535, -33.27619523],
[ 11.31282265, -30.05515196, -33.65236826],
...,
[ 10.95831855, -30.99401569, -33.94550137],
[ 8.5799832 , -28.87799141, -33.21369304],
[ 9.74939674, -28.33013758, -33.15553499]]),
'vel3': array([[-260.95549649, -120.38760924, -274.75946279],
[-290.68051132, -102.42146425, -263.45265899],
[-295.31028574, -104.34331036, -239.6816715 ],
...,
[-296.98964282, -132.44003713, -240.01935022],
[-275.52424156, -93.65709035, -254.06192129],
[-298.19898921, -109.86693807, -257.22770266]])}
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/LSMC/MCssmc.ebf','/')
file
```
{'age': array([1.350000e+01, 1.349865e+01, 1.349730e+01, ..., 4.050000e-03,
2.700000e-03, 1.350000e-03]),
'alpha': array([0., 0., 0., ..., 0., 0., 0.]),
'feh': array([-1.83636463, -1.83307777, -1.827067 , ..., -0.41322379,
-0.4066888 , -0.40658839]),
'id': array(1),
'mass': array([53000., 53000., 53000., ..., 53000., 53000., 53000.]),
'pos3': array([[ 15.77877671, -36.59390033, -42.97986366],
[ 15.60783733, -36.64025669, -42.11937081],
[ 16.87498076, -36.46886774, -41.67738806],
...,
[ 15.37465567, -36.95043508, -41.88393783],
[ 15.17595043, -36.86802325, -43.20468465],
[ 15.7590299 , -35.70727167, -41.54376234]]),
'vel3': array([[ -34.10308835, -206.08447523, 131.79378624],
[ -35.72955195, -206.7835443 , 145.03916486],
[ -27.64620978, -193.59308615, 142.48603435],
...,
[ -22.53343414, -199.92107541, 123.09333658],
[ -32.75526782, -195.66907904, 125.20102063],
[ -36.31622042, -180.09201764, 126.45517367]])}
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/LSMC/SMC_LMC_d3n64_den.ebf','/')
file
```
{'h_cubic': array([0., 0.])}
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/flat_sfr/flat_sfr_selection_down_to_5_d6n64_den.ebf','/')
file
```
{'h_cubic': array([[0., 0.],
[0., 0.],
[0., 0.],
...,
[0., 0.],
[0., 0.],
[0., 0.]])}
```python
file = ebf.read('/home/rybizki/Programme/GalaxiaData/nbody1/LSMC/SMC_LMC_d6n64_den.ebf','/')
file
```
{'h_cubic': array([[0., 0.],
[0., 0.]])}
```python
```
|
jan-rybizkiREPO_NAMEGalaxia_wrapPATH_START.@Galaxia_wrap_extracted@Galaxia_wrap-master@notebook@inspect_nbody_ebf.ipynb@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "simonsobs/nextline-rdb",
"repo_path": "nextline-rdb_extracted/nextline-rdb-main/src/nextline_rdb/alembic/README.md",
"type": "Markdown"
}
|
# Alembic Migration Environment
This folder is the [Alembic Migration Environment](https://alembic.sqlalchemy.org/en/latest/tutorial.html#the-migration-environment).
## How to create a migration revision
In this folder, in which `alembic.ini` is located:
```bash
# Ensure the target DB doesn't exist
rm -f migration.sqlite3
# Create an empty up-to-date target DB
alembic upgrade head
# Generate a migration script
alembic revision --autogenerate -m 'message'
# Edit the generated migration script in `versions/` if necessary
# Apply the migration script to the target DB
alembic upgrade head
# Clean up
rm -f migration.sqlite3
```
## ORM models for migration versions
Copy `src/nextline_rdb/models/` to `alembic/models/rev_{revision}/`.
The `models` folder includes the ORM models, tests of the models,
Hypothesis strategies for the models, and the test of the strategies.
The ORM models can be used for data migration and test migrations.
## Data migration
An example:
[2024-02-03_14:13-68cb1c590d1f.py](https://github.com/simonsobs/nextline-rdb/blob/v0.5.0/src/nextline_rdb/alembic/versions/2024-02-03_14%3A13-68cb1c590d1f.py)
## Test migrations
An example:
[test_2024-02-04_09:01-4dc6a93dfed8.py](https://github.com/simonsobs/nextline-rdb/blob/v0.5.0/tests/alembic/migrations/test_2024-02-04_09%3A01-4dc6a93dfed8.py)
|
simonsobsREPO_NAMEnextline-rdbPATH_START.@nextline-rdb_extracted@nextline-rdb-main@src@nextline_rdb@alembic@README.md@.PATH_END.py
|
{
"filename": "large_distributions.py",
"repo_name": "mwaskom/seaborn",
"repo_path": "seaborn_extracted/seaborn-master/examples/large_distributions.py",
"type": "Python"
}
|
"""
Plotting large distributions
============================
"""
import seaborn as sns
sns.set_theme(style="whitegrid")
diamonds = sns.load_dataset("diamonds")
clarity_ranking = ["I1", "SI2", "SI1", "VS2", "VS1", "VVS2", "VVS1", "IF"]
sns.boxenplot(
diamonds, x="clarity", y="carat",
color="b", order=clarity_ranking, width_method="linear",
)
|
mwaskomREPO_NAMEseabornPATH_START.@seaborn_extracted@seaborn-master@examples@large_distributions.py@.PATH_END.py
|
{
"filename": "stat.py",
"repo_name": "COSMOGRAIL/PyCS",
"repo_path": "PyCS_extracted/PyCS-master/pycs/gen/stat.py",
"type": "Python"
}
|
"""
Statistics related stuff.
"""
import sys
import os
import numpy as np
import math
import pycs.gen.util
def normal(x, mu, sigma):
    """
    Gaussian probability density evaluated at x, for mean mu and
    standard deviation sigma. Works elementwise on numpy arrays as
    well as on scalars.
    """
    var = sigma * sigma
    coeff = 1.0 / np.sqrt(2.0 * np.pi * var)
    return coeff * np.exp(-((x - mu) ** 2) / (2.0 * var))
def sf(l, binsize = 200, ssf=False):
	"""
	Structure function of a lightcurve.

	:param l: a lightcurve object providing ``getmags()``, ``getjds()`` and
		``len()`` (presumably a pycs lightcurve -- only those accessors are
		used here).
	:param binsize: number of point pairs averaged per bin in the binned
		structure function (ignored when ``ssf`` is True).
	:param ssf: if True, show a 2D scatter of (delta t, delta m) pairs
		("stochastic structure function") instead of the binned curve.

	Side effect: displays a matplotlib figure via ``plt.show()``;
	nothing is returned.

	For definition see for instance :
	De Vries, W.H. de, Becker, R., White, R., Loomis, C., 2005. Structure Function Analysis of Long-Term Quasar Variability. The Astronomical Journal 129, 615-615-629-629.
	"""
	import matplotlib.pyplot as plt
	mags = l.getmags()
	jds = l.getjds()
	n = len(l)
	# Leftover synthetic-data test code, kept for reference:
	#n = 1000
	#jds = np.arange(n)
	#mags = np.random.randn(n)*3.0
	# Build the n x n matrix of pairwise time differences jds[i] - jds[j].
	ja = np.ones((n,n)) * jds
	jam = ja - ja.transpose()
	jam = jam.flatten()
	# Keep each unordered pair exactly once: positive time lags only.
	keep = jam > 0.0
	jam = jam[keep]
	# Same construction for pairwise magnitude differences, masked with
	# the same `keep` so each delta-m matches its delta-t.
	ma = np.ones((n,n)) * mags
	mam = ma - ma.transpose()
	mam = mam.flatten()
	mam = mam[keep]
	if ssf: # stochastic structure function, we plot a 2d distribution
		# jam and mam are already 1D here; these flatten() calls are no-ops.
		jam = jam.flatten()
		mam = mam.flatten()
		plt.scatter(jam, mam, s=1.0)
		plt.xlabel("Delta t")
		plt.ylabel("Delta m")
		plt.show()
	else: # we do a normal structure function, "variance of increments versus delta t" :
		mam = np.square(mam)
		order = np.argsort(jam) # sorting according to the jd gaps
		jam = jam[order]
		mam = mam[order]
		m = len(jam)
		nbins = int(math.floor(m/binsize))
		jam = jam[0:nbins*binsize] # cutting to the nearest last bin
		mam = mam[0:nbins*binsize]
		# Reshape so each row is one bin of `binsize` consecutive lags.
		jam = jam.reshape((nbins, binsize))
		mam = mam.reshape((nbins, binsize))
		cjs = np.mean(jam, axis=1) # bin centers: mean time lag per bin
		# NOTE(review): the extra division by binsize looks unusual -- the
		# structure function is typically sqrt(mean(dm^2)) per bin. TODO
		# confirm against the De Vries et al. (2005) definition.
		cms = np.sqrt(np.mean(mam, axis=1) / float(binsize))
		#cms2 = np.sqrt(np.median(mam, axis=1) / float(binsize))
		plt.scatter(cjs, cms)
		#plt.scatter(cjs, cms2, c="red")
		plt.xlabel("Delta t")
		plt.ylabel("SF")
		plt.show()
def mad(data, axis=None):
    """
    Median absolute deviation of an array.

    :param data: array from which to compute the MAD
    :param axis: axis along which to compute the MAD
    :return: float, MAD of the array
    """
    center = np.median(data, axis)
    return np.median(np.absolute(data - center), axis)
def erf(x):
	"""
	Error function erf(x), computed with the Abramowitz & Stegun 7.1.26 rational
	approximation (maximum absolute error about 1.5e-7).
	There is one in scipy (scipy.special.erf), but this way we avoid the dependency.
	"""
	# erf is odd : work with |x| and restore the sign at the end.
	sign = -1 if x < 0 else 1
	x = abs(x)

	# Coefficients of the A&S 7.1.26 approximation.
	a1 =  0.254829592
	a2 = -0.284496736
	a3 =  1.421413741
	a4 = -1.453152027
	a5 =  1.061405429
	p  =  0.3275911

	# A&S formula 7.1.26, evaluated in Horner form.
	t = 1.0/(1.0 + p*x)
	poly = ((((a5*t + a4)*t + a3)*t + a2)*t + a1)*t
	y = 1.0 - poly*math.exp(-x*x)

	return sign*y # erf(-x) = -erf(x)
def runstest(residuals, autolevel=False, verbose=True):
	"""
	One-sample runs test of randomness as presented in Practical Statistics for Astronomers by
	J. V. Wall and C. R. Jenkins, paragraph 5.3.3
	WARNING : ERROR IN THE BOOKS EXPECTATION FORMULA ! confirmed by author

	:param residuals: numpy array of floats to test for randomness
	:param autolevel: if True, the median of residuals is subtracted before testing
	:param verbose: if True, print diagnostics along the way
	:return: dict with "zruns" (test statistic in units of sigmas), "pruns" (probability,
		assuming independent data, to get a result worse than the observed one) and
		"nruns" (the observed number of runs)
	"""
	medianlevel = np.median(residuals)
	if autolevel:
		if verbose:
			print "Leveling to median : %f" % (medianlevel)
		# NOTE(review): this subtraction modifies the caller's array in place for numpy input.
		residuals -= medianlevel

	bools = residuals > 0 # So residuals = 0 would be set to False, but
	bools = bools[np.abs(residuals) > 0.000001] # we remove those very close to 0
	if verbose:
		print "Total : %i points / significant : %i points" % (len(residuals), len(bools))

	n = len(bools)
	# The gaussian approximation below is only meaningful for reasonably large samples.
	if n <= 20:
		print "WARNING : to few points for a meaningful runs test (or too close to 0.0)"

	intbools = bools.astype(int)
	# Number of points above (+) and below (-) the level.
	nplus = np.sum(bools)
	nminus = n - nplus
	# And the number of runs
	changes = np.abs(intbools[1:] - intbools[:-1])
	nruns = np.sum(changes) + 1
	if verbose:
		print " + (m)    : %i" % nplus
		print " - (n)    : %i" % nminus
		print " Runs (r) : %i" % nruns

	# For large N, we can approximate the distribution of r by a gaussian :
	# Error from the book :
	#mur = (2.0 * nplus * nminus)/(nplus + nminus + 1.0)
	#sigmar = math.sqrt((2.0*nplus*nminus*(2.0*nplus*nminus - n))/(n*n*(n - 1.0)))
	# From corrected book and also wikipedia :
	mur = ((2.0 * nplus * nminus)/n) + 1.0
	sigmar = math.sqrt((mur-1.0)*(mur-2.0) / (n - 1.0))

	# Standardized deviation of the observed number of runs from its expectation.
	zruns = (nruns - mur) / sigmar
	# The probability to obtain a result worse than what you got :
	pruns = 1.0 - erf(abs(zruns)/math.sqrt(2.0))

	return {"zruns":zruns, "pruns":pruns, "nruns":nruns}
# def maprunstest(rl, seasons = None, autolevel=False, verbose=True):
# """
# maps the function runstest above on the seasons of a residual lightcurve rl
#
# I DO NOT TAKE INTO ACCOUNT THE MASK, PLEASE CUT IT FIRST !
# AND THEN RECALCULATE YOUR SEASONS !!!
#
# >>> res = pycs.gen.stat.maprunstest(lca, seasons, autolevel=False, verbose = False)
# >>> out = [(season.name, res["z"]) for (season, res) in zip (seasons, res)]
#
#
# """
#
# if lc.hasmask():
# print "WARNING : I do not take into account the mask !"
#
# residualslist = []
# if seasons == None:
# residualslist.append(lc.getmags())
# else:
# mags = lc.getmags()
# for season in seasons:
# residuals = mags[season.indices]
# residualslist.append(residuals)
#
# return map(lambda x: runstest(x, autolevel=autolevel, verbose=verbose), residualslist)
def subtract(lcs, spline):
	"""
	I return a list of residual light curves ("lcs - spline").
	Technically, "residual" light curves are nothing but normal lightcurves objects.
	Of course, I take into account any shifts or microlensing of your lcs.
	I do not modify my input arguments.

	:param lcs: list of lightcurve objects
	:param spline: spline object providing eval(jds)
	:return: list of new lightcurve objects whose mags are the residuals around the spline
	"""
	rls = []
	for l in lcs:
		if l.hasmask():
			print "WARNING : I do not take into account the mask !"
		lp = l.copy() # To avoid border effects

		# Bake all shifts and (if present) the microlensing into the copy's mags
		# before subtracting the spline.
		lp.applyfluxshift()
		lp.applymagshift()
		if lp.ml != None:
			lp.applyml()
		#print lcs
		lp.mags -= spline.eval(lp.getjds())
		rls.append(lp)
	return rls
def resistats(rl):
	"""
	Give me a residual lightcurve, I return a dict with some descriptive stats about its magnitudes.
	Keys : "mean", "std" (robust scatter), plus the runs-test keys "zruns", "pruns", "nruns".
	"""
	out = {
		"mean": np.mean(rl.getmags()),
		# Median absolute deviation instead of np.std, for robustness to outliers.
		"std": mad(rl.getmags()),
	}
	out.update(runstest(rl.getmags(), autolevel=False, verbose=False))
	return out
def mapresistats(rls):
	"""
	Apply resistats to every residual lightcurve in rls and return the list of stat dicts.
	"""
	out = []
	for rl in rls:
		out.append(resistats(rl))
	return out
def anaoptdrawn(optoriglcs, optorigspline, simset="simset", optset="optset", npkl=1000, plots=True, nplots=3, r=0.11, plotjdrange=None, plotcurveindexes=None, showplot=False, directory = "./", resihist_figsize = None):
	"""
	Not flexible but very high level function to analyse the spline-fit-residuals of drawn curves and comparing them to the
	real observations.
	This can be used to tune the parameters of the "drawing".

	.. warning:: The simset must have been optimized using spline fits, with option keepopt=True

	:param optoriglcs: optimized original curves
	:param optorigspline: spline that matches to these curves
	:param simset: name of the simulation set to analyse (pickles are read from directory + "sims_<simset>_opt_<optset>/")
	:param optset: name of the optimization set to analyse
	:param npkl: I read only the first npkl pickle files.
	:param plots: if True, figures are produced and saved to disk
	:param nplots: number of detailed residual plots of individual mock curves
	:param r: half-range of the residual histograms, in magnitudes
	:param plotjdrange: optional (jdmin, jdmax) range for the detailed residual plots
	:param plotcurveindexes: allows you to plot only a subset of lcs (smaller plots). Give a tuple like eg (0, 2, 3)
	:param showplot: if True, also show the histogram figure interactively
	:param directory: path in which the sims_* directories are searched
	:param resihist_figsize: optional figsize tuple for the residual histogram figure
	:return: one list per curve : [obs zruns, mean sim zruns, std sim zruns, obs sigma, mean sim sigma, std sim sigma]
	"""
	import matplotlib.pyplot as plt
	import glob

	print "Analysing the residuals of simset %s" % (simset)

	# For each light curve we make a dict that we will use to store stuff
	curves = [{"optoriglc":optoriglc} for optoriglc in optoriglcs]

	# We compute the residuals of the original curve
	optorigrlcs = subtract(optoriglcs, optorigspline)
	for (curve, optorigrlc) in zip(curves, optorigrlcs):
		curve["optorigrlc"] = optorigrlc

	# We read all the optimized mock curves :
	pkls = sorted(glob.glob(directory + "sims_%s_opt_%s/*_opt.pkl" % (simset, optset)))
	print directory + "sims_%s_opt_%s/*_opt.pkl" % (simset, optset)

	optmocksplinelist = []
	optmocklcslist = []

	for (i, pkl) in enumerate(pkls):
		# Only read the first npkl pickles.
		if i >= npkl:
			break
		opttweak = pycs.gen.util.readpickle(pkl, verbose=False)
		optmocksplinelist.extend(opttweak["optfctoutlist"])
		optmocklcslist.extend(opttweak["optlcslist"])

	assert len(optmocksplinelist) == len(optmocklcslist)
	print "We have %i simulated curves" % (len(optmocksplinelist))

	# We compute all the residuals of the mock curves, and store them
	for curve in curves:
		curve["optmockrlclist"] = []

	for (optmocklcs, optmockspline) in zip(optmocklcslist, optmocksplinelist):
		assert len(optmocklcs) == len(optoriglcs)
		optmockrlcs = pycs.gen.stat.subtract(optmocklcs, optmockspline)

		for (curve, optmockrlc) in zip(curves, optmockrlcs):
			# Sanity check : mocks and originals must match by object name.
			assert curve["optorigrlc"].object == optmockrlc.object
			curve["optmockrlclist"].append(optmockrlc)

	# We want to return the displayed statistics
	stats = []
	for curve in curves:
		curve["origresistats"] = resistats(curve["optorigrlc"])
		# NOTE(review): Python 2 map returns a list here; the three dict-builders below
		# iterate it several times, which would break with a Python 3 map iterator.
		curve["mockresistats"] = map(resistats, curve["optmockrlclist"])
		# Mean / median / std of each statistic over all the mock curves.
		curve["meanmockresistats"] = dict([[key, np.mean(np.array([el[key] for el in curve["mockresistats"]]))] for key in curve["origresistats"].keys()])
		curve["medmockresistats"] = dict([[key, np.median(np.array([el[key] for el in curve["mockresistats"]]))] for key in curve["origresistats"].keys()])
		curve["stdmockresistats"] = dict([[key, np.std(np.array([el[key] for el in curve["mockresistats"]]))] for key in curve["origresistats"].keys()])

		print "++++++ %s ++++++" % (curve["optorigrlc"].object)
		curve["zrunstxt"] = "zruns : %.2f (obs) vs %.2f +/- %.2f (sim)" % (curve["origresistats"]["zruns"], curve["meanmockresistats"]["zruns"], curve["stdmockresistats"]["zruns"])
		curve["sigmatxt"] = "sigma : %.4f (obs) vs %.4f +/- %.4f (sim)" % (curve["origresistats"]["std"], curve["meanmockresistats"]["std"], curve["stdmockresistats"]["std"])
		print curve["zrunstxt"]
		print curve["sigmatxt"]

		# return the original, mean and std of mocks zruns, then original, mean and std of mocks of sigma
		stats.append([curve["origresistats"]["zruns"], curve["meanmockresistats"]["zruns"], curve["stdmockresistats"]["zruns"], curve["origresistats"]["std"], curve["meanmockresistats"]["std"], curve["stdmockresistats"]["std"]])

		#for item in curve["mockresistats"]:
		#	print item["zruns"]

	# Now we proceed with making plots.
	# The plots of the residuals statistics, one for each curve (disabled) :
	"""
	if plots:
		for curve in curves:
			plt.figure(figsize=(12, 6))
			plt.figtext(0.03, 0.5, curve["optorigrlc"].object, fontsize=30)
			# Histo of zruns
			plt.subplot(121)
			plt.hist(np.array([el["zruns"] for el in curve["mockresistats"]]), 30, color="gray")
			plt.axvline(curve["origresistats"]["zruns"], color="green", linewidth=3.0)
			plt.title(curve["zrunstxt"])
			plt.xlabel("zruns")
			plt.xlim(-10.0, 1.0)
			# Histo of residuals
			r = 0.1
			plt.subplot(122)
			plt.hist(np.concatenate([rlc.mags for rlc in curve["optmockrlclist"]]), 50, range=(-r, r), facecolor='gray', normed=True)
			# Gaussian for the mock hist :
			plt.plot(np.linspace(-r, r, 100), normal(np.linspace(-r, r, 100), curve["meanmockresistats"]["mean"], curve["meanmockresistats"]["std"]), color="gray")
			plt.hist(curve["optorigrlc"].mags, 50, facecolor='green', alpha=0.5, range=(-r, r), normed=True)
			plt.xlabel("Residuals [mag]")
			plt.title(curve["sigmatxt"])
			plt.xlim(-r, r)
			plt.savefig("anaoptdrawn_%s_%s_%s.pdf" % (simset, optset, curve["optorigrlc"].object))
	"""

	# Resi histos combined into one nicer figure (disabled) :
	"""
	if plots:
		r = 0.11
		plt.figure(figsize=(3*len(curves), 3))
		plt.subplots_adjust(left=0.03, bottom=0.19, right=0.97, top=None, wspace=None, hspace=None)
		for (i,curve) in enumerate(curves):
			#print (1, len(curves), i+1)
			plt.subplot(1, len(curves), i+1)
			plt.hist(np.concatenate([rlc.mags for rlc in curve["optmockrlclist"]]), 50, range=(-r, r), facecolor='gray', normed=True, histtype="stepfilled")
			# Gaussian for the mock hist :
			#plt.plot(np.linspace(-r, r, 100), normal(np.linspace(-r, r, 100), curve["origresistats"]["mean"], curve["origresistats"]["std"]), color="green")
			plt.hist(curve["optorigrlc"].mags, 50, facecolor='green', alpha=0.4, range=(-r, r), normed=True, histtype="stepfilled")
			plt.xlabel("Spline fit residuals [mag]")
			#plt.title(sigmatxt)
			#print plt.gca().get_ylim()
			plt.text(-r+0.1*r, 0.85*plt.gca().get_ylim()[1], curve["optorigrlc"].object, fontsize=20)
			plt.xlim(-r, r)
			plt.gca().get_yaxis().set_ticks([])
		#plt.show()
		plt.savefig("anaoptdrawn_%s_%s_resihists.pdf" % (simset, optset))
	"""

	# Resi and zruns histos combined into one nicer figure :
	if plots:
		if resihist_figsize == None :
			plt.figure(figsize=(3*len(curves), 4))
		else :
			plt.figure(figsize=resihist_figsize)
		plt.subplots_adjust(left=0.02, bottom=0.12, right=0.98, top=0.98, wspace=0.08, hspace=0.37)

		# Resi histos (top row) : mocks in black, observations in the curve's plot colour.
		for (i,curve) in enumerate(curves):
			#print (1, len(curves), i+1)
			plt.subplot(2, len(curves), i+1)
			plt.hist(np.concatenate([rlc.mags for rlc in curve["optmockrlclist"]]), 50, range=(-r, r), facecolor='black', alpha=0.4, normed=1, histtype="stepfilled")
			# Gaussian for the mock hist :
			#plt.plot(np.linspace(-r, r, 100), normal(np.linspace(-r, r, 100), curve["origresistats"]["mean"], curve["origresistats"]["std"]), color="green")
			plt.hist(curve["optorigrlc"].mags, 50, facecolor=curve["optorigrlc"].plotcolour, alpha=0.5, range=(-r, r), normed=1, histtype="stepfilled")
			plt.xlabel("Spline fit residuals [mag]")
			#print plt.gca().get_ylim()
			plt.text(-r+0.1*r, 0.8*plt.gca().get_ylim()[1], curve["optorigrlc"].object, fontsize=18)
			plt.xlim(-r, r)
			plt.gca().get_yaxis().set_ticks([])

		# zruns histos (bottom row) : observed zruns marked as a vertical line over the mock distribution.
		for (i,curve) in enumerate(curves):
			#print (1, len(curves), i+1)
			plt.subplot(2, len(curves), len(curves)+i+1)
			plt.hist(np.array([el["zruns"] for el in curve["mockresistats"]]), 20, facecolor="black", alpha=0.4, normed=1, histtype="stepfilled")
			plt.axvline(curve["origresistats"]["zruns"], color=curve["optorigrlc"].plotcolour, linewidth=2.0, alpha=1.0)
			plt.xlabel(r"$z_{\mathrm{r}}$", fontsize=18)
			# plt.xlim(-5.0, 5.0)
			#plt.text(-9.0, 0.85*plt.gca().get_ylim()[1], curve["optorigrlc"].object, fontsize=20)
			plt.gca().get_yaxis().set_ticks([])

		if showplot:
			plt.show()

		plt.savefig("fig_anaoptdrawn_%s_%s_resihists.png" % (simset, optset))

	# A detailed plot of some residuals, just for a few drawn curves
	if plots:
		for i in range(nplots):
			optmockrlcs = [curve["optmockrlclist"][i] for curve in curves]
			for l in optmockrlcs:
				l.plotcolour = "black"
			optorigrlcs = [curve["optorigrlc"] for curve in curves]

			# Optionally restrict to a subset of the curves.
			if plotcurveindexes != None:
				optorigrlcs = [optorigrlcs[index] for index in plotcurveindexes]
				optmockrlcs = [optmockrlcs[index] for index in plotcurveindexes]
			plotresiduals([optorigrlcs, optmockrlcs], jdrange=plotjdrange, nicelabel=False, showlegend=False, showsigmalines = False, errorbarcolour = "#999999", filename="fig_anaoptdrawn_%s_%s_resi_%i.png" % (simset, optset, i+1))

	return stats
def plotresiduals(rlslist, jdrange=None, magrad=0.1, errorbarcolour = "#BBBBBB",
	showerrorbars=True, showlegend=True, nicelabel=True,
	showsigmalines=True, filename=None, ax = None):
	"""
	We plot the residual lightcurves in separate frames.

	The argument rlslist is a *list* of *lists* of lightcurve objects.
	The sublists should have the same length, I'll choose my number of panels accordingly.
	The structure is : [[lca, lcb], [lca_sim1, lcb_sim1], ...]
	If you have only one lightcurve object, you can of course pass [[l]] ...

	:param rlslist: list of lists of residual lightcurve objects (see above)
	:param jdrange: optional (jdmin, jdmax) x range; defaults to the span of the first curve +/- 50 days
	:param magrad: half-range of the residual (y) axis, in magnitudes
	:param errorbarcolour: colour of the error bars
	:param showerrorbars: if True, plot error bars instead of plain points
	:param showlegend: if True, draw a legend in each panel
	:param nicelabel: if True, label curves inside the panels with their object names
	:param showsigmalines: if True, draw +/- std guide lines for each curve
	:param filename: if given, save the figure there instead of showing it
	:param ax: optional existing matplotlib axes to draw into (single panel)

	I disregard the timeshift of the curves !
	"""
	import matplotlib.pyplot as plt
	import matplotlib.ticker

	# Tick locators for the time axis (days) ...
	minorLocator = matplotlib.ticker.MultipleLocator(50)
	majorLocator = matplotlib.ticker.MultipleLocator(200)
	# ... and for the residual magnitude axis.
	resminorLocator = matplotlib.ticker.MultipleLocator(0.01)
	resmajorLocator = matplotlib.ticker.MultipleLocator(0.05)

	# Tiny margin so the extreme ticks are not drawn exactly on the panel edge.
	eps = 0.001

	# One panel per lightcurve of the first sublist.
	npanels = len(rlslist[0])
	if ax == None:
		fig = plt.figure(figsize=(12,1.6*npanels)) # sets figure size
		fig.subplots_adjust(left=0.07, right=0.99, top=0.95, bottom=0.17, hspace=0.05)
		ax = plt.gca()
		ihaveax = False
	else :
		ihaveax = True
		# fig = plt.figure(figsize=(12,1.6*npanels))
		# fig.subplots_adjust(left = 0.07, right=0.99, top=0.95, bottom=0.14, hspace=0.05)

	#plt.rc('font', family = 'serif', serif = 'STIXGeneral')

	for i in range(npanels): # i is the panel index
		rls = [rlslist[j][i] for j in range(len(rlslist))] # j is the curve index.

		if ihaveax :
			ax0 = ax
		else :
			# Panels after the first share both axes with the first one.
			if i > 0:
				ax = plt.subplot(npanels, 1, i+1, sharex=ax0, sharey=ax0)
			else:
				ax = plt.subplot(npanels, 1, i+1)
				ax0 = ax

		for (j, rl) in enumerate(rls):
			stats = resistats(rl)
			label = "[%s/%s] (std: %.4f, zruns : %.3f)" % (rl.telescopename, rl.object, stats["std"], stats["zruns"])
			#print label
			if nicelabel:
				label = "%s" % (rl.object)
				#label = "%s (std: %.4f, zruns : %.3f)" % (rl.object, stats["std"], stats["zruns"])

			if showerrorbars:
				ax.errorbar(rl.jds, rl.getmags(), rl.magerrs, fmt=".", color=rl.plotcolour, markeredgecolor=rl.plotcolour, ecolor=errorbarcolour, label=label, elinewidth=0.5)
			else:
				ax.plot(rl.jds, rl.getmags(), marker=".", markersize=3.0, linestyle="None", markeredgecolor=rl.plotcolour, color=rl.plotcolour, label=label)

			if showsigmalines:
				# Guide lines at +/- one robust standard deviation of this curve.
				ax.axhline(y = stats["std"], lw=0.5, color=rl.plotcolour)
				ax.axhline(y = -stats["std"], lw=0.5, color=rl.plotcolour)

			if nicelabel:
				ax.text(0.04 + (0.087 * j), 0.82 , label, transform=ax.transAxes, color = rl.plotcolour)
			else:
				if not showlegend:
					if j == 0:
						ax.text(0.01 , 0.81 , rl.object, transform=ax.transAxes, color = rl.plotcolour, fontsize=17)

		# Zero-residual reference line.
		ax.axhline(0, color="gray", dashes=(3,3))

		ax.xaxis.set_minor_locator(minorLocator)
		ax.xaxis.set_major_locator(majorLocator)
		ax.yaxis.set_minor_locator(resminorLocator)
		ax.yaxis.set_major_locator(resmajorLocator)

		ax.set_ylim(-magrad+eps, magrad-eps)
		# Magnitudes increase downwards : invert the y axis.
		ax.set_ylim(ax.get_ylim()[::-1])

		if showlegend:
			ax.legend(numpoints = 1, prop={'size':10})

		#ax.set_ylabel("Residual [mag]")
		ax.set_xlabel("HJD - 2400000.5 [day]", fontsize =18)
		# Only the bottom panel keeps its x tick labels and x axis label.
		if i != npanels-1:
			plt.setp(ax.get_xticklabels(), visible=False)
			ax.set_xlabel("")

	# Common y label for the whole figure.
	ax.text(0.015, 0.54, ' Spline Residuals [mag]', rotation=90, verticalalignment="center", horizontalalignment="center", transform=plt.gcf().transFigure, fontsize = 16)

	if jdrange != None:
		plt.xlim(jdrange[0], jdrange[1])
	else:
		plt.xlim(np.min(rlslist[0][0].jds)-50, np.max(rlslist[0][0].jds)+50)

	if filename and ihaveax:
		plt.savefig(filename)
	elif filename and not ihaveax :
		plt.savefig(filename)
	elif not filename and ihaveax :
		return
	else:
		plt.show()
|
COSMOGRAILREPO_NAMEPyCSPATH_START.@PyCS_extracted@PyCS-master@pycs@gen@stat.py@.PATH_END.py
|
{
"filename": "PecinaCeplechaFunction.py",
"repo_name": "wmpg/WesternMeteorPyLib",
"repo_path": "WesternMeteorPyLib_extracted/WesternMeteorPyLib-master/wmpl/Utils/PecinaCeplechaFunction.py",
"type": "Python"
}
|
import numpy as np
import scipy.optimize
import scipy.special
import scipy.integrate
import scipy.interpolate
from wmpl.Utils.AtmosphereDensity import atmDensPoly, fitAtmPoly
# Define the ceiling height (assumed to be h_inf in terms of the air density)
HT_CEILING = 180
def lenFromHt(h, c, zr):
    """ Length along the trajectory for a given height, constant c and zenith angle zr.

    Arguments:
        h: [float] Height in km.
        c: [float] Height-length constant (km).
        zr: [float] Zenith angle (radians).

    Return:
        l: [float] Length (km).
    """

    # The trajectory is a straight line inclined by zr from the vertical.
    return c - h/np.cos(zr)
def _lenFromHtResidual(params, ht_data, len_target):
    """ Sum-of-squares residual used for finding the constant c and the zenith angle. """

    c, zr = params
    diff = len_target - lenFromHt(ht_data, c, zr)

    return np.sum(diff**2)
def htFromLen(l, c, zr):
    """ Height for a given length along the trajectory, constant c and zenith angle zr.
    Inverse of lenFromHt.

    Arguments:
        l: [float] Length (km).
        c: [float] Height-length constant (km).
        zr: [float] Zenith angle (radians).

    Return:
        h: [float] Height in km.
    """

    h = (c - l)*np.cos(zr)

    return h
def velFromHtPhysicalParams(ht_arr, v_inf, m_inf, sigma, zr, K, dens_interp):
    """ For the given heights and meteoroid physical parameters, compute the velocity.

    Arguments:
        ht_arr: [ndarray] Heights in meters.
        v_inf: [float] Initial velocity in m/s.
        m_inf: [float] Initial mass in kg.
        sigma: [float] Ablation coefficient (SI units; scaled by 1e6 below to match the km-based
            velocities — TODO confirm the intended unit convention).
        zr: [float] Zenith angle (radians).
        K: [float] Shape-density coefficient (m^2/kg^(2/3)).
        dens_interp: [scipy.interpol handle] Interpolation handle for the air mass density in kg/m^3 where input is in meters.

    Return:
        vel_arr: [ndarray] Velocity for every given height (m/s).
    """

    # Convert to km as it keeps the values in the Ei integral small
    ht_arr = np.array(ht_arr)/1000
    v_inf /= 1000
    sigma *= 1e6

    vel_arr = []

    # Go through the whole array of heights (in km)
    for ht in ht_arr:

        # Integrate the air density from the given height to the ceiling (compute in kg/m^3)
        air_dens_integ = scipy.integrate.quad(dens_interp, 1000*ht, 1000*HT_CEILING)[0]

        # Compute the Ei((sigma*v**2)/6) term
        eiv_term = scipy.special.expi((sigma*v_inf**2)/6) - (2*K*np.exp((sigma*v_inf**2)/6))/((m_inf**(1/3.0))*np.cos(zr))*air_dens_integ

        ### Numerically invert the velocity from the exponential integral ###

        def _diff(v, sigma, eiv_target):

            # Compute the guess value of the Ei((sigma*v**2)/6) term for the given velocity
            eiv_guess = scipy.special.expi((sigma*v**2)/6)

            # Compute the square residual
            return (eiv_target - eiv_guess)**2

        # Bounded 1D minimization recovers the velocity whose Ei term matches eiv_term.
        v_first_guess = v_inf
        v_bounds = [(0.1, 80)]

        res = scipy.optimize.minimize(_diff, v_first_guess, args=(sigma, eiv_term), bounds=v_bounds)

        # print()
        # print("HT:", ht)
        # print("Air integ:", air_dens_integ)
        # print("E_vinf:", scipy.special.expi((sigma*v_inf**2)/6))
        # print("EIV:", eiv_term)
        # print("vel:", res.x[0])

        vel = res.x[0]

        ###

        # Store the velocity in m/s
        vel_arr.append(1000*vel)

    return np.array(vel_arr)
def velFromHt(ht_arr, h0, v0, v_inf, sigma, c, zr, dens_interp):
    """ Compute the velocity given the height and parameters as defined by Pecina & Ceplecha (1984) model.

    Arguments:
        ht_arr: [ndarray] Heights in km.
        h0: [float] Height of the reference point (km).
        v0: [float] Velocity at the reference point (km/s).
        v_inf: [float] Velocity at infinity (km/s).
        sigma: [float] Ablation coefficient (km-based units, as used throughout this module).
        c: [float] Height-length constant (km). (Not used here; kept for a uniform signature.)
        zr: [float] Zenith angle (radians). (Not used here; kept for a uniform signature.)
        dens_interp: [scipy.interpol handle] Interpolation handle for the air mass density in kg/m^3 where input is in meters.

    Return:
        vel_arr: [ndarray] Velocity at every given height (km/s).
    """

    # Integrate the air density from the reference point to the ceiling (compute in kg/m^3).
    # This does not depend on the evaluated height, so compute it once instead of once per point.
    air_dens_integ_h0 = scipy.integrate.quad(dens_interp, 1000*h0, 1000*HT_CEILING)[0]

    # Ei((sigma*v**2)/6) at the two boundary velocities (also loop-invariant).
    eiv_inf = scipy.special.expi((sigma*v_inf**2)/6)
    eiv_0 = scipy.special.expi((sigma*v0**2)/6)

    def _diff(v, sigma, eiv_target):

        # Compute the guess value of the Ei((sigma*v**2)/6) term for the given velocity
        eiv_guess = scipy.special.expi((sigma*v**2)/6)

        # Compute the square residual
        return (eiv_target - eiv_guess)**2

    vel_arr = []

    for ht in ht_arr:

        # Integrate the air density from the given height to the ceiling (compute in kg/m^3)
        air_dens_integ_ht = scipy.integrate.quad(dens_interp, 1000*ht, 1000*HT_CEILING)[0]

        # Compute the Ei((sigma*v**2)/6) term at this height
        eiv_term = eiv_inf - (eiv_inf - eiv_0)*air_dens_integ_ht/air_dens_integ_h0

        ### Numerically invert the velocity from the exponential integral ###

        v_first_guess = v_inf
        v_bounds = [(0.1, 80)]

        res = scipy.optimize.minimize(_diff, v_first_guess, args=(sigma, eiv_term), bounds=v_bounds)

        vel = res.x[0]

        ###

        # Store the velocity in km/s
        vel_arr.append(vel)

    return np.array(vel_arr)
def timeFromLen(len_arr, t0, l0, v0, v_inf, sigma, c, zr, dens_interp):
    """ Compute the time given the length of a Pecina & Ceplecha (1984) model.

    Arguments:
        len_arr: [ndarray] Lengths in km. Should be sorted in increasing order, as required by the
            spline interpolation below.
        t0: [float] Time of the reference point (seconds).
        l0: [float] Length of the reference point (km).
        v0: [float] Velocity at the reference point (km/s).
        v_inf: [float] Velocity at infinity (km/s).
        sigma: [float] Ablation coefficient (km-based units, as used throughout this module).
        c: [float] Height-length constant (km).
        zr: [float] Zenith angle (radians).
        dens_interp: [scipy.interpol handle] Interpolation handle for the air mass density in kg/m^3 where input is in meters.

    Return:
        time_arr: [ndarray] Time at every given length (seconds).
    """

    # Compute the h0 limit
    h0 = htFromLen(l0, c, zr)

    # Compute the height for every given length
    ht_arr = [htFromLen(l, c, zr) for l in len_arr]

    # Compute the model velocity at every height
    vel_arr = velFromHt(ht_arr, h0, v0, v_inf, sigma, c, zr, dens_interp)

    # Interpolate the inverse velocity over length. The spline depends only on the whole
    # (length, velocity) arrays, so build it once instead of once per evaluated point.
    inv_vel_interp = scipy.interpolate.CubicSpline(len_arr, 1.0/vel_arr)

    # Since dt = dl/v, integrating 1/v over length from l0 gives the time offset from t0.
    time_arr = [t0 + scipy.integrate.quad(inv_vel_interp, l0, l)[0] for l in len_arr]

    return np.array(time_arr)
def jacchiaFuncLen(t, a1, a2, a3, a4):
    """ Length as a function of time for the Jacchia exponential deceleration model. """

    linear_part = a1 + a2*t
    decel_part = np.abs(a3)*np.exp(np.abs(a4)*t)

    return linear_part - decel_part
def jacchiaFuncVel(t, a1, a2, a3, a4):
    """ Velocity as a function of time for the Jacchia exponential deceleration model
    (time derivative of jacchiaFuncLen; a1 does not appear). """

    decel_rate = np.abs(a3*a4)*np.exp(np.abs(a4)*t)

    return a2 - decel_rate
def fitPecinaCeplecha84Model(lat, lon, jd, time_data, ht_data, len_data, dens_interp=None, sigma_initial=0.03):
    """ Fit the Pecina & Ceplecha (1984) model to the given data.

    Arguments:
        lat: [float] Latitude (radians).
        lon: [float] Longitude (radians).
        jd: [float] Julian date of the event.
        time_data: [ndarray] Relative time (seconds).
        ht_data: [ndarray] Height (km).
        len_data: [ndarray] Length (km).

    Keyword arguments:
        dens_interp: [func] Function which takes the height (in METERS!) and returns the atmosphere
            density at the given point in kg/m^3. If not given, it will be computed.
        sigma_initial: [float] Initial ablation coefficient (km^2/s^2). The fit is very dependent on
            this number and different numbers should be tried to improve the fit. sigma = 0.03 by
            default.

    Return:
        t0: [float] Time of the reference point (seconds).
        l0: [float] Length of the reference point (km).
        v0: [float] Velocity at the reference point (km/s).
        v_inf: [float] Velocity at infinity (km/s).
        sigma: [float] Ablation coefficient (km^2/s^2).
        c: [float] Height-length constant (km).
        zr: [float] Zenith angle (radians).
        dens_interp: [scipy.interpol handle] Interpolation handle for the air mass density in kg/m^3
            where input is in meters.
    """

    ### FIT THE AIR DENSITY MODEL ###

    # Fit a 7th order polynomial to the air mass density from NRL-MSISE from the ceiling height to 3 km below
    # the fireball - limit the height to 12 km
    ht_min = np.min(ht_data) - 3
    if ht_min < 12:
        ht_min = 12

    if dens_interp is None:

        # Compute the poly fit
        print("Fitting atmosphere polynomial...")
        dens_co = fitAtmPoly(lat, lon, 1000*ht_min, 1000*HT_CEILING, jd)

        # Create a convenience function for computing the density at the given height
        dens_interp = lambda h: atmDensPoly(h, dens_co)

        print(" ... done!")

    ###


    ### FIT THE HEIGHT-LENGTH CONSTANT

    print("Finding height-length constant...")

    # Find the height-length constant and zenith angle
    p0 = [0, np.radians(45)]
    res = scipy.optimize.minimize(_lenFromHtResidual, p0, args=(ht_data, len_data))

    # Extract fitted parameters
    c, zr = res.x
    zr = np.abs(zr)

    print("c = {:.2f} km".format(c))
    print("zr = {:.2f} deg".format(np.degrees(zr)))

    # # Plot the c, zr fit
    # ht_arr = np.linspace(np.min(ht_data), np.max(ht_data), 100)
    # plt.scatter(ht_data, len_data)
    # plt.plot(ht_arr, lenFromHt(ht_arr, c, zr))
    # plt.xlabel("Height (km)")
    # plt.ylabel("Length (km)")
    # plt.show()

    ###


    def _jacchiaResiduals(params, len_target, time_data):
        # Sum of squared residuals of the Jacchia length model.
        return np.sum((len_target - jacchiaFuncLen(time_data, *params))**2)
        #return np.sum(np.abs(len_target - jacchiaFuncLen(time_data, *params)))

    # Fit the Jacchia function to get the initial estimate of the fit parameters
    p0 = [0, 10, 0, 1]
    res = scipy.optimize.minimize(_jacchiaResiduals, p0, args=(len_data, time_data), method='Nelder-Mead')
    a1, a2, a3, a4 = res.x

    # # Show Jacchia fit
    # plt.scatter(time_data, len_data)
    # plt.plot(time_data, jacchiaFuncLen(time_data, a1, a2, a3, a4))
    # plt.show()


    def _residuals(params, t0, c, zr, dens_interp, len_arr, time_target):
        """ Residuals function for the model fit. """

        l0, v0, v_inf, sigma = params

        # Compute the time guess with the given parameters
        time_arr = timeFromLen(len_arr, t0, l0, v0, v_inf, sigma, c, zr, dens_interp)

        # Sum of squared residuals
        cost = np.sum((time_target - time_arr)**2)

        # # Sum of absolute residuals
        # cost = np.sum(np.abs(time_target - time_arr))

        print("Cost = {:16.10f}, guess: l0 = {:7.3f}, v0 = {:6.3f}, vi = {:6.3f}, sigma = {:.5f}".format(cost, *params))

        return cost

    # Choose t0 at the 0.77*max_time (converges better if this is at a point where there's deceleration)
    t0 = 0.77*np.max(time_data)
    print("t0 = {:.2f} s".format(t0))

    # Construct the initial guess of the fit parameters using the Jacchia function
    l0 = jacchiaFuncLen(t0, a1, a2, a3, a4)
    v0 = jacchiaFuncVel(t0, a1, a2, a3, a4)
    v_inf = a2
    sigma = sigma_initial # km^2/s^2

    # Separate initial guess velocities if they are too close
    if (v_inf - v0) < 1:
        v0 = v_inf - 2

    p0 = [l0, v0, v_inf, sigma]
    print("Initial parameters:", p0)

    # Set the optimization bounds
    bounds = [
        (     0, np.max(len_data)), # l0
        (     0, 80.0),             # v0
        (    10, 80.0),             # v_inf
        (0.0001, 1.0)               # sigma
    ]

    # Set the constraint that v_inf > v0
    constraints = ({'type': 'ineq',
                    'fun': lambda x: x[2] - x[1]})

    # Fit the parameters to the observations
    res = scipy.optimize.minimize(_residuals, p0, args=(t0, c, zr, dens_interp, len_data, time_data), \
        bounds=bounds, constraints=constraints, method='SLSQP')

    # # Default tolerance used by SLSQP
    # ftol = 1e-06

    # # Compute the formal uncertainties
    # # Source: https://stackoverflow.com/a/53489234
    # tmp_i = np.zeros(len(res.x))
    # for i in range(len(res.x)):
    #     tmp_i[i] = 1.0
    #     hess_inv_i = res.hess_inv(tmp_i)[i]
    #     uncertainty_i = np.sqrt(max(1, abs(res.fun))*ftol*hess_inv_i)
    #     tmp_i[i] = 0.0
    #     print('x^{0} = {1:.3f} ± {2:.6f}'.format(i, res.x[i], uncertainty_i))

    l0, v0, v_inf, sigma = res.x

    return t0, l0, v0, v_inf, sigma, c, zr, dens_interp
if __name__ == "__main__":
import os
import sys
import argparse
import matplotlib.pyplot as plt
from wmpl.Utils.Pickling import loadPickle
# ### COMMAND LINE ARGUMENTS
# # Init the command line arguments parser
# arg_parser = argparse.ArgumentParser(description="""Fit the Pecina & Ceplecha (1984) model to a trajectory in the pickle file.""",
# formatter_class=argparse.RawTextHelpFormatter)
# arg_parser.add_argument('input_file', type=str, help='Path to the .pickle file.')
# # Parse the command line arguments
# cml_args = arg_parser.parse_args()
# ############################
# # Load the pickle file
# if not os.path.isfile(cml_args.input_file):
# print("Could not find file:", cml_args.input_file)
# print("Exiting...")
# sys.exit()
# # Load the trajectory pickle file
# traj = loadPickle(*os.path.split(cml_args.input_file))
# # Extract the time, height, and length data
# time_data = []
# len_data = []
# ht_data = []
# vel_data = []
# for obs in traj.observations:
# # Relative time in seconds
# time_obs = obs.time_data[obs.ignore_list == 0]
# time_data += time_obs.tolist()
# # Height in km
# ht_obs = obs.model_ht[obs.ignore_list == 0]/1000
# ht_data += ht_obs.tolist()
# # Length in km
# len_obs = obs.state_vect_dist[obs.ignore_list == 0]/1000
# len_data += len_obs.tolist()
# # Velocity in km/s
# vel_obs = obs.velocities[obs.ignore_list == 0]/1000
# vel_data += vel_obs.tolist()
# # Sort observations by length
# tmp_arr = np.c_[time_data, ht_data, len_data, vel_data]
# tmp_arr = tmp_arr[np.argsort(len_data)]
# time_data, ht_data, len_data, vel_data = tmp_arr.T
# # # Check data
# # plt.scatter(time_data, len_data)
# # plt.show()
# # plt.scatter(ht_data, vel_data)
# # plt.show()
# # Fit the Pecina & Ceplecha (1984) model to observations
# t0, l0, v0, v_inf, sigma, c, zr, dens_interp = fitPecinaCeplecha84Model(traj.rend_lat, traj.rend_lon, \
# traj.jdt_ref, time_data, ht_data, len_data)
# print("Solution:")
# print(" t0 = {:.3f} s".format(t0))
# print(" l0 = {:.3f} km".format(l0))
# print(" v0 = {:.3f} km/s".format(v0))
# print(" v_inf = {:.3f} km/s".format(v_inf))
# print(" sigma = {:.6f} km^2/s^2".format(sigma))
# # Compute the h0 limit
# h0 = htFromLen(l0, c, zr)
# # Compute the velocity from height and model parameters
# ht_arr = ht_dens_arr = np.linspace(1000*np.min(ht_data), 1000*np.max(ht_data), 100)
# vel_arr = 1000*velFromHt(ht_arr/1000, h0, v0, v_inf, sigma, c, zr, dens_interp)
# # Plot velocity observations vs fit
# plt.scatter(vel_data[vel_data > 0], ht_data[vel_data > 0])
# plt.plot(vel_arr/1000, ht_arr/1000)
# plt.xlabel("Velocity (km/s)")
# plt.ylabel("Height (km)")
# plt.show()
# # Compute the time from height and model parameters
# len_arr = np.linspace(np.min(len_data), np.max(len_data), 100)
# time_arr = timeFromLen(len_arr, t0, l0, v0, v_inf, sigma, c, zr, dens_interp)
# # Plot time vs length observations vs fit
# plt.scatter(time_data, len_data)
# plt.plot(time_arr, len_arr)
# plt.xlabel("Time (s)")
# plt.ylabel("Length (km)")
# plt.show()
# # Plot fit residuals
# time_residuals = time_data - timeFromLen(len_data, t0, l0, v0, v_inf, sigma, c, zr, dens_interp)
# plt.scatter(len_data, time_residuals)
# # Plot the zero line
# plt.plot(len_arr, np.zeros_like(len_arr), c='k', linestyle='dashed')
# plt.xlabel("Length (km)")
# plt.ylabel("Time residuals (s)")
# max_res = 1.2*np.max(np.abs(time_residuals))
# plt.ylim(-max_res, max_res)
# plt.show()
# sys.exit()
### BELOW IS THE EXAMPLE FOR THE ORIGINAL PAPER ###

# Location data for the PN example event (rough); geodetic coordinates in radians
lat = np.radians(50)
lon = np.radians(-107)
jd = 2444239.50000

# Example data from Pecina & Ceplecha (1983) for PN 39 404
pn_data = np.array([
    # t(s),h (km),l (km)
    [0.00,79.174,0.000],
    [0.05,78.581,0.714],
    [0.10,77.904,1.530],
    [0.15,77.311,2.246],
    [0.25,76.015,3.808],
    [0.30,75.384,4.569],
    [0.40,74.111,6.102],
    [0.45,73.461,6.886],
    [0.50,72.837,7.639],
    [0.55,72.195,8.413],
    [0.60,71.556,9.183],
    [0.65,70.909,9.964],
    [0.70,70.269,10.735],
    [0.75,69.646,11.487],
    [0.90,67.750,13.773],
    [1.00,66.482,15.303],
    [1.05,65.852,16.062],
    [1.10,65.229,16.814],
    [1.15,64.596,17.578],
    [1.20,63.960,18.345],
    [1.25,63.338,19.096],
    [1.30,62.694,19.873],
    [1.35,62.086,20.606],
    [1.40,61.449,21.376],
    [1.45,60.829,22.123],
    [1.55,59.558,23.657],
    [1.60,58.949,24.392],
    [1.70,57.685,25.918],
    [1.75,57.055,26.679],
    [1.80,56.424,27.440],
    [1.85,55.795,28.199],
    [1.90,55.187,28.933],
    [1.95,54.576,29.671],
    [2.00,53.995,30.372],
    [2.05,53.340,31.163],
    [2.20,51.410,33.493],
    [2.30,50.191,34.966],
    [2.35,49.563,35.724],
    [2.40,48.892,36.534],
    [2.45,48.294,37.257],
    [2.50,47.682,37.996],
    [2.55,47.107,38.691],
    [2.60,46.500,39.424],
    [2.65,45.900,40.148],
    [2.70,45.289,40.887],
    [2.75,44.713,41.583],
    [2.85,43.532,43.010],
    [2.90,42.907,43.765],
    [2.95,42.363,44.422],
    [3.05,41.144,45.895],
    [3.10,40.581,46.575],
    [3.15,40.001,47.276],
    [3.20,39.478,47.909],
    [3.25,38.925,48.577],
    [3.30,38.369,49.249],
    [3.35,37.851,49.875],
    [3.50,36.290,51.762],
    [3.60,35.301,52.957],
    [3.65,34.825,53.533],
    [3.70,34.330,54.128],
    [3.75,33.915,54.633],
    [3.80,33.430,55.220],
    [3.85,32.993,55.743],
    [3.90,32.592,56.233],
    [3.95,32.184,56.727],
    [4.00,31.798,57.193],
    [4.05,31.436,57.631],
    [4.15,30.765,58.443],
    [4.20,30.442,58.832],
    [4.25,30.134,59.205],
    [4.35,29.565,59.894],
    [4.40,29.314,60.198],
    [4.45,29.049,60.517],
    [4.50,28.807,60.810],
    [4.55,28.567,61.101],
    [4.60,28.347,61.367]
    ])

# Extract the example PN data: columns are time, height, length along trajectory
time_data, ht_data, len_data = pn_data.T

# Compute the point-to-point velocity (finite differences of length over time)
len_diff = len_data[1:] - len_data[:-1]
time_diff = time_data[1:] - time_data[:-1]
vel_data = len_diff/time_diff

# Fit the Pecina & Ceplecha (1984) model to observations
t0, l0, v0, v_inf, sigma, c, zr, dens_interp = fitPecinaCeplecha84Model(lat, lon, jd, time_data, ht_data, len_data)

# Compute the h0 limit (reference height corresponding to length l0)
h0 = htFromLen(l0, c, zr)

# Compute the velocity from height and model parameters
# NOTE: heights are converted km -> m for the density grid, then back for velFromHt
ht_arr = ht_dens_arr = np.linspace(1000*np.min(ht_data), 1000*np.max(ht_data), 100)
vel_arr = 1000*velFromHt(ht_arr/1000, h0, v0, v_inf, sigma, c, zr, dens_interp)

# Plot velocity observations vs fit
# (vel_data has one fewer element than ht_data, hence ht_data[1:])
plt.scatter(vel_data, ht_data[1:])
plt.plot(vel_arr/1000, ht_arr/1000)

plt.xlabel("Velocity (km/s)")
plt.ylabel("Height (km)")

plt.show()

# Compute the time from height and model parameters
len_arr = np.linspace(np.min(len_data), np.max(len_data), 100)
time_arr = timeFromLen(len_arr, t0, l0, v0, v_inf, sigma, c, zr, dens_interp)

# Plot time vs length observations vs fit
plt.scatter(time_data, len_data)
plt.plot(time_arr, len_arr)

plt.xlabel("Time (s)")
plt.ylabel("Length (km)")

plt.show()

# NOTE(review): everything below this exit is unreachable as the script stands;
# it is kept as a manual check of the functions against the paper's values.
sys.exit()


### BELOW IS THE CHECK OF THE FUNCTIONS ON THE ORIGINAL VALUES FROM THE PAPER ###

### FIT THE AIR DENSITY MODEL ###
# Fit a 7th order polynomial to the air mass density from NRL-MSISE from the ceiling height to 3 km below
# the fireball - limit the height to 12 km
ht_min = np.min(ht_data) - 3
if ht_min < 12:
    ht_min = 12

# Compute the poly fit
print("Fitting atmosphere polynomial...")
dens_co = fitAtmPoly(lat, lon, 1000*ht_min, 1000*HT_CEILING, jd)

# Create a convenience function for computing the density at the given height
dens_interp = lambda h: atmDensPoly(h, dens_co)

print("   ... done!")

###


### TEST EXAMPLE ###

# PN model parameters taken directly from the paper
v_inf = 15.3456 # km/s
m_inf = 91.2 # kg
sigma = 0.0308 # km^2/s^2
zr = np.radians(34.089)
K = 1.0*1.2*650**(-2/3.0) # m^2/kg^(2/3)
t0 = 3.5 # s
l0 = 51.773 # km
v0 = 12.281 # km/s

# # Compute the velocity for every height using K
# vel_arr = velFromHtPhysicalParams(ht_arr, 1000*v_inf, m_inf, sigma/1e6, zr, K, dens_interp)

# # Plot observations vs fit
# plt.scatter(ht_data[1:], vel_data)
# plt.plot(ht_arr/1000, vel_arr/1000)
# plt.show()

# sys.exit()

###

print("Finding height-length constant...")

# Find the height-length constant and zenith angle by least squares
p0 = [0, np.radians(45)]
res = scipy.optimize.minimize(_lenFromHtResidual, p0, args=(ht_data, len_data))

# Extracted fitted parameters
c, zr = res.x

# zenith angle enters only through cos, so take the absolute value
zr = np.abs(zr)

print("c = {:.2f} km".format(c))
print("zr = {:.2f} deg".format(np.degrees(zr)))

# Compute the h0 limit
h0 = htFromLen(l0, c, zr)

# Compute the velocity from height and model parameters
ht_arr = ht_dens_arr = np.linspace(1000*ht_min, 1000*np.max(ht_data), 100)
vel_arr = 1000*velFromHt(ht_arr/1000, h0, v0, v_inf, sigma, c, zr, dens_interp)

# Plot velocity observations vs fit
plt.scatter(vel_data, ht_data[1:])
plt.plot(vel_arr/1000, ht_arr/1000)

plt.xlabel("Velocity (km/s)")
plt.ylabel("Height (km)")

plt.show()

# Compute the time from height and model parameters
len_arr = np.linspace(np.min(len_data), np.max(len_data), 100)
time_arr = timeFromLen(len_arr, t0, l0, v0, v_inf, sigma, c, zr, dens_interp)

# Plot time vs length observations vs fit
plt.scatter(time_data, len_data)
plt.plot(time_arr, len_arr)

plt.xlabel("Time (s)")
plt.ylabel("Length (km)")

plt.show()
|
wmpgREPO_NAMEWesternMeteorPyLibPATH_START.@WesternMeteorPyLib_extracted@WesternMeteorPyLib-master@wmpl@Utils@PecinaCeplechaFunction.py@.PATH_END.py
|
{
"filename": "_plotutils.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/scipy/py3/scipy/spatial/_plotutils.py",
"type": "Python"
}
|
import numpy as np
from scipy._lib.decorator import decorator as _decorator
__all__ = ['delaunay_plot_2d', 'convex_hull_plot_2d', 'voronoi_plot_2d']
@_decorator
def _held_figure(func, obj, ax=None, **kw):
    """Decorator for the plot functions below: supply a matplotlib Axes
    (creating a new figure when none is given) and preserve the legacy
    matplotlib 1.x 'hold' state around the wrapped call."""
    import matplotlib.pyplot as plt

    if ax is None:
        fig = plt.figure()
        ax = fig.gca()
        return func(obj, ax=ax, **kw)

    # As of matplotlib 2.0, the "hold" mechanism is deprecated.
    # When matplotlib 1.x is no longer supported, this check can be removed.
    was_held = getattr(ax, 'ishold', lambda: True)()
    if was_held:
        return func(obj, ax=ax, **kw)

    try:
        ax.hold(True)
        return func(obj, ax=ax, **kw)
    finally:
        ax.hold(was_held)
def _adjust_bounds(ax, points):
margin = 0.1 * points.ptp(axis=0)
xy_min = points.min(axis=0) - margin
xy_max = points.max(axis=0) + margin
ax.set_xlim(xy_min[0], xy_max[0])
ax.set_ylim(xy_min[1], xy_max[1])
@_held_figure
def delaunay_plot_2d(tri, ax=None):
    """
    Plot the given Delaunay triangulation in 2-D

    Parameters
    ----------
    tri : scipy.spatial.Delaunay instance
        Triangulation to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Delaunay
    matplotlib.pyplot.triplot

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Delaunay, delaunay_plot_2d

    The Delaunay triangulation of a set of random points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> tri = Delaunay(points)

    Plot it:

    >>> _ = delaunay_plot_2d(tri)
    >>> plt.show()

    """
    if tri.points.shape[1] != 2:
        raise ValueError("Delaunay triangulation is not 2-D")

    x, y = tri.points.T
    ax.plot(x, y, 'o')
    # copy the simplices array so triplot cannot mutate the triangulation
    ax.triplot(x, y, tri.simplices.copy())

    _adjust_bounds(ax, tri.points)

    return ax.figure
@_held_figure
def convex_hull_plot_2d(hull, ax=None):
    """
    Plot the given convex hull diagram in 2-D

    Parameters
    ----------
    hull : scipy.spatial.ConvexHull instance
        Convex hull to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    ConvexHull

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import ConvexHull, convex_hull_plot_2d

    The convex hull of a random set of points:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((30, 2))
    >>> hull = ConvexHull(points)

    Plot it:

    >>> _ = convex_hull_plot_2d(hull)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if hull.points.shape[1] != 2:
        raise ValueError("Convex hull is not 2-D")

    ax.plot(hull.points[:, 0], hull.points[:, 1], 'o')
    # each simplex of a 2-D hull is an edge: draw them all as one collection
    line_segments = [hull.points[simplex] for simplex in hull.simplices]
    ax.add_collection(LineCollection(line_segments,
                                     colors='k',
                                     linestyle='solid'))
    _adjust_bounds(ax, hull.points)

    return ax.figure
@_held_figure
def voronoi_plot_2d(vor, ax=None, **kw):
    """
    Plot the given Voronoi diagram in 2-D

    Parameters
    ----------
    vor : scipy.spatial.Voronoi instance
        Diagram to plot
    ax : matplotlib.axes.Axes instance, optional
        Axes to plot on
    show_points : bool, optional
        Add the Voronoi points to the plot.
    show_vertices : bool, optional
        Add the Voronoi vertices to the plot.
    line_colors : string, optional
        Specifies the line color for polygon boundaries
    line_width : float, optional
        Specifies the line width for polygon boundaries
    line_alpha : float, optional
        Specifies the line alpha for polygon boundaries
    point_size : float, optional
        Specifies the size of points

    Returns
    -------
    fig : matplotlib.figure.Figure instance
        Figure for the plot

    See Also
    --------
    Voronoi

    Notes
    -----
    Requires Matplotlib.

    Examples
    --------
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> from scipy.spatial import Voronoi, voronoi_plot_2d

    Create a set of points for the example:

    >>> rng = np.random.default_rng()
    >>> points = rng.random((10,2))

    Generate the Voronoi diagram for the points:

    >>> vor = Voronoi(points)

    Use `voronoi_plot_2d` to plot the diagram:

    >>> fig = voronoi_plot_2d(vor)

    Use `voronoi_plot_2d` to plot the diagram again, with some settings
    customized:

    >>> fig = voronoi_plot_2d(vor, show_vertices=False, line_colors='orange',
    ...                       line_width=2, line_alpha=0.6, point_size=2)
    >>> plt.show()

    """
    from matplotlib.collections import LineCollection

    if vor.points.shape[1] != 2:
        raise ValueError("Voronoi diagram is not 2-D")

    if kw.get('show_points', True):
        point_size = kw.get('point_size', None)
        ax.plot(vor.points[:, 0], vor.points[:, 1], '.', markersize=point_size)
    if kw.get('show_vertices', True):
        ax.plot(vor.vertices[:, 0], vor.vertices[:, 1], 'o')

    line_colors = kw.get('line_colors', 'k')
    line_width = kw.get('line_width', 1.0)
    line_alpha = kw.get('line_alpha', 1.0)

    center = vor.points.mean(axis=0)
    # np.ptp function instead of the ndarray.ptp method, which was
    # removed in NumPy 2.0
    ptp_bound = np.ptp(vor.points, axis=0)

    # split ridges into fully finite ones and ones with an open end;
    # open ridges are drawn out to a far point beyond the plot bounds
    finite_segments = []
    infinite_segments = []
    for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):
        simplex = np.asarray(simplex)
        if np.all(simplex >= 0):
            finite_segments.append(vor.vertices[simplex])
        else:
            i = simplex[simplex >= 0][0]  # finite end Voronoi vertex

            t = vor.points[pointidx[1]] - vor.points[pointidx[0]]  # tangent
            t /= np.linalg.norm(t)
            n = np.array([-t[1], t[0]])  # normal

            # the open ridge extends away from the point cloud's center
            midpoint = vor.points[pointidx].mean(axis=0)
            direction = np.sign(np.dot(midpoint - center, n)) * n
            if (vor.furthest_site):
                direction = -direction
            far_point = vor.vertices[i] + direction * ptp_bound.max()

            infinite_segments.append([vor.vertices[i], far_point])

    ax.add_collection(LineCollection(finite_segments,
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle='solid'))
    ax.add_collection(LineCollection(infinite_segments,
                                     colors=line_colors,
                                     lw=line_width,
                                     alpha=line_alpha,
                                     linestyle='dashed'))

    _adjust_bounds(ax, vor.points)

    return ax.figure
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@scipy@py3@scipy@spatial@_plotutils.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "cosmostat/CosmoPMC",
"repo_path": "CosmoPMC_extracted/CosmoPMC-master/README.md",
"type": "Markdown"
}
|
# CosmoPMC
Cosmology sampling with Population Monte Carlo (PMC)
## Information
### Description
CosmoPMC is a Monte-Carlo sampling method to explore the likelihood of various
cosmological probes. The sampling engine is implemented with the package
pmclib. It is called Population MonteCarlo (PMC), which is a novel technique to
sample from the posterior [Cappé et al. 2008](http://arxiv.org/abs/0710.4242).
PMC is an adaptive importance sampling method which iteratively improves the
proposal to approximate the posterior. This code has been introduced, tested
and applied to various cosmology data sets in
[Wraith, Kilbinger, Benabed et al.(2009)](http://arxiv.org/abs/0903.0837).
Results on the Bayesian evidence using PMC are discussed in
[Kilbinger, Wraith, Benabed et al. (2010)](http://arxiv.org/abs/0912.1614).
### Authors
Martin Kilbinger
Karim Benabed, Olivier Cappé, Jean Coupon, Jean-François Cardoso, Gersende Fort, Henry Joy McCracken, Simon Prunet, Christian P. Robert, Darren Wraith
### Version
1.4
### Installation
#### Automatic installation (recommended)
`CosmoPMC` requires the libraries
[nicaea](https://github.com/CosmoStat/nicaea),
[pmclib](https://github.com/cosmostat/pmclib), and third-party libraries and
programs such as `gsl`, `fftw3`, `lapack`, or `cmake`. Download and run the
automatic script [install_CosmoPMC.sh](install_CosmoPMC.sh) to build all
dependent packages and programs into a `conda` virtual environment. The only
prerequisite (apart from the `bash` shell) is `conda`, which can be downloaded
and installed from
[https://docs.conda.io/en/latest/miniconda.html](https://docs.conda.io/en/latest/miniconda.html).
Once `conda` is installed and in the search path, the installation of `CosmoPMC` should be easy:
```bash
git clone https://github.com/CosmoStat/CosmoPMC
cd CosmoPMC
./install_CosmoPMC.sh --no-R [OPTIONS]
```
Type `./install_CosmoPMC.sh -h` for help.
You might need to activate the `cosmopmc` conda environment after installation, with
```bash
conda activate cosmopmc
```
On success, the command line prompt should start now with the string `(cosmopmc)`.
#### Installation by hand (for advanced users)
You can also install all packages by hand.
First, download and install the CosmoPMC-adjacent packages, from their respective github pages for [nicaea](https://github.com/CosmoStat/nicaea) and [pmclib](https://github.com/CosmoStat/pmclib).
Next, if not already done, download the `CosmoPMC` package from the github repository:
```bash
git clone https://github.com/CosmoStat/CosmoPMC
```
A new directory `CosmoPMC` will be created automatically. Change into that directory, and configure the code with the (poor-man's) python configuration script.
```bash
cd CosmoPMC
./configure.py [FLAGS]
```
You will need to indicate paths to libraries and other flags. At the minimum, you probably need to specify the basic paths to the libraries `nicaea` and `pmclib`. (Specify both even if the paths are the same). Type `./configure.py -h` to see all options.
After configuration, compile the code as follows:
```bash
make
```
#### Topolike set-up
If you need to `topolike` external module, the following steps are required.
1. Compile the `topolike` code and create the `topotest` test program.
2. Create the topolike library. In the `topolike` code directory:
```
ar rv libtopo.a *.o
```
3. On some computing architectures, the linker flags need to be communicated to `CosmoPMC`. This can be done by using
the option `--lflags LFLAGS` for `install_CosmoPMC.sh`, and setting all flags as `LFLAGS`.
### Running the code - quick guide
#### Tempering examples (new) ###
See the directory `Demo/tempering` and the corresponding [readme](Demo/tempering/README.md).
#### <a href="Examples"></a>Examples
To get familiar with `CosmoPMC`, use the examples which are included
in the software package. Simply change to one of the subdirectories in
`Demo/MC_Demo` and proceed on to the subsection
[Run](#Run) below. A quick-to-run likelihood is the supernova one, in `Demo/MC_Demo/SN`.
#### User-defined runs
To run different likelihood combinations, using existing or your own data, the following two
steps are recommended to set up a CosmoPMC run.
1. Data and parameter files
Create a new directory and copy data files. You can do this automatically for the pre-defined
probes of `CosmoPMC` by using
```bash
newdir_pmc.sh
```
When asked, enter the likelihood/data type. More than one type can be chosen by
adding the corresponding (bit-coded) type id’s. Symbolic links to corresponding
files in `COSMOPMC/data` are set, and parameter files from `COSMOPMC/par_files`
are copied to the new directory on request.
2. Configuration file
Create the PMC configuration file `config_pmc`. Examples for existing data modules
can be found in `COSMOPMC/Demo/MC_Demo`. In some cases, information about
the galaxy redshift distribution(s) have to be provided, and the corresponding
files (`nofz*`) copied. See [Examples](#Examples) above.
#### <a name="Run"></a>Run
Type
```bash
/path/to/CosmoPMC/bin/cosmo_pmc.pl -n NCPU
```
to run CosmoPMC on NCPU CPUs. See `cosmo_pmc.pl -h` for more options.
Depending on the type of initial proposal, a maximum-search is started followed by a
Fisher matrix calculation. After that, PMC is started.
Depending on the machine's architecture, the default way to use MPI (calling the executable with `mpirun`) might not be supported. In that case you will have to run the PMC part by using the executable `/path/to/CosmoPMC/bin/cosmo_pmc`, or modifying the `perl` script.
The figure below shows a flow chart of the script’s actions.
<p align="center">
<img width="520" src="Manual/cosmo_pmc_flow.png">
</p>
#### Diagnostics
Check the text files `perplexity` and `enc`. If the perplexity reaches values of 0.8 or
larger, and if the effective number of components (ENC) is not smaller than around
1.5, the posterior has very likely been explored sufficiently. Those and other
files are being updated during run-time and can be monitored while PMC is running.
#### Results
The results are stored in the subdirectory of the last, final PMC iteration,
`iter_{niter-1}/`. The text file `mean` contains mean and confidence levels.
#### Plotting
The file `all_cont2d.pdf` (when `R` is used, or `all_contour2d.pdf` for `yorick+perl`)
shows plots of the 1d- and 2d-marginals. Plots can be
redone or refined, or created from other than the last iteration with
`plot_confidence.R` (or `plot_contour2d.pl`), both scripts are in `/path/to/CosmoPMC/bin`.
To have `cosmo_pmc.pl` create these plots, the program `R` (or `yorick`) have to be installed.
For `R`, also install the libraries `coda`, `getopt`, and `optparse`.
Note that in the default setting the posterior plots are not smoothed, this can be achieved
using various command line options, see `plot_confidence.R -h` (or `plot_contour2d.pl -h`).
### Further reading
Check out the latest version of the [manual](https://github.com/CosmoStat/CosmoPMC/blob/master/Manual/manual.pdf)
The manual for v1.2 can be found on arXiv, at [http://arxiv.org/abs/1101.0950](http://arxiv.org/abs/1101.0950).
CosmoPMC is also listed in ASCL at [ascl:1212.006](http://asterisk.apod.com/viewtopic.php?f=35&t=30375).
### References
If you use CosmoPMC in a publication, please cite the last paper in the list below (Wraith, Kilbinger, Benabed et al. 2009).
[Kilbinger et al. (2011)](https://arxiv.org/abs/1101.0950): Cosmo Population Monte Carlo - User's manual. Note that earlier version of CosmoPMC <=1.2) contain `pmclib` and `nicaea` as built-in code instead of external libraries.
[Kilbinger, Benabed et al. (2012)](http://ascl.net/1212.006): ASCL link of the software package
[Kilbinger, Wraith, Benabed et al. (2010)](https://arxiv.org/abs/0912.1614): Bayesian evidence
[Wraith, Kilbinger, Benabed et al. (2009)](https://arxiv.org/abs/0903.0837): Comparison of PMC and MCMC, parameter estimation. The first paper to use CosmoPMC.
|
cosmostatREPO_NAMECosmoPMCPATH_START.@CosmoPMC_extracted@CosmoPMC-master@README.md@.PATH_END.py
|
{
"filename": "cities.py",
"repo_name": "brandon-rhodes/pyephem",
"repo_path": "pyephem_extracted/pyephem-master/generate/cities.py",
"type": "Python"
}
|
#!/usr/bin/env python
from urllib2 import urlopen, HTTPError
import re
# Look up each city in "cities.in" on the http://www.fallingrain.com/
# web site, and append their geographic data into "cities.out".
cin = open('cities.in', 'r')
cout = open('cities.out', 'a')
def transchar(c):
    """Translate URL-unsafe characters to their decimal character code,
    as used by fallingrain.com index URLs; all others pass through."""
    codes = {' ': '32', '-': '45', '.': '46'}
    return codes.get(c, c)
def get_page(name, n):
    """Fetch the fallingrain.com index page addressed by the first *n*
    characters of *name* and return its body, or None if the page does
    not exist (HTTP error).  NOTE: uses the Python 2 urllib2 API."""
    url = 'http://www.fallingrain.com/world/a/' + '/'.join(
        [ transchar(c) for c in name[:n] ]
        )
    try:
        return urlopen(url).read()
    except HTTPError:
        return None
def rename(name):
    """Return a preferred display name for a handful of special cases;
    any other name is returned unchanged."""
    overrides = {
        'Hong Kong historical': 'Hong Kong',
        'Mexico': 'Mexico City',
        'Sankt-Peterburg': 'St. Petersburg',
    }
    return overrides.get(name) or name
class Entry(object):
    """Plain record for one location row parsed from a results page;
    attributes (name, type, lat, lon, elev, pop) are set by get_entries()."""
    pass
def get_entries(content):
    """Parse the location rows out of a fallingrain results page.

    Table rows ('<tr><td>...') are split on HTML tags; the resulting
    fields at fixed positions give name, type, latitude, longitude,
    elevation and population.  Returns a list of Entry objects.
    """
    entries = []
    for row in content.split('\n'):
        if not row.startswith('<tr><td>'):
            continue
        fields = re.split(r'<[^>]*>', row)
        entry = Entry()
        entry.name = fields[3]
        entry.type = fields[5]
        entry.lat = fields[8]
        entry.lon = fields[9]
        entry.elev = fields[10]
        entry.pop = int(fields[11])
        entries.append(entry)
    return entries
for line in cin:
line = line.strip()
if not line or line.startswith('#'):
continue
name = line
i = 4 # first guess for how deep the URL goes
content = get_page(name, i)
if not content: # we went too deep
while not content:
i -= 1
content = get_page(name, i)
else:
while content and i <= len(name):
old_content = content
i += 1
content = get_page(name, i)
content = old_content
entries = get_entries(content)
entries = [ e for e in entries if e.type == 'city' ]
entries = [ e for e in entries if e.name == name ]
if entries:
esort = [ (e.pop, e) for e in entries ]
esort.sort()
e = esort[-1][1]
s = " '%s': ('%s', '%s', %f)," % (rename(e.name), e.lat, e.long,
float(e.elev) * 0.3048)
print s
print >>cout, s
else:
print '-------- cannot find:', name
print >>cout, '-------- cannot find:', name
|
brandon-rhodesREPO_NAMEpyephemPATH_START.@pyephem_extracted@pyephem-master@generate@cities.py@.PATH_END.py
|
{
"filename": "Quick_start.md",
"repo_name": "teuben/DataComb",
"repo_path": "DataComb_extracted/DataComb-main/Quick_start.md",
"type": "Markdown"
}
|
# Quick Start Guide
This guide will walk you through how to run the data combination
script on example data and how to quickly run the script in the
future.
The general overview of the steps to get combination images:
1. Set data and user specific parameters in `DC_pars.py`
2. Execute `DC_locals.py` - if it was not done via CASA's startup.py
3. Execute `DC_script.py`
## Preparation
Before following this guide, you should have already:
- Installed CASA 6
- Ensure **astropy** is installed
- Installed analysisutils
- Downloaded any example data and know the path to the data
- Configured your local paths (see [Preparation.md](Preparation.md))
## Step 1: Set parameters in DC_pars.py
You need to have a `DC_pars.py` for the data combination script to run. For your own data
simply copy an example parameter file and edit. For this example, we will use `DC_pars_M100.py`.
```bash
cp templates/DC_pars_M100.py DC_pars.py
```
Make sure that you have the data in the correct directory and have set up your local directories correctly.
## Step 2 (Optional): Execute `DC_locals.py`
If you DID NOT put a reference to `DC_locals.py` in your `~/.casa/startup.py` file,
you need to execute `DC_locals.py` in your current CASA session (and you will have
to continue to do so each time you start a new CASA session to work with this data combination script) by
```python
execfile("DC_locals.py")
```
*Q: do we need globals() here ?*
## Step 3: Run `DC_script.py`
The last step is to run the scripts to do the data combination in CASA
```python
execfile("DC_script.py")
```
## Other examples
Other example data and parameter files that we provide:
* M100 - the casaguide example
* GMC - the skymodel (formerly called skymodel-b)
|
teubenREPO_NAMEDataCombPATH_START.@DataComb_extracted@DataComb-main@Quick_start.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/scatter3d/stream/__init__.py",
"type": "Python"
}
|
import sys

# On Python < 3.7 there is no module-level __getattr__ (PEP 562), so the
# validators must be imported eagerly.
if sys.version_info < (3, 7):
    from ._token import TokenValidator
    from ._maxpoints import MaxpointsValidator
else:
    # On 3.7+ defer the imports until first attribute access to keep the
    # package import cheap.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@scatter3d@stream@__init__.py@.PATH_END.py
|
{
"filename": "setting.py",
"repo_name": "veusz/veusz",
"repo_path": "veusz_extracted/veusz-master/veusz/setting/setting.py",
"type": "Python"
}
|
# Copyright (C) 2005 Jeremy S. Sanders
# Email: Jeremy Sanders <jeremy@jeremysanders.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
###############################################################################
"""Module for holding setting values.
e.g.
s = Int('foo', 5)
s.get()
s.set(42)
s.fromUIText('42')
"""
import re
import sys
import numpy as N
from .. import qtall as qt
from . import controls
from .settingdb import settingdb, uilocale, ui_floattostring, ui_stringtofloat
from .reference import ReferenceBase, Reference
from .. import utils
from .. import datasets
class OnModified(qt.QObject):
    """onmodified is emitted from an object contained in each setting."""

    # Setting itself is not a QObject, so every Setting instance holds one
    # of these to expose a Qt signal for change notification.
    onModified = qt.pyqtSignal()
class Setting:
    """A class to store a value with a particular type.

    A setting may either hold a plain value or a Reference pointing at
    another setting (e.g. a stylesheet entry); get()/set() hide the
    difference from callers.
    """

    # differentiate widgets, settings and setting
    nodetype = 'setting'

    typename = 'setting'

    # various items in class hierarchy
    iswidget = False
    issetting = True
    issettings = False

    def __init__(self, name, value, descr='', usertext='',
                 formatting=False, hidden=False):
        """Initialise the values.

        name: setting name
        value: default value and initial value
        descr:  description of the setting
        usertext: name of setting for user
        formatting: whether setting applies to formatting
        hidden: hide widget from user
        """
        self.readonly = False
        self.parent = None # hold a link to the parent object
        self.name = name
        self.descr = descr
        self.usertext = usertext
        self.formatting = formatting
        self.hidden = hidden
        self.default = value
        self.onmodified = OnModified()
        self._val = self._ref = None

        # calls the set function for the val property
        self.val = value

    def _copyHelper(self, before, after, optional):
        """Help copy an object.

        before are arguments before val
        after are arguments after val
        optional as optional arguments
        """

        # copy the reference, if set, otherwise the plain value
        if self._ref:
            val = self._ref
        else:
            val = self._val

        args = (self.name,) + before + (val,) + after

        opt = optional.copy()
        opt['descr'] = self.descr
        opt['usertext'] = self.usertext
        opt['formatting'] = self.formatting
        opt['hidden'] = self.hidden

        obj = self.__class__(*args, **opt)
        obj.readonly = self.readonly
        obj.default = self.default
        return obj

    def copy(self):
        """Make a setting which has its values copied from this one.

        This needs to be overridden if the constructor changes
        """
        return self._copyHelper((), (), {})

    def get(self):
        """Get the value."""
        if self._ref:
            # follow the reference chain to the real setting
            return self._ref.resolve(self).get()
        else:
            return self._val

    def set(self, v):
        """Set the value."""
        if isinstance(v, ReferenceBase):
            self._val = None
            self._ref = v
        else:
            self._val = self.normalize(v)
            self._ref = None
        self.onmodified.onModified.emit()

    val = property(
        get, set, None,
        'Get or modify the value of the setting')

    def isReference(self):
        """Is this a setting a reference to another object."""
        return bool(self._ref)

    def getReference(self):
        """Return the reference object. Raise ValueError if not a reference"""
        if self._ref:
            return self._ref
        else:
            raise ValueError("Setting is not a reference")

    def getStylesheetLink(self):
        """Get text that this setting should default to linked to the
        stylesheet."""
        path = []
        obj = self
        # walk up to the owning widget, collecting setting names
        while not obj.parent.iswidget:
            path.insert(0, obj.name)
            obj = obj.parent
        path = ['', 'StyleSheet', obj.parent.typename] + path
        return '/'.join(path)

    def linkToStylesheet(self):
        """Make this setting link to stylesheet setting, if possible."""
        self.set( Reference(self.getStylesheetLink()) )

    @property
    def path(self):
        """Return full path of setting."""
        path = []
        obj = self
        while obj is not None:
            # logic easier to understand here
            # do not add settings name for settings of widget
            if not obj.iswidget and obj.parent.iswidget:
                pass
            else:
                if obj.name == '/':
                    path.insert(0, '')
                else:
                    path.insert(0, obj.name)
            obj = obj.parent
        return '/'.join(path)

    def toUIText(self):
        """Convert the type to text to show in UI."""
        return ""

    def fromUIText(self, text):
        """Convert text from UI into type for setting.

        Raises utils.InvalidType if cannot convert."""
        return None

    def saveText(self, saveall, rootname = ''):
        """Return text to restore the value of this setting."""

        if (saveall or not self.isDefault()) and not self.readonly:
            if self._ref:
                return "SetToReference('%s%s', %s)\n" % (
                    rootname, self.name, repr(self._ref.value))
            else:
                return "Set('%s%s', %s)\n" % (
                    rootname, self.name, utils.rrepr(self.val))
        else:
            return ''

    def setOnModified(self, fn):
        """Set the function to be called on modification (passing True)."""
        self.onmodified.onModified.connect(fn)

        if self._ref:
            # tell references to notify us if they are modified
            self._ref.setOnModified(self, fn)

    def removeOnModified(self, fn):
        """Remove the function from the list of function to be called."""
        self.onmodified.onModified.disconnect(fn)

    def newDefault(self, value):
        """Update the default and the value."""
        self.default = value
        self.val = value

    def isDefault(self):
        """Is the current value a default?

        This also returns true if it is linked to the appropriate stylesheet
        """
        if self._ref and isinstance(self.default, ReferenceBase):
            return self._ref.value == self.default.value
        else:
            return self._val == self.default

    def isDefaultLink(self):
        """Is this a link to the default stylesheet value."""
        return self._ref and self._ref.value == self.getStylesheetLink()

    def setSilent(self, val):
        """Set the setting, without propagating modified flags.

        This shouldn't often be used as it defeats the automatic updating.
        Used for temporary modifications."""
        self._ref = None
        self._val = self.normalize(val)

    def normalize(self, val):
        """Convert external value to normalized form for storing

        Raises a utils.InvalidType if this is not possible."""
        return val

    def makeControl(self, *args):
        """Make a qt control for editing the setting.

        The control emits settingValueChanged() when the setting has
        changed value."""
        return None

    def getDocument(self):
        """Return document."""
        p = self.parent
        while p:
            if p.iswidget:
                return p.document
            p = p.parent
        return None

    def getWidget(self):
        """Return associated widget."""
        w = self.parent
        while not w.iswidget:
            w = w.parent
        return w

    def safeEvalHelper(self, text):
        """Evaluate an expression, catching naughtiness."""
        try:
            comp = self.getDocument().evaluate.compileCheckedExpression(
                text)
            if comp is None:
                raise utils.InvalidType
            return float( eval(comp, self.getDocument().evaluate.context) )
        except:
            # any failure in compiling/evaluating is reported uniformly
            raise utils.InvalidType
# forward setting to another setting
class SettingBackwardCompat(Setting):
    """Forward setting requests to another setting.

    This is used for backward-compatibility: old documents refer to this
    setting's name, and operations are transparently redirected to the
    setting at the new location.
    """

    typename = 'backward-compat'

    def __init__(self, name, newrelpath, val, translatefn=None,
                 **args):
        """Point this setting to another.

        newrelpath is a path relative to this setting's parent
        translatefn: optional function to convert old values to new ones
        """
        self.translatefn = translatefn
        args['hidden'] = True
        Setting.__init__(self, name, val, **args)
        self.relpath = newrelpath

    def getForward(self):
        """Get setting this setting forwards to."""
        doc = self.getDocument()
        return doc.resolveSettingPath(self.parent, self.relpath)

    def normalize(self, val):
        # NOTE(review): implicitly returns None when parent is not yet set
        if self.parent is not None:
            return self.getForward().normalize(val)

    def toUIText(self):
        return self.getForward().toUIText()

    def fromUIText(self, val):
        return self.getForward().fromUIText(val)

    def set(self, val):
        if self.parent is not None and not isinstance(val, ReferenceBase):
            if self.translatefn:
                # convert old-style value to the new setting's form
                val = self.translatefn(val)
            self.getForward().set(val)

    def isDefault(self):
        return self.getForward().isDefault()

    def get(self):
        return self.getForward().get()

    def copy(self):
        return self._copyHelper(
            (self.relpath,), (), {'translatefn': self.translatefn})

    def makeControl(self, *args):
        # never shown in the UI
        return None

    def saveText(self, saveall, rootname = ''):
        # never saved; the forwarded-to setting saves itself
        return ''

    def linkToStylesheet(self):
        """Do nothing for backward compatibility settings."""
        pass
# Store strings
class Str(Setting):
    """Setting holding a text string."""

    typename = 'str'

    def normalize(self, val):
        """Accept only str values; anything else is invalid."""
        if not isinstance(val, str):
            raise utils.InvalidType
        return val

    def toUIText(self):
        # the UI shows the raw string
        return self.val

    def fromUIText(self, text):
        # the UI text is the value itself
        return text

    def makeControl(self, *args):
        # single-line string edit control
        return controls.String(self, *args)
class Notes(Str):
    """String for making notes."""

    typename = 'str-notes'

    def makeControl(self, *args):
        # multi-line notes control instead of a single-line string edit
        return controls.Notes(self, *args)
# Store bools
class Bool(Setting):
    """Bool setting."""

    typename = 'bool'

    def normalize(self, val):
        """Accept exactly bool or int values, converted to bool."""
        if type(val) is bool or type(val) is int:
            return bool(val)
        raise utils.InvalidType

    def toUIText(self):
        if self.val:
            return 'True'
        return 'False'

    def fromUIText(self, text):
        """Map common true/false words (case-insensitive) to a bool."""
        lowered = text.strip().lower()
        if lowered in ('true', '1', 't', 'y', 'yes'):
            return True
        if lowered in ('false', '0', 'f', 'n', 'no'):
            return False
        raise utils.InvalidType

    def makeControl(self, *args):
        return controls.Bool(self, *args)
# Storing integers
class Int(Setting):
    """Integer settings."""

    typename = 'int'

    def __init__(self, name, value, minval=-1000000, maxval=1000000,
                 **args):
        """Initialise the values.

        minval is minimum possible value of setting
        maxval is maximum possible value of setting
        """
        self.minval = minval
        self.maxval = maxval
        Setting.__init__(self, name, value, **args)

    def copy(self):
        """Make a setting which has its values copied from this one.

        This needs to be overridden if the constructor changes
        """
        return self._copyHelper((), (), {
            'minval': self.minval,
            'maxval': self.maxval})

    def normalize(self, val):
        """Accept ints within [minval, maxval]."""
        if isinstance(val, int):
            if val >= self.minval and val <= self.maxval:
                return val
            else:
                raise utils.InvalidType('Out of range allowed')
        raise utils.InvalidType

    def toUIText(self):
        return uilocale.toString(self.val)

    def fromUIText(self, text):
        """Convert localized text to an int within the allowed range."""
        i, ok = uilocale.toLongLong(text)
        if not ok:
            # fix: raise utils.InvalidType as the other settings do
            # (was a bare ValueError, inconsistent with e.g. IntOrAuto)
            raise utils.InvalidType
        if i >= self.minval and i <= self.maxval:
            return i
        else:
            raise utils.InvalidType('Out of range allowed')

    def makeControl(self, *args):
        return controls.Int(self, *args)
def _finiteRangeFloat(f, minval=-1e300, maxval=1e300):
"""Return a finite float in range or raise exception otherwise."""
f = float(f)
if not N.isfinite(f):
raise utils.InvalidType('Finite values only allowed')
if f < minval or f > maxval:
raise utils.InvalidType('Out of range allowed')
return f
# for storing floats
class Float(Setting):
    """Float settings."""

    typename = 'float'

    def __init__(self, name, value, minval=-1e200, maxval=1e200,
                 **args):
        """Initialise the values.

        minval/maxval give the allowed range of the setting.
        """
        self.minval = minval
        self.maxval = maxval
        Setting.__init__(self, name, value, **args)

    def copy(self):
        """Return a new setting copied from this one.

        Needs overriding if the constructor signature changes.
        """
        extra = {'minval': self.minval, 'maxval': self.maxval}
        return self._copyHelper((), (), extra)

    def normalize(self, val):
        """Accept ints/floats that are finite and within the range."""
        if isinstance(val, (int, float)):
            return _finiteRangeFloat(
                val, minval=self.minval, maxval=self.maxval)
        raise utils.InvalidType

    def toUIText(self):
        return ui_floattostring(self.val)

    def fromUIText(self, text):
        """Convert text to a float, evaluating as an expression if needed."""
        try:
            value = ui_stringtofloat(text)
        except ValueError:
            # not a plain number, so try to evaluate as an expression
            value = self.safeEvalHelper(text)
        return self.normalize(value)

    def makeControl(self, *args):
        return controls.Edit(self, *args)
class FloatOrAuto(Float):
    """Save a float or text auto."""

    typename = 'float-or-auto'

    def normalize(self, val):
        """Accept a finite number in range, or any case of 'auto'."""
        if type(val) in (int, float):
            return _finiteRangeFloat(
                val, minval=self.minval, maxval=self.maxval)
        if isinstance(val, str) and val.strip().lower() == 'auto':
            return 'Auto'
        raise utils.InvalidType

    def toUIText(self):
        if isinstance(self.val, str) and self.val.lower() == 'auto':
            return 'Auto'
        return ui_floattostring(self.val)

    def fromUIText(self, text):
        """Map 'auto' (any case) to 'Auto'; otherwise parse as float."""
        if text.strip().lower() == 'auto':
            return 'Auto'
        return Float.fromUIText(self, text)

    def makeControl(self, *args):
        return controls.Choice(self, True, ['Auto'], *args)
class FloatSlider(Float):
    """A float with a slider control."""

    typename = 'float-slider'

    def __init__(self, name, value, step=10, tick=50, scale=1, **args):
        """Step is the size to step by."""
        Float.__init__(self, name, value, **args)
        self.step = step
        self.tick = tick
        self.scale = scale

    def copy(self):
        """Return a copy, preserving range and slider parameters."""
        extra = {
            'minval': self.minval,
            'maxval': self.maxval,
            'step': self.step,
            'tick': self.tick,
            'scale': self.scale,
        }
        return self._copyHelper((), (), extra)

    def makeControl(self, *args):
        return controls.FloatSlider(self, *args)
class IntOrAuto(Setting):
    """Save an int or text auto."""

    typename = 'int-or-auto'

    def normalize(self, val):
        """Accept an int, or any case of the text 'auto'."""
        if isinstance(val, int):
            return val
        if isinstance(val, str) and val.strip().lower() == 'auto':
            return 'Auto'
        raise utils.InvalidType

    def toUIText(self):
        if isinstance(self.val, str) and self.val.lower() == 'auto':
            return 'Auto'
        return uilocale.toString(self.val)

    def fromUIText(self, text):
        """Convert UI text to an int or the canonical 'Auto'."""
        if text.strip().lower() == 'auto':
            return 'Auto'
        number, ok = uilocale.toLongLong(text)
        if not ok:
            raise utils.InvalidType
        return number

    def makeControl(self, *args):
        return controls.Choice(self, True, ['Auto'], *args)
# these are functions used by the distance setting below.
# they don't work as class methods
def _distPhys(match, painter, mult):
"""Convert a physical unit measure in multiples of points."""
return painter.pixperpt * mult * float(match.group(1))
def _idistval(val, unit):
"""Convert value to text, dropping zeros and . points on right."""
return ("%.3f" % val).rstrip('0').rstrip('.') + unit
def _distInvPhys(pixdist, painter, mult, unit):
"""Convert number of pixels into physical distance."""
return _idistval(pixdist / (mult*painter.pixperpt), unit)
def _distPerc(match, painter):
"""Convert from a percentage of maxdim."""
return painter.maxdim * 0.01 * float(match.group(1))
def _distInvPerc(pixdist, painter):
"""Convert pixel distance into percentage."""
return _idistval(pixdist * 100. / painter.maxdim, '%')
def _distFrac(match, painter):
"""Convert from a fraction a/b of maxdim."""
try:
return painter.maxdim * float(match.group(1))/float(match.group(4))
except ZeroDivisionError:
return 0.
def _distRatio(match, painter):
"""Convert from a simple 0.xx ratio of maxdim."""
# if it's greater than 1 then assume it's a point measurement
if float(match.group(1)) > 1.:
return _distPhys(match, painter, 1)
return painter.maxdim * float(match.group(1))
# regular expression to match distances
# (compiled with re.VERBOSE; accepts forms like '1.5cm', '10pt', '5%',
#  '1/3' or a bare ratio '0.5' — see Distance.unit_func for group(2) keys)
distre_expr = r'''^
[ ]* # optional whitespace
(\.?[0-9]+|[0-9]+\.[0-9]*) # a floating point number
[ ]* # whitespace
(cm|pt|mm|inch|in|"|%|| # ( unit, no unit,
(?P<slash>/) ) # or / )
(?(slash)[ ]* # if it was a slash, match any whitespace
(\.?[0-9]+|[0-9]+\.[0-9]*)) # and match following fp number
[ ]* # optional whitespace
$'''
class Distance(Setting):
    """A veusz distance measure, e.g. 1pt or 3%.

    The value is stored as text; conversion to device (pixel) units is
    done by convertDistance(), which uses the painter's metrics
    (pixperpt and maxdim).
    """

    typename = 'distance'

    # match a distance
    distre = re.compile(distre_expr, re.VERBOSE)

    # functions to convert from unit values to points
    # (keys are regex group(2); 720/25.4 == 72/2.54 pt per cm, 72 pt per inch)
    unit_func = {
        'cm': lambda match, painter:
        _distPhys(match, painter, 720/25.4),
        'pt': lambda match, painter:
        _distPhys(match, painter, 1.),
        'mm': lambda match, painter:
        _distPhys(match, painter, 72/25.4),
        'in': lambda match, painter:
        _distPhys(match, painter, 72.),
        'inch': lambda match, painter:
        _distPhys(match, painter, 72.),
        '"': lambda match, painter:
        _distPhys(match, painter, 72.),
        '%': _distPerc,
        '/': _distFrac,
        '': _distRatio
    }

    # inverse functions for converting points to units
    inv_unit_func = {
        'cm': lambda match, painter:
        _distInvPhys(match, painter, 720/25.4, 'cm'),
        'pt': lambda match, painter:
        _distInvPhys(match, painter, 1., 'pt'),
        'mm': lambda match, painter:
        _distInvPhys(match, painter, 72/25.4, 'mm'),
        'in': lambda match, painter:
        _distInvPhys(match, painter, 72., 'in'),
        'inch': lambda match, painter:
        _distInvPhys(match, painter, 72., 'in'),
        '"': lambda match, painter:
        _distInvPhys(match, painter, 72., 'in'),
        '%': _distInvPerc,
        '/': _distInvPerc,
        '': _distInvPerc
    }

    @classmethod
    def isDist(kls, dist):
        """Is the text a valid distance measure?"""
        return kls.distre.match(dist) is not None

    def normalize(self, val):
        # kept as text; only syntax is validated here
        if self.distre.match(val) is not None:
            return val
        else:
            raise utils.InvalidType

    def toUIText(self):
        # convert decimal point to display locale
        return self.val.replace('.', uilocale.decimalPoint())

    def fromUIText(self, text):
        # convert decimal point from display locale
        text = text.replace(uilocale.decimalPoint(), '.')

        if self.isDist(text):
            return text
        else:
            raise utils.InvalidType

    def makeControl(self, *args):
        return controls.Distance(self, *args)

    @classmethod
    def convertDistance(kls, painter, dist):
        '''Convert a distance to plotter units.

        dist: eg 0.1 (fraction), 10% (percentage), 1/10 (fraction),
        10pt, 1cm, 20mm, 1inch, 1in, 1" (size)
        painter: painter to get metrics to convert physical sizes
        '''
        # match distance against expression
        m = kls.distre.match(dist)

        if m is not None:
            # lookup function to call to do conversion
            func = kls.unit_func[m.group(2)]
            return func(m, painter)

        # none of the regexps match
        raise ValueError(
            "Cannot convert distance in form '%s'" % dist )

    def convert(self, painter):
        """Convert this setting's distance as above"""
        return self.convertDistance(painter, self.val)

    def convertPts(self, painter):
        """Get the distance in points."""
        return self.convert(painter) / painter.pixperpt

    def convertInverse(self, distpix, painter):
        """Convert distance in pixels into units of this distance.
        """
        m = self.distre.match(self.val)
        if m is not None:
            # if it matches convert back using the same unit
            inversefn = self.inv_unit_func[m.group(2)]
        else:
            # otherwise force unit
            inversefn = self.inv_unit_func['cm']

        # do inverse mapping
        return inversefn(distpix, painter)
class DistancePt(Distance):
    """For a distance in points."""

    def makeControl(self, *args):
        # specialised control offering a list of point sizes
        return controls.DistancePt(self, *args)
class DistancePhysical(Distance):
    """For physical distances (no fractional)."""

    @classmethod
    def isDist(kls, val):
        """Is the text a valid physical distance (not %, fraction, ratio)?

        fix: restored @classmethod — the base class declares isDist as a
        classmethod, so class-level calls DistancePhysical.isDist(text)
        must keep working; as an instance method the text was bound as
        self and the call broke.
        """
        m = kls.distre.match(val)
        if m:
            # disallow non-physical distances
            if m.group(2) not in ('/', '', '%'):
                return True
        return False

    def makeControl(self, *args):
        return controls.Distance(self, *args, physical=True)
class DistanceOrAuto(Distance):
    """A distance or the value Auto"""

    typename = 'distance-or-auto'

    # also accept the literal text 'Auto'
    distre = re.compile( distre_expr + r'|^Auto$', re.VERBOSE )

    def isAuto(self):
        """Return whether the current value is 'Auto'."""
        return self.val == 'Auto'

    def makeControl(self, *args):
        return controls.Distance(self, allowauto=True, *args)
class Choice(Setting):
    """One out of a list of strings."""

    # maybe should be implemented as a dict to speed up checks
    typename = 'choice'

    def __init__(self, name, vallist, val, descriptions=None,
                 uilist=None, **args):
        """Setting val must be in vallist.

        descriptions is an optional addon to put a tooltip on each item
        in the control.

        uilist is a tuple/list of text to show to the user, instead of vallist
        """
        assert type(vallist) in (list, tuple)

        self.vallist = vallist
        self.descriptions = descriptions
        self.uilist = uilist

        Setting.__init__(self, name, val, **args)

    def copy(self):
        """Make a copy of the setting."""
        extra = {
            'descriptions': self.descriptions,
            'uilist': self.uilist,
        }
        return self._copyHelper((self.vallist,), (), extra)

    def normalize(self, val):
        """Only values from vallist are acceptable."""
        if val not in self.vallist:
            raise utils.InvalidType
        return val

    def toUIText(self):
        return self.val

    def fromUIText(self, text):
        """Only text matching an allowed value is accepted."""
        if text not in self.vallist:
            raise utils.InvalidType
        return text

    def makeControl(self, *args):
        return controls.Choice(
            self, False, self.vallist,
            descriptions=self.descriptions,
            uilist=self.uilist, *args
        )
class ChoiceOrMore(Setting):
    """One out of a list of strings, or anything else."""

    # maybe should be implemented as a dict to speed up checks
    typename = 'choice-or-more'

    def __init__(self, name, vallist, val, descriptions=None, **args):
        """Setting has val must be in vallist.

        descriptions is an optional addon to put a tooltip on each item
        in the control
        """
        self.vallist = vallist
        self.descriptions = descriptions
        Setting.__init__(self, name, val, **args)

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper(
            (self.vallist,), (), {'descriptions': self.descriptions})

    def normalize(self, val):
        # any value is acceptable
        return val

    def toUIText(self):
        return self.val

    def fromUIText(self, text):
        return text

    def makeControl(self, *args):
        kwargs = { 'descriptions': self.descriptions }
        return controls.Choice(self, True, self.vallist, *args, **kwargs)
class FloatChoice(ChoiceOrMore):
    """A numeric value, which can also be chosen from the list of values."""

    typename = 'float-choice'

    def normalize(self, val):
        """Accept any finite int/float."""
        if isinstance(val, (int, float)):
            return _finiteRangeFloat(val)
        raise utils.InvalidType

    def toUIText(self):
        return ui_floattostring(self.val)

    def fromUIText(self, text):
        """Convert text to a float, evaluating as an expression on failure."""
        try:
            value = ui_stringtofloat(text)
        except ValueError:
            # try to evaluate
            value = self.safeEvalHelper(text)
        return self.normalize(value)

    def makeControl(self, *args):
        kwargs = {'descriptions': self.descriptions}
        labels = [ui_floattostring(x) for x in self.vallist]
        return controls.Choice(self, True, labels, *args, **kwargs)
class FloatDict(Setting):
    """A dictionary, taking floats as values."""

    typename = 'float-dict'

    def normalize(self, val):
        """Accept a dict whose values are all floats/ints; return a copy."""
        if type(val) != dict:
            raise utils.InvalidType
        for entry in val.values():
            if type(entry) not in (float, int):
                raise utils.InvalidType
        # return copy
        return dict(val)

    def toUIText(self):
        """Format as one 'key = value' line per entry, sorted by key."""
        lines = []
        for key in sorted(self.val):
            lines.append('%s = %s' % (key, ui_floattostring(self.val[key])))
        return '\n'.join(lines)

    def fromUIText(self, text):
        """Do conversion from lines of 'name = value' into a dict."""
        out = {}
        for line in text.split('\n'):
            line = line.strip()
            if not line:
                continue
            # break up using =
            parts = line.split('=')
            if len(parts) != 2:
                raise utils.InvalidType
            try:
                value = ui_stringtofloat(parts[1])
            except ValueError:
                raise utils.InvalidType
            out[ parts[0].strip() ] = value
        return out

    def makeControl(self, *args):
        return controls.MultiLine(self, *args)
class FloatList(Setting):
    """A list of float values."""

    typename = 'float-list'

    def normalize(self, val):
        """Accept a list/tuple of ints/floats; return a list of floats."""
        if type(val) not in (list, tuple):
            raise utils.InvalidType

        # horribly slow test for invalid entries
        out = []
        for item in val:
            if type(item) not in (float, int):
                raise utils.InvalidType
            out.append( float(item) )
        return out

    def toUIText(self):
        """Make a string a, b, c."""
        # can't use the comma for splitting if used as a decimal point
        if uilocale.decimalPoint() == ',':
            sep = '; '
        else:
            sep = ', '
        return sep.join( [ui_floattostring(x) for x in self.val] )

    def fromUIText(self, text):
        """Convert from a, b, c or a b c."""
        # don't use commas if it is the decimal separator
        if uilocale.decimalPoint() == ',':
            splitre = r'[\t\n; ]+'
        else:
            splitre = r'[\t\n, ]+'

        out = []
        for part in re.split(splitre, text.strip()):
            if not part:
                continue
            try:
                out.append( ui_stringtofloat(part) )
            except ValueError:
                # fall back to evaluating the entry as an expression
                out.append( self.safeEvalHelper(part) )
        return out

    def makeControl(self, *args):
        return controls.String(self, *args)
class WidgetPath(Str):
    """A setting holding a path to a widget. This is checked for validity."""

    typename = 'widget-path'

    def __init__(self, name, val, relativetoparent=True,
                 allowedwidgets=None,
                 **args):
        """Initialise the setting.

        The widget is located relative to
        parent if relativetoparent is True, otherwise this widget.

        If allowedwidgets is not None, only those widgets types in the list are
        allowed by this setting.
        """
        Str.__init__(self, name, val, **args)
        self.relativetoparent = relativetoparent
        self.allowedwidgets = allowedwidgets

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper(
            (), (),
            {
                'relativetoparent': self.relativetoparent,
                'allowedwidgets': self.allowedwidgets,
            }
        )

    def getReferredWidget(self, val = None):
        """Get the widget referred to. We double-check here to make sure
        it's the one.

        Returns None if setting is blank
        utils.InvalidType is raised if there's a problem
        """
        # this is a bit of a hack, so we don't have to pass a value
        # for the setting (which we need to from normalize)
        if val is None:
            val = self.val
        if val == '':
            return None

        # find the widget associated with this setting
        # NOTE(review): assumes some ancestor is a widget; a detached
        # parent chain ending in None would raise AttributeError here
        widget = self
        while not widget.iswidget:
            widget = widget.parent

        # usually makes sense to give paths relative to a parent of a widget
        if self.relativetoparent:
            widget = widget.parent

        # resolve the text to a widget
        try:
            widget = widget.document.resolveWidgetPath(widget, val)
        except ValueError:
            raise utils.InvalidType

        # check the widget against the list of allowed types if given
        if self.allowedwidgets is not None:
            allowed = False
            for c in self.allowedwidgets:
                if isinstance(widget, c):
                    allowed = True
            if not allowed:
                raise utils.InvalidType
        return widget
class Dataset(Str):
    """A setting to choose from the possible datasets."""

    typename = 'dataset'

    def __init__(self, name, val, dimensions=1, datatype='numeric',
                 **args):
        """
        dimensions is the number of dimensions the dataset needs
        datatype is the kind of data required (e.g. 'numeric')
        """
        self.dimensions = dimensions
        self.datatype = datatype
        Setting.__init__(self, name, val, **args)

    def copy(self):
        """Make a setting which has its values copied from this one."""
        return self._copyHelper(
            (), (),
            {
                'dimensions': self.dimensions,
                'datatype': self.datatype,
            }
        )

    def makeControl(self, *args):
        """Allow user to choose between the datasets."""
        return controls.Dataset(
            self, self.getDocument(), self.dimensions,
            self.datatype, *args)

    def getData(self, doc):
        """Return the named dataset, or None (implicitly) if it is
        missing or does not match the required datatype/dimensions.
        """
        d = doc.data.get(self.val)
        if ( d is not None and
             d.datatype == self.datatype and
             (d.dimensions == self.dimensions or self.dimensions == 'all') ):
            return d
class Strings(Setting):
    """A multiple set of strings."""

    typename = 'str-multi'

    def normalize(self, val):
        """Takes a tuple/list of strings:
        ('ds1','ds2'...)
        """
        if isinstance(val, str):
            # a bare string becomes a one-element tuple
            return (val, )

        if type(val) not in (list, tuple):
            raise utils.InvalidType

        # check each entry in the list is appropriate
        for item in val:
            if not isinstance(item, str):
                raise utils.InvalidType

        return tuple(val)

    def makeControl(self, *args):
        """Allow user to choose between the datasets."""
        return controls.Strings(self, self.getDocument(), *args)
class Datasets(Setting):
    """A setting to choose one or more of the possible datasets."""

    typename = 'dataset-multi'

    def __init__(self, name, val, dimensions=1, datatype='numeric',
                 **args):
        """
        dimensions is the number of dimensions the dataset needs
        """
        Setting.__init__(self, name, val, **args)
        self.dimensions = dimensions
        self.datatype = datatype

    def normalize(self, val):
        """Takes a tuple/list of strings:
        ('ds1','ds2'...)
        """
        if isinstance(val, str):
            return (val, )

        if type(val) not in (list, tuple):
            raise utils.InvalidType

        # check each entry in the list is appropriate
        for dsname in val:
            if not isinstance(dsname, str):
                raise utils.InvalidType

        return tuple(val)

    def copy(self):
        """Make a setting which has its values copied from this one."""
        extra = {
            'dimensions': self.dimensions,
            'datatype': self.datatype,
        }
        return self._copyHelper((), (), extra)

    def makeControl(self, *args):
        """Allow user to choose between the datasets."""
        return controls.Datasets(
            self, self.getDocument(), self.dimensions,
            self.datatype, *args
        )

    def getData(self, doc):
        """Return a list of datasets entered."""
        out = []
        for dsname in self.val:
            ds = doc.data.get(dsname)
            if ( ds is not None and
                 ds.datatype == self.datatype and
                 ds.dimensions == self.dimensions ):
                out.append(ds)
        return out
class DatasetExtended(Dataset):
    """Choose a dataset, give an expression or specify a list of float
    values."""

    typename = 'dataset-extended'

    def normalize(self, val):
        """Check is a string (dataset name or expression) or a list of
        floats (numbers).
        """
        if isinstance(val, str):
            return val
        elif self.dimensions == 1:
            # list of numbers only allowed for 1d datasets
            if isinstance(val, float) or isinstance(val, int):
                return [val]
            else:
                try:
                    return [float(x) for x in val]
                except (TypeError, ValueError):
                    pass
        raise utils.InvalidType

    def toUIText(self):
        """Return the expression/name, or formatted numbers."""
        if isinstance(self.val, str):
            return self.val
        else:
            # join based on , or ; depending on decimal point
            join = ', '
            if uilocale.decimalPoint() == ',':
                join = '; '
            return join.join( [ ui_floattostring(x)
                                for x in self.val ] )

    def fromUIText(self, text):
        """Convert from text."""
        text = text.strip()

        if self.dimensions > 1:
            return text

        # split based on , or ; depending on decimal point
        splitre = r'[\t\n, ]+'
        if uilocale.decimalPoint() == ',':
            splitre = r'[\t\n; ]+'

        out = []
        for x in re.split(splitre, text):
            if x:
                try:
                    out.append( ui_stringtofloat(x) )
                except ValueError:
                    # fail conversion, so exit with text
                    return text
        return out

    def getFloatArray(self, doc):
        """Get a numpy of values or None."""
        if isinstance(self.val, str):
            ds = doc.evaluate.evalDatasetExpression(
                self.val, datatype=self.datatype, dimensions=self.dimensions)
            if ds:
                # get numpy array of values
                return N.array(ds.data)
        else:
            # list of values
            return N.array(self.val)
        return None

    def isDataset(self, doc):
        """Is this setting a dataset?"""
        return (isinstance(self.val, str) and
                doc.data.get(self.val))

    def isEmpty(self):
        """Is this unset?"""
        return self.val == [] or self.val == ''

    def getData(self, doc):
        """Return veusz dataset for the name/expression or value list."""
        if isinstance(self.val, str):
            return doc.evaluate.evalDatasetExpression(
                self.val, datatype=self.datatype, dimensions=self.dimensions)
        else:
            return datasets.valsToDataset(
                self.val, self.datatype, self.dimensions)
class DatasetOrStr(Dataset):
    """Choose a dataset or enter a string.

    Non string datasets are converted to string arrays using this.
    """

    typename = 'dataset-or-str'

    def __init__(self, name, val, **args):
        # always a text-type dataset
        Dataset.__init__(self, name, val, datatype='text', **args)

    def getData(self, doc, checknull=False):
        """Return either a list of strings, a single item list.

        If checknull then None is returned if blank
        """
        if doc:
            ds = doc.data.get(self.val)
            if ds and ds.dimensions == 1:
                return doc.formatValsWithDatatypeToText(
                    ds.data, ds.displaytype)
        # NOTE(review): the else pairs with the checknull test, so a
        # missing/non-1d dataset falls through to return [str(self.val)]
        if checknull and not self.val:
            return None
        else:
            return [str(self.val)]

    def makeControl(self, *args):
        return controls.DatasetOrString(self, self.getDocument(), *args)

    def copy(self):
        """Make a setting which has its values copied from this one."""
        return self._copyHelper((), (), {})
class Color(ChoiceOrMore):
    """A color setting."""

    typename = 'color'

    def __init__(self, name, value, **args):
        """Initialise the color setting with the given name, default
        and description."""
        # empty choice list: any color text is acceptable
        ChoiceOrMore.__init__(self, name, [], value, **args)

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})

    def color(self, painter, dataindex=0):
        """Return QColor from color.

        painter is a Veusz Painter
        dataindex is index for automatically getting colors for subdatasets.
        """
        if self.val.lower() == 'auto':
            # lookup widget which owns this setting
            w = self.parent
            while w is not None and not w.iswidget:
                w = w.parent
            if w is None:
                # no owning widget: return a default-constructed QColor
                return qt.QColor()
            # get automatic color
            return painter.docColor(w.autoColor(painter, dataindex=dataindex))
        else:
            return painter.docColor(self.val)

    def makeControl(self, *args):
        return controls.Color(self, *args)
class FillStyle(Choice):
    """A setting for the different fill styles provided by Qt."""

    typename = 'fill-style'

    # user-visible names of the available styles
    _fillstyles = [
        'solid', 'horizontal', 'vertical', 'cross',
        'forward diagonals', 'backward diagonals',
        'diagonal cross',
        '94% dense', '88% dense', '63% dense', '50% dense',
        '37% dense', '12% dense', '6% dense'
    ]

    # map from style name to Qt brush style constant
    _fillcnvt = {
        'solid': qt.Qt.SolidPattern,
        'horizontal': qt.Qt.HorPattern,
        'vertical': qt.Qt.VerPattern,
        'cross': qt.Qt.CrossPattern,
        'forward diagonals': qt.Qt.FDiagPattern,
        'backward diagonals': qt.Qt.BDiagPattern,
        'diagonal cross': qt.Qt.DiagCrossPattern,
        '94% dense': qt.Qt.Dense1Pattern,
        '88% dense': qt.Qt.Dense2Pattern,
        '63% dense': qt.Qt.Dense3Pattern,
        '50% dense': qt.Qt.Dense4Pattern,
        '37% dense': qt.Qt.Dense5Pattern,
        '12% dense': qt.Qt.Dense6Pattern,
        '6% dense': qt.Qt.Dense7Pattern,
    }

    # share the tables with the corresponding UI control
    controls.FillStyle._fills = _fillstyles
    controls.FillStyle._fillcnvt = _fillcnvt

    def __init__(self, name, value, **args):
        Choice.__init__(self, name, self._fillstyles, value, **args)

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})

    def qtStyle(self):
        """Return Qt ID of fill."""
        return self._fillcnvt[self.val]

    def makeControl(self, *args):
        return controls.FillStyle(self, *args)
class LineStyle(Choice):
    """A setting choosing a particular line style."""

    typename = 'line-style'

    # list of allowed line styles
    _linestyles = [
        'solid', 'dashed', 'dotted',
        'dash-dot', 'dash-dot-dot', 'dotted-fine',
        'dashed-fine', 'dash-dot-fine',
        'dot1', 'dot2', 'dot3', 'dot4',
        'dash1', 'dash2', 'dash3', 'dash4', 'dash5',
        'dashdot1', 'dashdot2', 'dashdot3'
    ]

    # convert from line styles to Qt constants and a custom pattern (if any)
    _linecnvt = {
        'solid': (qt.Qt.SolidLine, None),
        'dashed': (qt.Qt.DashLine, None),
        'dotted': (qt.Qt.DotLine, None),
        'dash-dot': (qt.Qt.DashDotLine, None),
        'dash-dot-dot': (qt.Qt.DashDotDotLine, None),
        'dotted-fine': (qt.Qt.CustomDashLine, [2, 4]),
        'dashed-fine': (qt.Qt.CustomDashLine, [8, 4]),
        'dash-dot-fine': (qt.Qt.CustomDashLine, [8, 4, 2, 4]),
        'dot1': (qt.Qt.CustomDashLine, [0.1, 2]),
        'dot2': (qt.Qt.CustomDashLine, [0.1, 4]),
        'dot3': (qt.Qt.CustomDashLine, [0.1, 6]),
        'dot4': (qt.Qt.CustomDashLine, [0.1, 8]),
        'dash1': (qt.Qt.CustomDashLine, [4, 4]),
        'dash2': (qt.Qt.CustomDashLine, [4, 8]),
        'dash3': (qt.Qt.CustomDashLine, [8, 8]),
        'dash4': (qt.Qt.CustomDashLine, [16, 8]),
        'dash5': (qt.Qt.CustomDashLine, [16, 16]),
        'dashdot1': (qt.Qt.CustomDashLine, [0.1, 4, 4, 4]),
        'dashdot2': (qt.Qt.CustomDashLine, [0.1, 4, 8, 4]),
        'dashdot3': (qt.Qt.CustomDashLine, [0.1, 2, 4, 2]),
    }

    # share the style list with the corresponding UI control
    controls.LineStyle._lines = _linestyles

    def __init__(self, name, default, **args):
        Choice.__init__(self, name, self._linestyles, default, **args)

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})

    def qtStyle(self):
        """Get Qt ID of chosen line style."""
        return self._linecnvt[self.val]

    def makeControl(self, *args):
        return controls.LineStyle(self, *args)
class Axis(Str):
    """A setting to hold the name of an axis.

    direction is 'horizontal', 'vertical' or 'both'
    """

    typename = 'axis'

    def __init__(self, name, val, direction, **args):
        """Initialise using the document, so we can get the axes later.

        direction is horizontal or vertical to specify the type of axis to
        show
        """
        Setting.__init__(self, name, val, **args)
        self.direction = direction

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (self.direction,), {})

    def makeControl(self, *args):
        """Allows user to choose an axis or enter a name."""
        return controls.Axis(self, self.getDocument(), self.direction, *args)
class WidgetChoice(Str):
    """Hold the name of a child widget."""

    typename = 'widget-choice'

    def __init__(self, name, val, widgettypes={}, **args):
        """Choose widgets from (named) type given.

        (the shared {} default is safe here: it is only ever read)
        """
        Setting.__init__(self, name, val, **args)
        self.widgettypes = widgettypes

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper(
            (), (),
            {
                'widgettypes': self.widgettypes
            }
        )

    def buildWidgetList(self, level, widget, outdict):
        """A recursive helper to build up a list of possible widgets.

        This iterates over widget's children, and adds widgets as tuples
        to outdict using outdict[name] = (widget, level)

        Lower level images of the same name outweigh other images further down
        the tree
        """
        for child in widget.children:
            if child.typename in self.widgettypes:
                if (child.name not in outdict) or (outdict[child.name][1]>level):
                    outdict[child.name] = (child, level)
            else:
                self.buildWidgetList(level+1, child, outdict)

    def getWidgetList(self):
        """Return a dict of valid widget names and the corresponding objects."""
        # find widget which contains setting
        # fix: check for None *before* reading .iswidget — the original
        # order raised AttributeError on a parent chain ending in None
        widget = self.parent
        while widget is not None and not widget.iswidget:
            widget = widget.parent

        # get widget's parent
        if widget is not None:
            widget = widget.parent

        # get list of widgets from recursive find
        widgets = {}
        if widget is not None:
            self.buildWidgetList(0, widget, widgets)

        # turn (object, level) pairs into object
        outdict = {}
        for name, val in widgets.items():
            outdict[name] = val[0]
        return outdict

    def findWidget(self):
        """Find the image corresponding to this setting.

        Returns Image object if succeeds or None if fails
        """
        widgets = self.getWidgetList()
        try:
            return widgets[self.get()]
        except KeyError:
            return None

    def makeControl(self, *args):
        """Allows user to choose an image widget or enter a name."""
        return controls.WidgetChoice(self, self.getDocument(), *args)
class Marker(Choice):
    """Choose a marker type from one allowable."""

    typename = 'marker'

    def __init__(self, name, value, **args):
        # allowed values come from the shared marker code table
        Choice.__init__(self, name, utils.MarkerCodes, value, **args)

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})

    def makeControl(self, *args):
        return controls.Marker(self, *args)
class Arrow(Choice):
    """Choose an arrow type from one allowable."""

    typename = 'arrow'

    def __init__(self, name, value, **args):
        # allowed values come from the shared arrow code table
        Choice.__init__(self, name, utils.ArrowCodes, value, **args)

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})

    def makeControl(self, *args):
        return controls.Arrow(self, *args)
class LineSet(Setting):
    """A setting which corresponds to a set of lines.
    """

    typename='line-multi'

    def normalize(self, val):
        """Takes a tuple/list of 4-tuples:
        [('dotted', '1pt', 'color', False), ...]

        Each entry is (style, width, color, hide); other lengths are
        rejected by the unpack below.
        """
        if type(val) not in (list, tuple):
            raise utils.InvalidType

        # check each entry in the list is appropriate
        for line in val:
            try:
                style, width, color, hide = line
            except ValueError:
                raise utils.InvalidType

            if ( not isinstance(color, str) or
                 not Distance.isDist(width) or
                 style not in LineStyle._linestyles or
                 type(hide) not in (int, bool) ):
                raise utils.InvalidType

        return val

    def makeControl(self, *args):
        """Make specialised lineset control."""
        return controls.LineSet(self, *args)

    def makePen(self, painter, row):
        """Make a pen for the painter using row.

        If row is outside of range, then cycle
        """
        if len(self.val) == 0:
            return qt.QPen(qt.Qt.NoPen)
        else:
            # cycle through the available line styles
            row = row % len(self.val)
            v = self.val[row]
            style, width, color, hide = v
            width = Distance.convertDistance(painter, width)
            style, dashpattern = LineStyle._linecnvt[style]
            col = painter.docColor(color)
            pen = qt.QPen(col, width, style)

            if dashpattern:
                pen.setDashPattern(dashpattern)

            if hide:
                pen.setStyle(qt.Qt.NoPen)
            return pen
class FillSet(Setting):
    """A setting which corresponds to a set of fills.

    This setting keeps an internal array of LineSettings.
    """

    typename = 'fill-multi'

    def normalize(self, val):
        """Takes a tuple/list of tuples:
        [('solid', 'color', False), ...]

        Each entry is (style, color, hide), or the extended 10-element
        form (style, color, hide, transparency, linewidth, linestyle,
        spacing, backcolor, backtrans, backhide).
        """
        if type(val) not in (list, tuple):
            raise utils.InvalidType

        # check each entry in the list is appropriate
        for fill in val:
            try:
                style, color, hide = fill[:3]
            except ValueError:
                raise utils.InvalidType

            if ( not isinstance(color, str) or
                 style not in utils.extfillstyles or
                 type(hide) not in (int, bool) or
                 len(fill) not in (3, 10) ):
                raise utils.InvalidType

        return val

    def makeControl(self, *args):
        """Make specialised lineset control."""
        return controls.FillSet(self, *args)

    def returnBrushExtended(self, row):
        """Return BrushExtended for the row.

        Rows outside the range cycle through the available fills.
        """
        from . import collections
        s = collections.BrushExtended('tempbrush')
        s.parent = self

        if len(self.val) == 0:
            s.hide = True
        else:
            v = self.val[row % len(self.val)]
            s.style = v[0]
            s.color = v[1]
            s.hide = v[2]
            if len(v) == 10:
                # unpack the extended attributes
                (s.transparency, s.linewidth, s.linestyle,
                 s.patternspacing, s.backcolor,
                 s.backtransparency, s.backhide) = v[3:]
        return s
class Filename(Str):
    """Represents a filename setting."""

    typename = 'filename'

    def makeControl(self, *args):
        return controls.Filename(self, 'file', *args)

    def normalize(self, val):
        # store paths with forward slashes on windows for portability
        # NOTE(review): unlike Str.normalize, non-str values are not rejected
        if sys.platform == 'win32':
            val = val.replace('\\', '/')
        return val
class ImageFilename(Filename):
    """Represents an image filename setting."""

    typename = 'filename-image'

    def makeControl(self, *args):
        # control filtered to image file types
        return controls.Filename(self, 'image', *args)
class SVGFilename(Filename):
    """Represents an svg filename setting."""

    typename = 'filename-svg'

    def makeControl(self, *args):
        # control filtered to svg file types
        return controls.Filename(self, 'svg', *args)
class FontFamily(Str):
    """Represents a font family."""

    typename = 'font-family'

    def makeControl(self, *args):
        """Make a special font combobox."""
        return controls.FontFamily(self, *args)
class FontStyle(Str):
    """Represents a font style tied to a particular font family setting."""
    typename = 'font-style'
    def __init__(self, name, val, familysetnname, **args):
        """Initialise font style.

        familysetnname is the name of the sibling family setting this
        style is associated with. (The parameter name is historically
        misspelt; kept unchanged for compatibility with callers.)
        """
        Str.__init__(self, name, val, **args)
        self.familysetnname = familysetnname
    def copy(self):
        """Make a copy of the setting, preserving the family link."""
        return self._copyHelper( (), (self.familysetnname,), {})
    def makeControl(self, *args):
        # the control needs the family setting so it can list valid styles
        familysetn = self.parent.get(self.familysetnname)
        return controls.FontStyle(self, familysetn, *args)
class ErrorStyle(Choice):
    """Error bar style.

    The allowed values are below in _errorstyles.
    """
    typename = 'errorbar-style'
    # full set of error-bar drawing styles supported by plotters
    _errorstyles = (
        'none',
        'bar', 'barends', 'box', 'diamond', 'curve',
        'barbox', 'bardiamond', 'barcurve',
        'boxfill', 'diamondfill', 'curvefill',
        'fillvert', 'fillhorz',
        'linevert', 'linehorz',
        'linevertbar', 'linehorzbar',
        'barhi', 'barlo',
        'barendshi', 'barendslo',
        'linehorzlo', 'linehorzhi', 'linevertlo', 'lineverthi',
    )
    # NOTE: runs at class-creation time - shares the style list with the
    # control class so the UI combobox stays in sync with this setting
    controls.ErrorStyle._errorstyles = _errorstyles
    def __init__(self, name, value, **args):
        Choice.__init__(self, name, self._errorstyles, value, **args)
    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})
    def makeControl(self, *args):
        # specialised control drawing a small preview of each style
        return controls.ErrorStyle(self, *args)
class AlignHorz(Choice):
    """Horizontal alignment: one of 'left', 'centre' or 'right'."""
    typename = 'align-horz'

    def __init__(self, name, value, **args):
        super().__init__(name, ['left', 'centre', 'right'], value, **args)

    def copy(self):
        """Return an independent copy of this setting."""
        return self._copyHelper((), (), {})
class AlignVert(Choice):
    """Vertical alignment: one of 'top', 'centre' or 'bottom'."""
    typename = 'align-vert'

    def __init__(self, name, value, **args):
        super().__init__(name, ['top', 'centre', 'bottom'], value, **args)

    def copy(self):
        """Return an independent copy of this setting."""
        return self._copyHelper((), (), {})
class AlignHorzWManual(Choice):
    """Horizontal alignment, with an extra 'manual' option."""
    typename = 'align-horz-+manual'

    def __init__(self, name, value, **args):
        super().__init__(
            name, ['left', 'centre', 'right', 'manual'], value, **args)

    def copy(self):
        """Return an independent copy of this setting."""
        return self._copyHelper((), (), {})
class AlignVertWManual(Choice):
    """Vertical alignment, with an extra 'manual' option."""
    typename = 'align-vert-+manual'

    def __init__(self, name, value, **args):
        super().__init__(
            name, ['top', 'centre', 'bottom', 'manual'], value, **args)

    def copy(self):
        """Return an independent copy of this setting."""
        return self._copyHelper((), (), {})
# Bool which shows/hides other settings
class BoolSwitch(Bool):
    """Bool setting which shows or hides other settings based on its value."""
    def __init__(self, name, value, settingsfalse=None, settingstrue=None,
                 **args):
        """Enables/disables a set of settings if True or False.

        settingsfalse and settingstrue are lists of names of settings
        which are hidden/shown to the user. Defaults are None rather
        than [] to avoid the shared mutable-default-argument pitfall.
        """
        self.sfalse = settingsfalse if settingsfalse is not None else []
        self.strue = settingstrue if settingstrue is not None else []
        Bool.__init__(self, name, value, **args)
    def makeControl(self, *args):
        """Return a control which toggles visibility of linked settings."""
        return controls.BoolSwitch(self, *args)
    def copy(self):
        """Make a copy, preserving the linked setting name lists."""
        return self._copyHelper((), (), {'settingsfalse': self.sfalse,
                                         'settingstrue': self.strue})
class ChoiceSwitch(Choice):
    """Show or hide other settings based on the choice given here."""
    def __init__(self, name, vallist, value,
                 showfn=lambda x: ((),()),
                 **args):
        """Enables/disables a set of settings depending on showfn(val).

        showfn(val) returns (show, hide), where show and hide are
        lists of settings to show or hide.
        """
        self.showfn = showfn
        Choice.__init__(self, name, vallist, value, **args)
    def makeControl(self, *args):
        # NOTE(review): second argument presumed to disable icons in the
        # combo control - confirm against controls.ChoiceSwitch
        return controls.ChoiceSwitch(self, False, self.vallist, *args)
    def copy(self):
        # vallist passed positionally, showfn as keyword argument
        return self._copyHelper(
            (self.vallist,), (),
            {'showfn': self.showfn})
class FillStyleExtended(ChoiceSwitch):
    """A setting for the different fill styles provided by Qt.

    Hatching-related sub-settings are only shown when the chosen style
    actually uses them (i.e. not 'solid' or a 'dense' pattern).
    """
    typename = 'fill-style-ext'

    @staticmethod
    def _showsetns(val):
        """Return (show, hide) tuples of setting names for style val."""
        hatchsetns = (
            'linewidth', 'linestyle', 'patternspacing',
            'backcolor', 'backtransparency', 'backhide')
        # solid and dense styles have no hatching controls to expose
        if val == 'solid' or 'dense' in val:
            return ((), hatchsetns)
        return (hatchsetns, ())

    def __init__(self, name, value, **args):
        ChoiceSwitch.__init__(
            self, name, utils.extfillstyles, value,
            showfn=self._showsetns,
            **args)

    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})

    def makeControl(self, *args):
        return controls.FillStyleExtended(self, *args)
class RotateInterval(Choice):
    '''Rotate a label with intervals given.

    Angles are stored as strings in 45-degree steps from -180 to 180.
    '''
    def __init__(self, name, val, **args):
        Choice.__init__(
            self, name,
            ('-180', '-135', '-90', '-45', '0', '45', '90', '135', '180'),
            val, **args
        )
    def normalize(self, val):
        """Store rotate angle."""
        # backward compatibility with the old boolean rotate option:
        # False: angle 0
        # True: angle 90
        if val is False:
            val = '0'
        elif val is True:
            val = '90'
        return Choice.normalize(self, val)
    def copy(self):
        """Make a copy of the setting."""
        return self._copyHelper((), (), {})
class Colormap(Str):
    """A setting to set the color map used in an image.

    This is based on a Str rather than Choice as the list might
    change later.
    """
    def makeControl(self, *args):
        # the control needs the document to look up available colormaps
        return controls.Colormap(self, self.getDocument(), *args)
class AxisBound(FloatOrAuto):
    """Axis bound - either numeric, Auto or date."""
    typename = 'axis-bound'
    def makeControl(self, *args):
        return controls.AxisBound(self, *args)
    def toUIText(self):
        """Convert to text, taking into account mode of Axis.

        Displays datetimes in date format if used.
        """
        try:
            mode = self.parent.mode
        except AttributeError:
            # parent setting group has no mode (not an axis)
            mode = None
        v = self.val
        if ( not isinstance(v, str) and v is not None and
             mode == 'datetime' ):
            # numeric value interpreted as a float date in datetime mode
            return utils.dateFloatToString(v)
        return FloatOrAuto.toUIText(self)
    def fromUIText(self, txt):
        """Convert from text, allowing datetimes."""
        v = utils.dateStringToDate(txt)
        if N.isfinite(v):
            # text parsed as a date: store the float date value
            return v
        else:
            # fall back to plain float / 'Auto' parsing
            return FloatOrAuto.fromUIText(self, txt)
|
veuszREPO_NAMEveuszPATH_START.@veusz_extracted@veusz-master@veusz@setting@setting.py@.PATH_END.py
|
{
"filename": "personality.py",
"repo_name": "crossbario/crossbar",
"repo_path": "crossbar_extracted/crossbar-master/crossbar/edge/personality.py",
"type": "Python"
}
|
##############################################################################
#
# Crossbar.io
# Copyright (C) Crossbar.io Technologies GmbH. All rights reserved.
#
##############################################################################
from typing import Dict
from collections.abc import Mapping, Sequence
from pprint import pformat
import six
import txaio
from crossbar.personality import Personality as CrossbarPersonality
from crossbar.personality import default_native_workers
from crossbar.common import checkconfig
from crossbar.node.node import NodeOptions
from crossbar.node.worker import RouterWorkerProcess
from crossbar.edge.node.node import FabricNode
from crossbar.edge.worker.realmstore import RealmStoreDatabase
from crossbar.edge.worker.router import ExtRouterController
from crossbar.edge.worker.hostmonitor import HostMonitor, HostMonitorProcess
from crossbar.edge.worker.xbrmm import MarketplaceController, MarketplaceControllerProcess
from crossbar.edge.webservice import RouterWebServicePairMe
def do_nothing(*args, **kw):
    """No-op placeholder accepting any arguments; always returns None."""
    return None
# check blockchain configuration item (can be part of controller and market maker configurations)
def check_blockchain(personality, blockchain):
    """Check a blockchain configuration item.

    Validates the top-level attributes and the 'gateway' sub-item, and
    resolves Infura credentials from environment variables if requested.

    :param personality: node personality (unused here; kept for the
        common checker signature).
    :param blockchain: blockchain configuration item (dict) to check.
    :raises checkconfig.InvalidConfigException: if the item is invalid.
    """
    # Examples:
    #
    # "blockchain": {
    #     "type": "ethereum",
    #     "gateway": {
    #         "type": "auto"
    #     }
    # }
    #
    # "blockchain": {
    #     "type": "ethereum",
    #     "gateway": {
    #         "type": "user",
    #         "http": "http://127.0.0.1:8545"
    #         "websocket": "ws://127.0.0.1:8545"
    #     },
    #     "from_block": 1,
    #     "chain_id": 5777
    # }
    #
    # "blockchain": {
    #     "type": "ethereum",
    #     "gateway": {
    #         "type": "infura",
    #         "network": "ropsten",
    #         "key": "${INFURA_PROJECT_ID}",
    #         "secret": "${INFURA_PROJECT_SECRET}"
    #     },
    #     "from_block": 6350652,
    #     "chain_id": 3
    # }
    checkconfig.check_dict_args(
        {
            'id': (False, [six.text_type]),
            'type': (True, [six.text_type]),
            'gateway': (True, [Mapping]),
            'key': (False, [six.text_type]),
            'from_block': (False, [int]),
            'chain_id': (False, [int]),
        }, blockchain, "blockchain configuration item {}".format(pformat(blockchain)))

    if blockchain['type'] not in ['ethereum']:
        raise checkconfig.InvalidConfigException('invalid type "{}" in blockchain configuration'.format(
            blockchain['type']))

    gateway = blockchain['gateway']
    if 'type' not in gateway:
        raise checkconfig.InvalidConfigException(
            'missing type in gateway item "{}" of blockchain configuration'.format(pformat(gateway)))

    if gateway['type'] not in ['infura', 'user', 'auto']:
        raise checkconfig.InvalidConfigException(
            'invalid type "{}" in gateway item of blockchain configuration'.format(gateway['type']))

    if gateway['type'] == 'infura':
        checkconfig.check_dict_args(
            {
                'type': (True, [six.text_type]),
                'network': (True, [six.text_type]),
                'key': (True, [six.text_type]),
                'secret': (True, [six.text_type]),
            }, gateway, "blockchain gateway configuration {}".format(pformat(gateway)))

        # allow to set value from environment variable
        gateway['key'] = checkconfig.maybe_from_env('blockchain.gateway["infura"].key',
                                                    gateway['key'],
                                                    hide_value=True)
        gateway['secret'] = checkconfig.maybe_from_env('blockchain.gateway["infura"].secret',
                                                       gateway['secret'],
                                                       hide_value=True)
    elif gateway['type'] == 'user':
        checkconfig.check_dict_args(
            {
                'type': (True, [six.text_type]),
                'http': (True, [six.text_type]),
                # 'websocket': (True, [six.text_type]),
            },
            gateway,
            "blockchain gateway configuration {}".format(pformat(gateway)))
    elif gateway['type'] == 'auto':
        checkconfig.check_dict_args({
            'type': (True, [six.text_type]),
        }, gateway, "blockchain gateway configuration {}".format(pformat(gateway)))
    else:
        # should not arrive here - gateway['type'] validated above
        raise Exception('logic error')
# check database configuration item (can be part of controller, markets worker and market maker configurations)
def check_database(personality, database):
    """Check a database configuration item (cfxdb only)."""
    schema = {
        'type': (True, [six.text_type]),
        'path': (True, [six.text_type]),
        'maxsize': (False, six.integer_types),
    }
    checkconfig.check_dict_args(schema, database, "database configuration")

    db_type = database['type']
    if db_type not in ['cfxdb']:
        raise checkconfig.InvalidConfigException(
            'invalid type "{}" in database configuration'.format(db_type))

    if 'maxsize' in database:
        maxsize = database['maxsize']
        # maxsize must be between 1MB and 1TB
        if not (2**20 <= maxsize <= 2**40):
            raise checkconfig.InvalidConfigException(
                'invalid maxsize {} in database configuration - must be between 1MB and 1TB'.format(
                    maxsize))
def check_controller_fabric(personality, fabric):
    """
    Check controller Fabric configuration override (which essentially is only
    for debugging purposes or for people running Crossbar.io Service on-premise).

    :param fabric: The Fabric configuration to check.
    :type fabric: dict

    :raises checkconfig.InvalidConfigException: if the configuration is invalid.
    """
    if not isinstance(fabric, Mapping):
        raise checkconfig.InvalidConfigException(
            "'fabric' in controller configuration must be a dictionary ({} encountered)\n\n".format(type(fabric)))

    # only 'transport' and 'heartbeat' are recognized attributes
    for k in fabric:
        if k not in ['transport', 'heartbeat']:
            raise checkconfig.InvalidConfigException(
                "encountered unknown attribute '{}' in 'fabric' in controller configuration".format(k))

    if 'transport' in fabric:
        checkconfig.check_connecting_transport(personality, fabric['transport'])

    if 'heartbeat' in fabric:
        heartbeat = fabric['heartbeat']
        checkconfig.check_dict_args(
            {
                'startup_delay': (False, [int, float]),
                'heartbeat_period': (False, [int, float]),
                'include_system_stats': (False, [bool]),
                'send_workers_heartbeats': (False, [bool]),
                'aggregate_workers_heartbeats': (False, [bool]),
            }, heartbeat, "heartbeat configuration: {}".format(pformat(heartbeat)))
def check_controller(personality, controller, ignore=None):
    """Check the node controller configuration.

    Extends the base checkconfig.check_controller with the edge-specific
    items 'fabric', 'blockchain' and 'enable_docker'.

    :param ignore: optional extra attribute names to ignore. Defaults to
        None instead of [] to avoid the mutable-default-argument pitfall.
    :return: result of the base checkconfig.check_controller call.
    """
    if ignore is None:
        ignore = []
    res = checkconfig.check_controller(personality, controller, ['fabric', 'blockchain', 'enable_docker'] + ignore)

    if 'fabric' in controller:
        check_controller_fabric(personality, controller['fabric'])

    if 'blockchain' in controller:
        check_blockchain(personality, controller['blockchain'])

    if 'enable_docker' in controller:
        enable_docker = controller['enable_docker']
        # isinstance rather than an exact type() comparison
        if not isinstance(enable_docker, bool):
            raise checkconfig.InvalidConfigException('invalid type "{}" for "enable_docker" in controller'.format(
                type(enable_docker)))

    return res
def check_controller_options(personality, options, ignore=None):
    """Check controller options; delegates to base checkconfig.

    :param ignore: optional attribute names to ignore. Defaults to None
        instead of [] to avoid the mutable-default-argument pitfall.
    """
    if ignore is None:
        ignore = []
    return checkconfig.check_controller_options(personality, options, ignore)
def check_hostmonitor_options(personality, options):
    """Check native worker options of a host monitor worker.

    :raises checkconfig.InvalidConfigException: on invalid options.
    """
    checkconfig.check_native_worker_options(personality, options, ignore=['interval', 'monitors'])

    # polling interval of sensors in ms
    interval = options.get('interval', 500)
    if type(interval) not in six.integer_types:
        raise checkconfig.InvalidConfigException(
            'invalid type "{}" for "interval" in host monitor configuration (must be an integer for ms)'.format(
                type(interval)))

    monitors = options.get('monitors', {})
    if not isinstance(monitors, Mapping):
        raise checkconfig.InvalidConfigException(
            'invalid type "{}" for "monitors" in host monitor configuration (must be a dict)'.format(type(monitors)))
    for monitor in monitors:
        # FIXME: check if we know the monitor, and monitor
        # specific configuration is valid
        pass
# check native worker options of market maker workers
def check_markets_worker_options(personality, options):
    """Check native worker options of an XBR markets worker."""
    checkconfig.check_native_worker_options(personality, options, ignore=[])
# check market maker configuration items (as part of market maker workers)
def check_market_maker(personality, maker):
    """Check a single market maker configuration item."""
    maker = dict(maker)

    maker_schema = {
        'id': (True, [six.text_type]),
        'key': (True, [six.text_type]),
        'database': (True, [Mapping]),
        'connection': (True, [Mapping]),
        'blockchain': (False, [Mapping]),
    }
    checkconfig.check_dict_args(
        maker_schema, maker,
        "market maker configuration {}".format(pformat(maker)))

    check_database(personality, dict(maker['database']))

    connection = dict(maker['connection'])
    connection_schema = {
        'realm': (True, [six.text_type]),
        'transport': (True, [Mapping]),
    }
    checkconfig.check_dict_args(
        connection_schema, connection,
        "market maker connection configuration")
    checkconfig.check_connecting_transport(personality, dict(connection['transport']))

    if 'blockchain' in maker:
        check_blockchain(personality, maker['blockchain'])
# check native worker configuration of market maker workers
def check_markets_worker(personality, config):
    """Check an XBR markets worker configuration item.

    Validates allowed top-level attributes, the worker options
    (including required 'extra.database' and 'extra.blockchain') and
    each configured market maker.

    :raises checkconfig.InvalidConfigException: if anything is invalid.
    """
    for k in config:
        if k not in ['id', 'type', 'options', 'makers']:
            raise checkconfig.InvalidConfigException(
                'encountered unknown attribute "{}" in XBR markets worker configuration'.format(k))

    if 'id' in config:
        checkconfig.check_id(config['id'])

    if 'options' not in config:
        # FIX: error message previously (and wrongly) referred to "database"
        raise checkconfig.InvalidConfigException('missing attribute "options" in XBR markets worker configuration')
    check_markets_worker_options(personality, config['options'])

    if 'extra' not in config['options']:
        raise checkconfig.InvalidConfigException(
            'missing attribute "options.extra" in XBR markets worker configuration')
    extra = config['options']['extra']

    if 'database' not in extra:
        raise checkconfig.InvalidConfigException(
            'missing attribute "options.extra.database" in XBR markets worker configuration')
    check_database(personality, extra['database'])

    if 'blockchain' not in extra:
        raise checkconfig.InvalidConfigException(
            'missing attribute "options.extra.blockchain" in XBR markets worker configuration')
    check_blockchain(personality, extra['blockchain'])

    makers = config.get('makers', [])
    if not isinstance(makers, Sequence):
        raise checkconfig.InvalidConfigException("'makers' items must be lists ({} encountered)\n\n{}".format(
            type(makers), pformat(config)))
    for maker in makers:
        check_market_maker(personality, maker)
# Table of native worker types supported by the edge personality.
# Keys are the worker types referenced from node configuration.
_native_workers = default_native_workers()

# Override existing worker type: router workers
_native_workers.update({
    'router': {
        'class': RouterWorkerProcess,
        'worker_class': ExtRouterController,
        # check a whole router worker configuration item (including realms, transports, ..)
        'checkconfig_item': checkconfig.check_router,
        # only check router worker options
        'checkconfig_options': checkconfig.check_router_options,
        'logname': 'Router',
        'topics': {
            'starting': 'crossbar.on_router_starting',
            'started': 'crossbar.on_router_started',
        }
    }
})

# New worker type: host monitor
_native_workers.update({
    'hostmonitor': {
        'process_class': HostMonitor,
        'class': HostMonitorProcess,
        'worker_class': HostMonitor,
        # FIXME: check a whole hostmonitor configuration item
        'checkconfig_item': do_nothing,
        # FIXME: only check hostmonitor worker options
        'checkconfig_options': check_hostmonitor_options,
        'logname': 'Hostmonitor',
        'topics': {
            'starting': 'crossbar.on_hostmonitor_starting',
            'started': 'crossbar.on_hostmonitor_started',
        }
    }
})

# New worker type: XBR Market Maker ("xbrmm")
_native_workers.update({
    'xbrmm': {
        'process_class': MarketplaceController,
        'class': MarketplaceControllerProcess,
        'worker_class': MarketplaceController,
        'checkconfig_item': check_markets_worker,
        'checkconfig_options': check_markets_worker_options,
        'logname': 'XBRMM',
        'topics': {
            'starting': 'crossbar.on_xbrmm_starting',
            'started': 'crossbar.on_xbrmm_started',
        }
    }
})
class Personality(CrossbarPersonality):
    """Crossbar.io 'edge' node personality.

    Extends the base personality with the 'pairme' web service, the
    cfxdb realm store and the extra native worker types defined above.
    """
    log = txaio.make_logger()

    NAME = 'edge'

    # edge templates are searched before the base personality's
    TEMPLATE_DIRS = [('crossbar', 'edge/webservice/templates')] + CrossbarPersonality.TEMPLATE_DIRS

    WEB_SERVICE_CHECKERS: Dict[str, object] = {
        'pairme': RouterWebServicePairMe.check,
        **CrossbarPersonality.WEB_SERVICE_CHECKERS
    }

    WEB_SERVICE_FACTORIES: Dict[str, object] = {
        'pairme': RouterWebServicePairMe,
        **CrossbarPersonality.WEB_SERVICE_FACTORIES
    }

    REALM_STORES: Dict[str, object] = {'cfxdb': RealmStoreDatabase, **CrossbarPersonality.REALM_STORES}

    # edge-specific configuration checkers (bound as plain functions)
    check_controller = check_controller
    check_controller_options = check_controller_options
    check_market_maker = check_market_maker

    Node = FabricNode
    NodeOptions = NodeOptions

    native_workers = _native_workers
|
crossbarioREPO_NAMEcrossbarPATH_START.@crossbar_extracted@crossbar-master@crossbar@edge@personality.py@.PATH_END.py
|
{
"filename": "plot_sdtw.py",
"repo_name": "tslearn-team/tslearn",
"repo_path": "tslearn_extracted/tslearn-main/docs/examples/metrics/plot_sdtw.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
r"""
sDTW multi path matching
========================
This example illustrates how subsequent DTW can be used to find multiple
matches of a sequence in a longer sequence.
A potential usecase is to identify the occurrence of certain events in
continuous sensor signals. As one example Barth et al. [1] used this method
to find stride in sensor recordings of gait.
The example demonstrates the use of the functions
`subsequence_cost_matrix` and `subsequence_path`
to manually calculate warping paths from multiple potential alignments. If
you are only interested in finding the optimal alignment, you can directly use
`dtw_subsequence_path`.
[1] Barth, et al. (2013): Subsequence dynamic time warping as a method for \
robust step segmentation using gyroscope signals of daily life activities, \
EMBS, https://doi.org/10.1109/EMBC.2013.6611104
"""
# Author: Arne Kuederle
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy
from scipy.signal import find_peaks

from tslearn import metrics
from tslearn.generators import random_walks
from tslearn.preprocessing import TimeSeriesScalerMeanVariance

# fixed seed so the generated walks (and the plot) are reproducible
numpy.random.seed(0)
n_ts, sz, d = 2, 100, 1
n_repeat = 5

dataset = random_walks(n_ts=n_ts, sz=sz, d=d)
scaler = TimeSeriesScalerMeanVariance(mu=0., std=1.)  # Rescale time series
dataset_scaled = scaler.fit_transform(dataset)

# We repeat the long sequence multiple times to generate multiple possible
# matches
long_sequence = numpy.tile(dataset_scaled[1], (n_repeat, 1))
short_sequence = dataset_scaled[0]

sz1 = len(long_sequence)
sz2 = len(short_sequence)

print('Shape long sequence: {}'.format(long_sequence.shape))
print('Shape short sequence: {}'.format(short_sequence.shape))

# Calculate the accumulated cost matrix
mat = metrics.subsequence_cost_matrix(short_sequence,
                                      long_sequence)

# Calculate cost function (last row = cost of aligning the full short
# sequence ending at each position of the long sequence)
cost_func = mat[-1, :]

# Identify potential matches in the cost function (parameters are tuned to
# fit this example)
potential_matches = find_peaks(-cost_func, distance=sz * 0.75, height=-50)[0]

# Calculate the optimal warping path starting from each of the identified
# minima
paths = [metrics.subsequence_path(mat, match) for match in
         potential_matches]

plt.figure(1, figsize=(6 * n_repeat, 6))

# definitions for the axes
left, bottom = 0.01, 0.1
h_ts = 0.2
w_ts = h_ts / n_repeat
left_h = left + w_ts + 0.02
width = height = 0.65
bottom_h = bottom + height + 0.02

rect_s_y = [left, bottom, w_ts, height]
rect_gram = [left_h, bottom, width, height]
rect_s_x = [left_h, bottom_h, width, h_ts]

ax_gram = plt.axes(rect_gram)
ax_s_x = plt.axes(rect_s_x)
ax_s_y = plt.axes(rect_s_y)

# sqrt for better visual contrast of the accumulated cost
ax_gram.imshow(numpy.sqrt(mat))
ax_gram.axis("off")
ax_gram.autoscale(False)

# Plot the paths
for path in paths:
    ax_gram.plot([j for (i, j) in path], [i for (i, j) in path], "w-",
                 linewidth=3.)

ax_s_x.plot(numpy.arange(sz1), long_sequence, "b-", linewidth=3.)
ax_s_x.axis("off")
ax_s_x.set_xlim((0, sz1 - 1))

# short sequence drawn vertically (negated, reversed index) alongside the matrix
ax_s_y.plot(- short_sequence, numpy.arange(sz2)[::-1], "b-", linewidth=3.)
ax_s_y.axis("off")
ax_s_y.set_ylim((0, sz2 - 1))

plt.show()
|
tslearn-teamREPO_NAMEtslearnPATH_START.@tslearn_extracted@tslearn-main@docs@examples@metrics@plot_sdtw.py@.PATH_END.py
|
{
"filename": "test_spectrum.py",
"repo_name": "tomasstolker/species",
"repo_path": "species_extracted/species-main/tests/test_read/test_spectrum.py",
"type": "Python"
}
|
import os
import shutil
import pytest
import numpy as np
from species import SpeciesInit
# from species.data.database import Database
# from species.read.read_spectrum import ReadSpectrum
# from species.plot.plot_spectrum import plot_spectrum
from species.util import test_util
class TestSpectrum:
    """Tests for reading spectral libraries with species.

    Most test bodies are currently commented out (they require
    downloading the IRTF library); only initialization is exercised.
    """
    def setup_class(self):
        # tolerance for approximate comparisons in the (commented) tests
        self.limit = 1e-8
        self.test_path = os.path.dirname(__file__) + "/"

    def teardown_class(self):
        # remove files created by SpeciesInit / the database
        os.remove("species_database.hdf5")
        os.remove("species_config.ini")
        # os.remove("spectrum.pdf")
        shutil.rmtree("data/")

    def test_species_init(self):
        test_util.create_config("./")
        SpeciesInit()

    # def test_read_spectrum(self):
    #     database = Database()
    #
    #     with pytest.warns(UserWarning):
    #         database.add_spectrum(
    #             "irtf",
    #             sptypes=[
    #                 "L",
    #             ],
    #         )
    #
    #     read_spectrum = ReadSpectrum("irtf", filter_name="MKO/NSFCam.H")
    #     assert read_spectrum.wavel_range == pytest.approx(
    #         (1.382, 1.8656), rel=1e-6, abs=0.0
    #     )
    #
    # def test_get_spectrum(self):
    #     read_spectrum = ReadSpectrum("irtf", filter_name="MKO/NSFCam.H")
    #     spec_box = read_spectrum.get_spectrum(
    #         sptypes=[
    #             "L0",
    #         ],
    #         exclude_nan=True,
    #     )
    #
    #     assert spec_box.wavelength[0].shape == (1063,)
    #     assert spec_box.flux[0].shape == (1063,)
    #
    #     assert np.sum(spec_box.wavelength[0]) == pytest.approx(
    #         1692.8604, rel=1e-7, abs=0.0
    #     )
    #     assert np.sum(spec_box.flux[0]) == pytest.approx(
    #         4.5681937e-11, rel=1e-7, abs=0.0
    #     )
    #
    #     plot_spectrum(
    #         boxes=[
    #             spec_box,
    #         ],
    #         filters=[
    #             "MKO/NSFCam.H",
    #         ],
    #         output="spectrum.pdf",
    #         xlim=(1.0, 2.5),
    #         offset=(-0.08, -0.06),
    #     )
    #
    #     assert os.path.exists("spectrum.pdf")
|
tomasstolkerREPO_NAMEspeciesPATH_START.@species_extracted@species-main@tests@test_read@test_spectrum.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ggmichael/craterstats",
"repo_path": "craterstats_extracted/craterstats-main/src/craterstats/config/__init__.py",
"type": "Python"
}
|
ggmichaelREPO_NAMEcraterstatsPATH_START.@craterstats_extracted@craterstats-main@src@craterstats@config@__init__.py@.PATH_END.py
|
|
{
"filename": "massless_ferm.ipynb",
"repo_name": "awsteiner/o2scl",
"repo_path": "o2scl_extracted/o2scl-main/doc/o2scl/python/massless_ferm.ipynb",
"type": "Jupyter Notebook"
}
|
```python
import sympy
import numpy
```
```python
alpha=sympy.Symbol('alpha')
eta=sympy.Symbol('eta')
beta=sympy.Symbol('beta')
```
Basic expression to approximate
```python
cbt=(alpha**(sympy.Rational(-1,6))*(-1+sympy.sqrt(1+alpha))**
(sympy.Rational(1,3)))
display(cbt)
```
$\displaystyle \frac{\sqrt[3]{\sqrt{\alpha + 1} - 1}}{\sqrt[6]{\alpha}}$
```python
f=1/cbt-cbt
display(f)
```
$\displaystyle \frac{\sqrt[6]{\alpha}}{\sqrt[3]{\sqrt{\alpha + 1} - 1}} - \frac{\sqrt[3]{\sqrt{\alpha + 1} - 1}}{\sqrt[6]{\alpha}}$
Rewriting:
```python
Nlow=99
```
```python
lowaser=sympy.series(f.subs(alpha,eta**6),eta,0,Nlow).removeO()
display(lowaser)
```
$\displaystyle - \frac{60837710436840623 \cdot 2^{\frac{2}{3}} \eta^{97}}{67390312367240773632} - \frac{158136806508304345 \sqrt[3]{2} \eta^{95}}{134780624734481547264} + \frac{696995045921251 \cdot 2^{\frac{2}{3}} \eta^{91}}{701982420492091392} + \frac{3630889060034345 \sqrt[3]{2} \eta^{89}}{2807929681968365568} - \frac{85726663402079 \cdot 2^{\frac{2}{3}} \eta^{85}}{77998046721343488} - \frac{55953740067775 \sqrt[3]{2} \eta^{83}}{38999023360671744} + \frac{7966571683861 \cdot 2^{\frac{2}{3}} \eta^{79}}{6499837226778624} + \frac{41710969868705 \sqrt[3]{2} \eta^{77}}{25999348907114496} - \frac{373343301695 \cdot 2^{\frac{2}{3}} \eta^{73}}{270826551115776} - \frac{3921815591005 \sqrt[3]{2} \eta^{71}}{2166612408926208} + \frac{5890704547 \cdot 2^{\frac{2}{3}} \eta^{67}}{3761479876608} + \frac{124220403335 \sqrt[3]{2} \eta^{65}}{60183678025728} - \frac{18058389349 \cdot 2^{\frac{2}{3}} \eta^{61}}{10030613004288} - \frac{747088265 \sqrt[3]{2} \eta^{59}}{313456656384} + \frac{1754890501 \cdot 2^{\frac{2}{3}} \eta^{55}}{835884417024} + \frac{291988675 \sqrt[3]{2} \eta^{53}}{104485552128} - \frac{19284511 \cdot 2^{\frac{2}{3}} \eta^{49}}{7739670528} - \frac{3230513 \sqrt[3]{2} \eta^{47}}{967458816} + \frac{487475 \cdot 2^{\frac{2}{3}} \eta^{43}}{161243136} + \frac{164749 \sqrt[3]{2} \eta^{41}}{40310784} - \frac{202895 \cdot 2^{\frac{2}{3}} \eta^{37}}{53747712} - \frac{8671 \sqrt[3]{2} \eta^{35}}{1679616} + \frac{7315 \cdot 2^{\frac{2}{3}} \eta^{31}}{1492992} + \frac{5083 \sqrt[3]{2} \eta^{29}}{746496} - \frac{209 \cdot 2^{\frac{2}{3}} \eta^{25}}{31104} - \frac{595 \sqrt[3]{2} \eta^{23}}{62208} + \frac{13 \cdot 2^{\frac{2}{3}} \eta^{19}}{1296} + \frac{77 \sqrt[3]{2} \eta^{17}}{5184} - \frac{5 \cdot 2^{\frac{2}{3}} \eta^{13}}{288} - \frac{\sqrt[3]{2} \eta^{11}}{36} + \frac{2^{\frac{2}{3}} \eta^{7}}{24} + \frac{\sqrt[3]{2} \eta^{5}}{12} - \frac{2^{\frac{2}{3}} \eta}{2} + \frac{\sqrt[3]{2}}{\eta}$
```python
for i in range(-1,Nlow,2):
coeff=lowaser.coeff(eta,i)
ixtype=numpy.floor(i/4)-i/4+1/4
if ixtype<0:
coeff=coeff/sympy.Rational(2,1)**sympy.Rational(1,3)
else:
coeff=coeff/sympy.Rational(2,1)**sympy.Rational(2,3)
num=sympy.numer(coeff)
den=sympy.denom(coeff)
print('fp_t numer='+str(num)+';')
print('fp_t denom='+str(den)+';')
if ixtype<0:
print('fp_t term=to13*numer/denom*pow(eta,'+str(i)+');')
else:
print('fp_t term=to23*numer/denom*pow(eta,'+str(i)+');')
```
fp_t numer=1;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,-1);
fp_t numer=-1;
fp_t denom=2;
fp_t term=to23*numer/denom*pow(eta,1);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,3);
fp_t numer=2**(2/3);
fp_t denom=24;
fp_t term=to23*numer/denom*pow(eta,5);
fp_t numer=2**(1/3);
fp_t denom=24;
fp_t term=to13*numer/denom*pow(eta,7);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,9);
fp_t numer=-1;
fp_t denom=36;
fp_t term=to13*numer/denom*pow(eta,11);
fp_t numer=-5;
fp_t denom=288;
fp_t term=to23*numer/denom*pow(eta,13);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,15);
fp_t numer=77*2**(2/3);
fp_t denom=10368;
fp_t term=to23*numer/denom*pow(eta,17);
fp_t numer=13*2**(1/3);
fp_t denom=1296;
fp_t term=to13*numer/denom*pow(eta,19);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,21);
fp_t numer=-595;
fp_t denom=62208;
fp_t term=to13*numer/denom*pow(eta,23);
fp_t numer=-209;
fp_t denom=31104;
fp_t term=to23*numer/denom*pow(eta,25);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,27);
fp_t numer=5083*2**(2/3);
fp_t denom=1492992;
fp_t term=to23*numer/denom*pow(eta,29);
fp_t numer=7315*2**(1/3);
fp_t denom=1492992;
fp_t term=to13*numer/denom*pow(eta,31);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,33);
fp_t numer=-8671;
fp_t denom=1679616;
fp_t term=to13*numer/denom*pow(eta,35);
fp_t numer=-202895;
fp_t denom=53747712;
fp_t term=to23*numer/denom*pow(eta,37);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,39);
fp_t numer=164749*2**(2/3);
fp_t denom=80621568;
fp_t term=to23*numer/denom*pow(eta,41);
fp_t numer=487475*2**(1/3);
fp_t denom=161243136;
fp_t term=to13*numer/denom*pow(eta,43);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,45);
fp_t numer=-3230513;
fp_t denom=967458816;
fp_t term=to13*numer/denom*pow(eta,47);
fp_t numer=-19284511;
fp_t denom=7739670528;
fp_t term=to23*numer/denom*pow(eta,49);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,51);
fp_t numer=291988675*2**(2/3);
fp_t denom=208971104256;
fp_t term=to23*numer/denom*pow(eta,53);
fp_t numer=1754890501*2**(1/3);
fp_t denom=835884417024;
fp_t term=to13*numer/denom*pow(eta,55);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,57);
fp_t numer=-747088265;
fp_t denom=313456656384;
fp_t term=to13*numer/denom*pow(eta,59);
fp_t numer=-18058389349;
fp_t denom=10030613004288;
fp_t term=to23*numer/denom*pow(eta,61);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,63);
fp_t numer=124220403335*2**(2/3);
fp_t denom=120367356051456;
fp_t term=to23*numer/denom*pow(eta,65);
fp_t numer=5890704547*2**(1/3);
fp_t denom=3761479876608;
fp_t term=to13*numer/denom*pow(eta,67);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,69);
fp_t numer=-3921815591005;
fp_t denom=2166612408926208;
fp_t term=to13*numer/denom*pow(eta,71);
fp_t numer=-373343301695;
fp_t denom=270826551115776;
fp_t term=to23*numer/denom*pow(eta,73);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,75);
fp_t numer=41710969868705*2**(2/3);
fp_t denom=51998697814228992;
fp_t term=to23*numer/denom*pow(eta,77);
fp_t numer=7966571683861*2**(1/3);
fp_t denom=6499837226778624;
fp_t term=to13*numer/denom*pow(eta,79);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,81);
fp_t numer=-55953740067775;
fp_t denom=38999023360671744;
fp_t term=to13*numer/denom*pow(eta,83);
fp_t numer=-85726663402079;
fp_t denom=77998046721343488;
fp_t term=to23*numer/denom*pow(eta,85);
fp_t numer=0;
fp_t denom=1;
fp_t term=to13*numer/denom*pow(eta,87);
fp_t numer=3630889060034345*2**(2/3);
fp_t denom=5615859363936731136;
fp_t term=to23*numer/denom*pow(eta,89);
fp_t numer=696995045921251*2**(1/3);
fp_t denom=701982420492091392;
fp_t term=to13*numer/denom*pow(eta,91);
fp_t numer=0;
fp_t denom=1;
fp_t term=to23*numer/denom*pow(eta,93);
fp_t numer=-158136806508304345;
fp_t denom=134780624734481547264;
fp_t term=to13*numer/denom*pow(eta,95);
fp_t numer=-60837710436840623;
fp_t denom=67390312367240773632;
fp_t term=to23*numer/denom*pow(eta,97);
```python
f2=f.subs(alpha,1/beta**2)
display(f2)
```
$\displaystyle - \frac{\sqrt[3]{\sqrt{1 + \frac{1}{\beta^{2}}} - 1}}{\sqrt[6]{\frac{1}{\beta^{2}}}} + \frac{\sqrt[6]{\frac{1}{\beta^{2}}}}{\sqrt[3]{\sqrt{1 + \frac{1}{\beta^{2}}} - 1}}$
```python
Nhigh=39
```
```python
highaser=sympy.series(f2,beta,0,Nhigh).removeO()
display(highaser)
```
$\displaystyle \frac{120013116328880593960960 \beta^{37}}{58149737003040059690390169} - \frac{1611206569001662545920 \beta^{35}}{717897987691852588770249} + \frac{195646511950201880576 \beta^{33}}{79766443076872509863361} - \frac{7963410292145127424 \beta^{31}}{2954312706550833698643} + \frac{326196774651822080 \beta^{29}}{109418989131512359209} - \frac{40376795886780416 \beta^{27}}{12157665459056928801} + \frac{186663707869184 \beta^{25}}{50031545098999707} - \frac{7843012935680 \beta^{23}}{1853020188851841} + \frac{1000142274560 \beta^{21}}{205891132094649} - \frac{43109580800 \beta^{19}}{7625597484987} + \frac{630063104 \beta^{17}}{94143178827} - \frac{84672512 \beta^{15}}{10460353203} + \frac{3899392 \beta^{13}}{387420489} - \frac{186368 \beta^{11}}{14348907} + \frac{28160 \beta^{9}}{1594323} - \frac{512 \beta^{7}}{19683} + \frac{32 \beta^{5}}{729} - \frac{8 \beta^{3}}{81} + \frac{2 \beta}{3}$
```python
for i in range(1,Nhigh,2):
print('fp_t numer='+str(sympy.numer(highaser.coeff(beta,i)))+';')
print('fp_t denom='+str(sympy.denom(highaser.coeff(beta,i)))+';')
print('fp_t term=numer/denom*pow(eta,'+str(i)+');')
```
fp_t numer=2;
fp_t denom=3;
fp_t term=numer/denom*pow(eta,1);
fp_t numer=-8;
fp_t denom=81;
fp_t term=numer/denom*pow(eta,3);
fp_t numer=32;
fp_t denom=729;
fp_t term=numer/denom*pow(eta,5);
fp_t numer=-512;
fp_t denom=19683;
fp_t term=numer/denom*pow(eta,7);
fp_t numer=28160;
fp_t denom=1594323;
fp_t term=numer/denom*pow(eta,9);
fp_t numer=-186368;
fp_t denom=14348907;
fp_t term=numer/denom*pow(eta,11);
fp_t numer=3899392;
fp_t denom=387420489;
fp_t term=numer/denom*pow(eta,13);
fp_t numer=-84672512;
fp_t denom=10460353203;
fp_t term=numer/denom*pow(eta,15);
fp_t numer=630063104;
fp_t denom=94143178827;
fp_t term=numer/denom*pow(eta,17);
fp_t numer=-43109580800;
fp_t denom=7625597484987;
fp_t term=numer/denom*pow(eta,19);
fp_t numer=1000142274560;
fp_t denom=205891132094649;
fp_t term=numer/denom*pow(eta,21);
fp_t numer=-7843012935680;
fp_t denom=1853020188851841;
fp_t term=numer/denom*pow(eta,23);
fp_t numer=186663707869184;
fp_t denom=50031545098999707;
fp_t term=numer/denom*pow(eta,25);
fp_t numer=-40376795886780416;
fp_t denom=12157665459056928801;
fp_t term=numer/denom*pow(eta,27);
fp_t numer=326196774651822080;
fp_t denom=109418989131512359209;
fp_t term=numer/denom*pow(eta,29);
fp_t numer=-7963410292145127424;
fp_t denom=2954312706550833698643;
fp_t term=numer/denom*pow(eta,31);
fp_t numer=195646511950201880576;
fp_t denom=79766443076872509863361;
fp_t term=numer/denom*pow(eta,33);
fp_t numer=-1611206569001662545920;
fp_t denom=717897987691852588770249;
fp_t term=numer/denom*pow(eta,35);
fp_t numer=120013116328880593960960;
fp_t denom=58149737003040059690390169;
fp_t term=numer/denom*pow(eta,37);
```python
```
|
awsteinerREPO_NAMEo2sclPATH_START.@o2scl_extracted@o2scl-main@doc@o2scl@python@massless_ferm.ipynb@.PATH_END.py
|
{
"filename": "test_auto.py",
"repo_name": "AFD-Illinois/ebhlight",
"repo_path": "ebhlight_extracted/ebhlight-master/test/test_auto.py",
"type": "Python"
}
|
################################################################################
# #
# RUN ALL TESTS AND CHECK FOR ACCURACY #
# #
################################################################################
from __future__ import print_function,division
import os
import sys; sys.dont_write_bytecode = True
sys.path.insert(0, '../script/')
import util
import subprocess as sp
import numpy as np
import glob
import pickle
from scipy.interpolate import interp1d as interp
import time
# DON'T RUN IF COMPUTER IS BEING USED FOR SOMETHING ELSE
import psutil
if psutil.cpu_percent() > 25.:
  sys.exit()
# EMAIL OPTIONS
# TO/FROM/hostname/port/username are deliberately left unset here; they must
# be filled in before running with -email (see the SEND_REPORT block below).
TO = []
FROM = None
SUBJECT = 'BHLIGHT TESTING REPORT'
LOGNAME = 'test_auto.txt'
# SSH OPTIONS
hostname = None
port = None
username = None
# Maximum acceptable L1 error (fractional) for a test to PASS, unless a test
# ships its own 'THRESHOLD' in data.p.
ERROR_THRESHOLD = 0.01
#os.chdir(os.path.dirname(os.path.abspath(__file__)))
#print(os.path.abspath(__file__))
# Printing
# Opened in write mode: each run overwrites the previous report.
logfile = open(LOGNAME, "w")
def print2(str):
  """Echo *str* to stdout and append it, newline-terminated, to the log file."""
  logfile.write(str + '\n')
  print(str)
# Test scripts to run, in order; each must accept -auto (and optionally -fast)
# and leave its results in a pickle file named data.p.
TESTS = ['bhtherm.py', 'bondi.py', 'brem.py', 'brem_mpi.py', 'kshocks.py',
         'mhdmodes1d.py', 'sod.py', 'sod_mpi.py', 'thermalization.py',
         'thermalization_mpi.py']
# Command-line flags.
SEND_REPORT = '-email' in sys.argv
FAST = '-fast' in sys.argv
VERBOSE = '-verbose' in sys.argv
print("")
print("********************************************************************************")
print("")
print("                                AUTOMATED TESTING")
print("")
print("********************************************************************************")
# Record run metadata (date, host, git branch/commit) for the report header.
DATE = time.strftime('%Y/%m/%d')
TIME = time.strftime('%H:%M:%S')
MACHINE = os.uname()[1]
HASH = 'None'
popen = sp.Popen(['git', 'show', '-s', '--format=%H'], stdout=sp.PIPE,
                 universal_newlines=True)
for line in iter(popen.stdout.readline, ""):
  HASH = line.lstrip().rstrip()
popen = sp.Popen(['git', 'branch'], stdout=sp.PIPE, universal_newlines=True)
BRANCH = 'None'
# The current branch is the line git marks with a leading '*'.
for line in iter(popen.stdout.readline, ""):
  if line[0] == '*':
    BRANCH = line[2:].rstrip()
print2('\n  DATE:    ' + DATE)
print2('  TIME:    ' + TIME)
print2('  MACHINE: ' + MACHINE)
print2('  BRANCH:  ' + BRANCH)
print2('  COMMIT:  ' + HASH + '\n')
def name_to_args(namestring):
  """Split a test-script spec of the form '<name>.py [args...]' into a list.

  Returns [script] when the spec is a bare script name, otherwise
  [script, arg1, arg2, ...] with surrounding whitespace stripped.
  """
  spec = namestring.strip()
  # Bare script name: nothing follows the .py suffix.
  if spec[-3:] == '.py':
    return [spec]
  pieces = spec.split('.py ')
  script = pieces[0].strip() + '.py'
  extra = [token.strip() for token in pieces[1].split()]
  return [script] + extra
# USE INTERPOLATION ON A (ANALYTIC SOLUTION) TO COMPARE TO B
def sanitize_array(a):
  """Return *a* as a numpy array, flattened to 1-D when every trailing axis
  has size one (shape (N, 1, ..., 1) -> (N,)); genuinely multi-dimensional
  arrays are returned unchanged."""
  arr = np.array(a)  # always materialize a (copied) ndarray
  if arr.ndim == 1:
    return arr
  if np.prod(arr.shape[1:]) > 1:
    # Real multi-dimensional data: hand it back as-is.
    return arr
  #raise ValueError(
  #    "Array should be 1d. Array shape = {}".format(a.shape)
  #)
  # Shape like (N, 1, ..., 1): collapse to a flat length-N vector.
  return arr.reshape(arr.shape[0])
def L1_norm_1d(xa, ya, xb, yb):
  """Accumulate the pointwise L1 distance between the code solution (xb, yb)
  and the analytic solution (xa, ya), interpolating the analytic curve onto
  the code grid.  The distance is relative (normalized by the mean of the two
  values) unless the analytic solution is essentially zero everywhere, in
  which case the absolute difference is summed instead."""
  if xa[0] > xb[0]:
    # The analytic grid starts inside the code grid: drop the first code
    # sample so every evaluation point lies in the interpolation domain.
    xb, yb = xb[1:], yb[1:]
  analytic = interp(xa, ya)
  absolute_only = np.max(ya) <= 1e-12
  total = 0.
  for n, x in enumerate(xb):
    reference = analytic(x)
    difference = np.fabs(yb[n] - reference)
    if absolute_only:
      total += difference
    else:
      total += difference / np.fabs((yb[n] + reference) / 2.)
  return total
def L1_norm(xa, ya, xb, yb):
  """Dispatch the L1-norm comparison between analytic (xa, ya) and code
  (xb, yb) solutions, handling scalar, 1d and 2d inputs.

  When xa is None the comparison is between scalar/array values with no
  abscissa (the SOL_SCALAR case in the test loop); otherwise the data are
  sanitized and compared per-point via L1_norm_1d.
  """
  if xa is None:
    # No grid: elementwise relative difference, worst element wins.
    # NOTE(review): if ya has no 'shape' attribute this falls through to
    # sanitize_array(None) below, which would fail — presumably SOL_SCALAR
    # payloads are always numpy arrays; confirm against the test scripts.
    if hasattr(ya, 'shape'):
      L1s = np.zeros(len(ya))
      for n in range(len(L1s)):
        L1s[n] = 2*np.fabs(yb[n] - ya[n])/np.fabs(ya[n] + yb[n])
      return L1s.max()
  xa,ya,xb,yb = [sanitize_array(a) for a in [xa,ya,xb,yb]]
  # special case for 0d arrays
  if len(xa) == len(xb) == len(ya) == len(yb) == 1:
    # Absolute difference when the reference is ~zero, relative otherwise.
    if np.abs(yb[0]) <= 1e-12:
      return np.fabs(ya[0] - yb[0])
    return np.fabs((ya[0] - yb[0])/yb[0])
  # special case for 2d arrays, return max L1
  if xa.ndim > 1 or xb.ndim > 1 or ya.ndim > 1 or yb.ndim > 1:
    L1s = np.zeros(len(xa))
    for n in range(len(L1s)):
      # Normalize each row's norm by its number of samples.
      L1s[n] = L1_norm_1d(xa[n], ya[n], xb[n], yb[n])/len(xb[n])
      if (np.isnan(L1s[n])):
        # Debugging aid: dump the offending row and bail out.
        print(len(xb[n]))
        print(xa[n])
        print(ya[n])
        print(xb[n])
        print(yb[n])
        sys.exit()
    return L1s.max()
  return L1_norm_1d(xa, ya, xb, yb)/len(xb)
# PUT /len(xb) INTO L1_NORM_1D AND FIX THE DIVIDE BY ZERO PROBLEM IN SOME SHOCK SOLNS!!!
# #xa,ya,xb,yb = [sanitize_array(a) for a in [xa,ya,xb,yb]]
# if xa[0] > xb[0]:
# xb = xb[1:]
# yb = yb[1:]
# fa = interp(xa, ya)
# print('shapes...')
# print(xa.shape)
# print(ya.shape)
# norm = 0.
# nodenom = np.max(ya) <= 1e-12
# print(len(xb))
# for n in range(len(xb)):
# num = np.fabs(yb[n] - fa(xb[n]))
# print('%e ' % yb[n] + '%e ' % fa(xb[n]) + '%e' % num)
# denom = np.fabs((yb[n] + fa(xb[n]))/2.)
# if nodenom:
# norm += num
# else:
# norm += num/denom
# print('norm = %e' % norm)
# return (norm/len(xb))
# Run every test script, compare its code output against the analytic
# solution stored in data.p, and record PASS/FAIL.
FAIL = False
for TEST in TESTS:
  print2('  ' + TEST[:-3])
  args = [sys.executable, TEST, '-auto']
  if FAST:
    args += ['-fast']
  popen = sp.Popen(args,
                   stdout=sp.PIPE,
                   stderr=sp.PIPE,
                   universal_newlines=True)
  # Stream the child's stdout, echoing progress markers.
  for line in iter(popen.stdout.readline, ""):
    if VERBOSE:
      print2(line.rstrip())
    if line.lstrip().rstrip() == 'BUILD SUCCESSFUL':
      print2('    BUILD SUCCESSFUL')
  print2('    RUN FINISHED')
  popen.wait()
  # Each test script must leave its results pickled in data.p.
  if not os.path.isfile('data.p'):
    # NOTE(review): "succesfully" is a typo in this user-facing message;
    # left untouched here since a doc-only pass must not alter runtime strings.
    raise RuntimeError("Test did not succesfully complete.")
  with open('data.p', 'rb') as f:
    data = pickle.load(f)
  # Three supported payload layouts: separate X/Y arrays, (x, y) pairs,
  # or bare scalars with no abscissa.
  if 'SOLX' in data.keys():
    xa = data['SOLX']
    ya = data['SOLY']
    xb = data['CODEX']
    yb = data['CODEY']
  elif 'SOL' in data.keys():
    xa = data['SOL'][0]
    ya = data['SOL'][1]
    xb = data['CODE'][0]
    yb = data['CODE'][1]
  elif 'SOL_SCALAR' in data.keys():
    xa = None
    xb = None
    ya = data['SOL_SCALAR']
    yb = data['CODE_SCALAR']
  # Per-test threshold override, else the global default.
  if 'THRESHOLD' in data.keys():
    error_threshold = data['THRESHOLD']
  else:
    error_threshold = ERROR_THRESHOLD
  norm = L1_norm(xa, ya, xb, yb)
  # NOTE(review): ERROR and PASS go through print() while FAIL goes through
  # print2(), so passing results never reach the emailed log — presumably
  # intentional (only failures are reported); confirm.
  print('    ERROR: %.2g %%' % (100*norm))
  if norm < error_threshold:
    print('    PASS\n')
  else:
    print2('    FAIL' + '\n')
    SUBJECT += ' - FAIL'
    FAIL = True
  sp.call(['rm', 'data.p'])
logfile.close()
# Optionally ship the report by SSH-ing to a mail host and building a
# sendmail script there line by line.
if SEND_REPORT:
  import paramiko
  # NOTE(review): 'scp' is created but never used, and hostname/port/username
  # are None unless edited above — this block only works after local
  # configuration; confirm before enabling -email.
  scp = paramiko.Transport((hostname, 22))
  client = paramiko.SSHClient()
  #client.load_system_host_keys()
  client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
  k = paramiko.RSAKey.from_private_key_file('/path/to/private/ssh/key')
  client.connect(hostname, port=port, username=username, pkey=k)
  # Fire-and-forget remote command helper.
  def ssh_cmd(str):
    stdin, stdout, stderr = client.exec_command(str)
  report = 'email_report.sh'
  ssh_cmd('rm ' + report)
  ssh_cmd('echo "#!/bin/bash" >> ' + report)
  ssh_cmd('echo -ne "postfix-3.3.1/bin/sendmail " >> ' + report)
  for address in TO:
    ssh_cmd('echo -ne "' + address + ' " >> ' + report)
  ssh_cmd('echo "<<EOF" >> ' + report)
  ssh_cmd('echo "subject: ' + SUBJECT + '" >> ' + report)
  ssh_cmd('echo "from: ' + FROM + '" >> ' + report)
  # Append the local log, one echoed line at a time.
  with open('test_auto.txt') as lfil:
    for line in lfil:
      ssh_cmd('echo "' + line.rstrip() + '" >> ' + report)
  ssh_cmd('echo "EOF" >> ' + report)
  ssh_cmd('chmod +x ' + report)
  ssh_cmd('./' + report)
  client.close()
|
AFD-IllinoisREPO_NAMEebhlightPATH_START.@ebhlight_extracted@ebhlight-master@test@test_auto.py@.PATH_END.py
|
{
"filename": "xpobsdisplay.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/ixpeobssim/bin/xpobsdisplay.py",
"type": "Python"
}
|
#!/usr/bin/env python
#
# Copyright (C) 2022, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""xpobsdisplay app.
"""
from __future__ import print_function, division
import os
from astropy.io import fits
from astropy.visualization.wcsaxes import WCSAxes
import matplotlib
import numpy
from ixpeobssim import IXPEOBSSIM_DATA
from ixpeobssim.binning.base import xEventBinningBase
from ixpeobssim.core.hist import xHistogram1d
from ixpeobssim.irf import load_arf, load_modf
from ixpeobssim.evt.clustering import DBscan
from ixpeobssim.evt.display import xL1EventFile, xXpolGrid, xDisplayArgumentParser,\
load_event_list, display_event, xDisplayCard
from ixpeobssim.evt.event import xEventFile
from ixpeobssim.evt.kislat2015 import xStokesAnalysis
from ixpeobssim.utils.logging_ import logger
from ixpeobssim.utils.matplotlib_ import plt, setup_gca, setup_gca_stokes,\
plot_ellipse, DEFAULT_COLORS
__description__ = \
"""IXPE composite carousel to display observations.
This application extends xpevtdisplay to include the most relevant cumulative
distributions for a given observation.
"""
# Command-line interface: the shared display parser plus options specific to
# the composite observation view (Stokes-plot range, image geometry, labels).
PARSER = xDisplayArgumentParser(__description__)
PARSER.add_irfname()
PARSER.add_argument('--pdmax', type=float, default=0.2,
    help='maximum polarization degree for the Stokes plot')
PARSER.add_argument('--pdstep', type=float, default=0.05,
    help='polarization degree step for the Stokes plot grid')
PARSER.add_argument('--xref', type=float, default=None,
    help='the horizontal position of the image center')
PARSER.add_argument('--yref', type=float, default=None,
    help='the vertical position of the image center')
PARSER.add_argument('--npix', type=int, default=200,
    help='number of pixels per side for the count map in sky coordinates')
PARSER.add_argument('--pixsize', type=float, default=None,
    help='pixel size in arcseconds for the count map in sky coordinates')
PARSER.add_argument('--subtitle', type=str, default=None,
    help='subtitle for the animation')
def composite_figure(wcs, obs_name=None, figsize=(18., 9.), left_pad=0.06,
    right_pad=0.01, bot_pad=0.1, title_height=0.09):
    """Create a composite figure hosting all the graphical elements for the observation
    display.

    All pad/height arguments are in figure-fraction units.  Returns the figure
    and the eight axes (title, event display, count map, count-map colorbar,
    polarization, spectrum, text card) in a fixed order.

    I have been struggling with this for many hours, trying with subplots and
    gridspecs, and finally came to the realization that the only sensible way to
    achieve a reasonable positioning for the various pads was to use
    matplotlib.figure.add_axes() and calculate all the coordinates by hand.
    """
    width, height = figsize
    aspect_ratio = width / height
    # The event display is square and spans the full height, so its width in
    # figure fraction is height/width; the two plot columns share the rest.
    display_width = height / width
    plot_width = 0.5 * (1. - display_width) - left_pad - right_pad
    display_height = 1. - title_height
    plot_height = 0.5 * display_height
    # Create the top-level figure.
    fig = plt.figure('Observation display (%s)' % obs_name, figsize=figsize)
    # The axis for the title
    ax_title = fig.add_axes((0., display_height, display_width, title_height))
    plt.axis('off')
    # The axes for the event display---this is guaranteed to be square, and
    # placed right on the left of the figure, spanning its entire height.
    ax_display = fig.add_axes((0., 0., display_width, display_height))
    # Now the count map and the polarization axes on the top left of the figure.
    # Note these have a square aspect ratio, and will naturally place themselves
    # in the right place vertically.
    rect = (display_width + left_pad, 0.5, plot_width, 0.5)
    ax_cmap = fig.add_axes(rect, projection=wcs)
    ax_cmap.set_aspect('equal')
    # This additional, ghost axes object is to hold the color bar count map,
    # so that we get it displayed without the count map axes being resized---this
    # a horrible hack, but I could not figure out a better way to do it.
    rect = (display_width + left_pad, 0.525, plot_width, 0.5)
    ax_cmap_colorbar = fig.add_axes(rect)
    plt.axis('off')
    rect = (display_width + plot_width + 2. * left_pad, 0.5, plot_width, 0.5)
    ax_polarization = fig.add_axes(rect)
    #setup_gca_stokes()
    # The spectrum axes on the bottom-right corner.
    rect = (display_width + plot_width + 2. * left_pad, bot_pad, plot_width, 0.475 - bot_pad)
    ax_spectrum = fig.add_axes(rect)
    rect = (display_width, 0., plot_width * width / height, 0.5)
    ax_text = fig.add_axes(rect)
    plt.axis('off')
    ax_text.set_aspect('equal')
    return fig, ax_title, ax_display, ax_cmap, ax_cmap_colorbar, ax_polarization, ax_spectrum, ax_text
def polarization_analysis(q, u, energy, modf, aeff, mask):
    """Run a Stokes analysis over the masked events and return the normalized
    Stokes parameters (QN, UN), their uncertainties (dQN, dUN) and the
    detection significance.

    NOTE(review): this reaches into xStokesAnalysis private methods
    (_sum_stokes_parameters, _effective_mu) — presumably because no public
    incremental API exists; verify against the ixpeobssim version in use.
    """
    analysis = xStokesAnalysis(q, u, energy, modf, aeff, None)
    I, Q, U = analysis._sum_stokes_parameters(mask)
    mu = analysis._effective_mu(mask)
    W2 = analysis.W2(mask)
    QN, UN, dI, dQ, dU, dQN, dUN, cov, pval, conf, sig = analysis.calculate_stokes_errors(I, Q, U, mu, W2)
    #pd, pd_err, pa, pa_err = analysis.calculate_polarization(I, Q, U, mu, W2)
    return QN, UN, dQN, dUN, sig
def met_span(event_file):
    """Return the MET of the first and of the last event in a given
    (Level-1 or Level-2) file, as a two-element tuple.
    """
    times = event_file.hdu_list['EVENTS'].data['TIME']
    start = times[0]
    stop = times[-1]
    # Log the wall-clock span (in ks) for quick sanity checking.
    logger.info('Wall-clock file span: %.6f--%.6f s (%.3f ks)', start, stop,
        (stop - start) / 1000.)
    return start, stop
def xpobsdisplay(**kwargs):
    """Run the observation event display.

    Loops over the events in the event list, and for each one rebuilds the
    full composite panel (title, event display, cumulative count map,
    polarization plot, spectrum and text card) and hands it to display_event().
    Requires both a Level-1 file ('file') and an event list ('evtlist').
    """
    # We do need an event list, here...
    if not kwargs.get('evtlist'):
        raise RuntimeError('Please provide an event list...')
    # Set the random seed, if necessary.
    random_seed = kwargs.get('seed')
    if random_seed is not None:
        logger.info('Setting random seed to %d...', random_seed)
        numpy.random.seed(random_seed)
    # Cache the global settings...
    file_path = kwargs.get('file')
    emin, emax = kwargs.get('emin'), kwargs.get('emax')
    npix = kwargs.get('npix')
    pixsize = kwargs.get('pixsize')
    pdmax = kwargs.get('pdmax')
    pdstep = kwargs.get('pdstep')
    pd_grid = numpy.arange(pdstep, pdmax, pdstep)
    # Styling for the significance annotation on the Stokes plot.
    sig_color = 'black'
    sig_arrowprops=dict(arrowstyle='->', connectionstyle='angle3', color=sig_color)
    sig_kwargs = dict(xycoords='data', textcoords='axes fraction',
        arrowprops=sig_arrowprops, backgroundcolor='white', color=sig_color, ha='center')
    base_file_name = os.path.basename(file_path).replace('.fits', '')
    grid = xXpolGrid(cmap_name=kwargs.get('cmap'), cmap_offset=kwargs.get('cmapoffset'))
    # Open the Level-1 file and retrieve the necessary information.
    l1_file = xL1EventFile(file_path)
    l1_first_met, l1_last_met = met_span(l1_file)
    threshold = l1_file.zero_sup_threshold()
    logger.info('Zero suppression threshold: %d', threshold)
    # Setup the DBscan
    dbscan = DBscan(threshold, min_density_points=kwargs.get('clumindensity'),
        min_cluster_size=kwargs.get('cluminsize'))
    # Open the Level-2 file and retrieve the necessary info,
    l2_file = xEventFile(kwargs.get('evtlist'))
    l2_first_met, l2_last_met = met_span(l2_file)
    # Image center: the file's WCS reference, optionally overridden from the CLI.
    xref, yref = l2_file.wcs_reference()
    if kwargs.get('xref') is not None:
        xref = kwargs.get('xref')
    if kwargs.get('yref') is not None:
        yref = kwargs.get('yref')
    wcs_kwargs = dict(xref=xref, yref=yref, npix=npix, pixsize=pixsize)
    wcs_ = xEventBinningBase._build_image_wcs(default_img_side=10., **wcs_kwargs)
    # Pull the per-event columns out of the Level-2 file once.
    time_data = l2_file.time_data()
    energy_data = l2_file.energy_data()
    ra_data, dec_data = l2_file.sky_position_data()
    q_data, u_data = l2_file.stokes_data()
    energy_mask = numpy.logical_and(energy_data >= emin, energy_data < emax)
    total_events = energy_mask.sum()
    # Setup all the binned data products.
    card = xDisplayCard(kwargs.get('targetname'), l2_file.hdu_list['EVENTS'].header)
    energy_binning = numpy.arange(0., 12.02, 0.04)
    hist_spec = xHistogram1d(energy_binning, xlabel='Energy [keV]', ylabel='Counts')
    cmap_data = numpy.zeros((npix, npix), dtype=float)
    aeff = load_arf(kwargs.get('irfname'), l2_file.du_id())
    modf = load_modf(kwargs.get('irfname'), l2_file.du_id())
    # Load the event data from the event list.
    event_list = load_event_list(kwargs.get('evtlist'), **kwargs)
    # NOTE(review): previous_met is initialized here but never advanced inside
    # the loop below, so each chunk mask spans [time_data[0], met) rather than
    # the interval since the previous displayed event — looks like a missing
    # "previous_met = met" at the end of the loop; confirm against upstream.
    previous_met = time_data[0]
    # Start the loop over the event list.
    for i, (met, energy, ra, dec, q, u) in enumerate(zip(*event_list)):
        # Retrieve the actual event from the underlying level-1 file.
        event = l1_file.bisect_met(met)
        assert abs(event.timestamp - met) <= 2.e-6
        # Create the mask for the current chunck of data.
        logger.info('Filtering events between MET %.6f and %.6f...', previous_met, met)
        mask = numpy.logical_and(time_data >= previous_met, time_data < met)
        logger.info('Done, %d event(s) left before the energy cut.', mask.sum())
        # Update all the binned products: the energy spcetrum...
        hist_spec.fill(energy_data[mask])
        # ... the count map...
        mask *= energy_mask
        x, y, binning = xEventBinningBase._pixelize_skycoords(ra_data[mask], dec_data[mask], wcs_)
        counts, _, _ = numpy.histogram2d(x, y, bins=binning)
        cmap_data += counts
        # ... and the polarization analysis. Note that, instead of keeping track
        # of all the necessary stuff to accumulate the polarization analysis in
        # cuncks, we repeat the entire calculation feeding in all the events up to
        # the current met---this is slightly suboptimal, but simpler and less
        # error-prone.
        mask = time_data < met
        mask *= energy_mask
        qn, un, dqn, dun, sig = polarization_analysis(q_data, u_data, energy_data, modf, aeff, mask)
        # Since we are at it, we take advantage of the fact that the polarization
        # analysis is re-done from the beginning every time to cache the event
        # statistics.
        num_events = mask.sum()
        elapsed_time = met - time_data[0]
        # Create the composite panel---I can't seem to be able to understand why
        # the event display is not refreshed if I don't create and delete the
        # damned thing within the event loop and destroy it at each event.
        # I am sure this is pointing at something fundamentally wrong in the
        # code and I should look at it in details...
        fig, ax_title, ax_display, ax_cmap, ax_cmap_colorbar, ax_polarization,\
            ax_spectrum, ax_text = composite_figure(wcs_)
        # Set the title.
        plt.sca(ax_title)
        title = 'Replay of a sample of events obtained by one of IXPE\'s three detectors'
        subtitle = kwargs.get('subtitle')
        plt.text(0.05, 0.7, title, size='x-large', va='center', ha='left')
        if subtitle is not None:
            plt.text(0.05, 0.3, '(%s)' % subtitle, size='large', va='center', ha='left')
        # Update the count map.
        plt.sca(ax_cmap)
        im = ax_cmap.imshow(cmap_data)
        ax_cmap.set_xlabel('Right Ascension')
        ax_cmap.set_ylabel('Declination')
        plt.grid()
        plt.colorbar(im, ax=ax_cmap_colorbar, location='top')
        # Update the polarization plot.
        plt.sca(ax_polarization)
        color = DEFAULT_COLORS[0]
        plt.plot(qn, un, 'o', color=color)
        # 1/2/3-sigma error ellipses with labels nudged toward the origin.
        for sigma in (1., 2., 3):
            plot_ellipse((qn, un), 2. * sigma * dqn, 2. * sigma * dun, zorder=10, color=color)
            delta = 0.5 * (dqn + dun) * sigma * numpy.sqrt(0.5)
            x0 = qn - delta * numpy.sign(qn)
            y0 = un - delta * numpy.sign(un)
            plt.text(x0, y0, '%.d$\\sigma$' % sigma, color=color, backgroundcolor='white',
                ha='center', va='center', zorder=11, clip_on=True,
                bbox=dict(boxstyle='square,pad=0.', fc='white', ec='none'))
        if sig > 3.:
            text = 'Polarization significance: %.2f $\\sigma$' % sig
            plt.gca().annotate(text, xy=(qn, un), xytext=(0.5, 1.1), **sig_kwargs)
        setup_gca_stokes(side=pdmax, pd_grid=pd_grid)
        # Update the spectrum.
        plt.sca(ax_spectrum)
        hist_spec.plot()
        setup_gca(logy=True, grids=True, xticks=numpy.arange(0., 12.1, 2.))
        # Gray out the energy range excluded from the analysis.
        for x in emin, emax:
            plt.axvline(x, color='gray')
        plt.axvspan(0., emin, alpha=0.25, color='gray')
        plt.axvspan(emax, 12., alpha=0.25, color='gray')
        # Update the text card.
        plt.sca(ax_text)
        # Update the cumulative statistics.
        card.update_cumulative_statistics(num_events, emin, emax)
        # Update the event data.
        card.set_event_data(met, energy, ra, dec, q, u)
        card.draw(x0=0.02, y0=0.99, line_spacing=0.08)
        # Draw the small progress bar.
        frac = num_events / total_events
        radius = 0.085
        pos = (radius + 0.025, 0.575)
        plt.gca().pie([1. - frac, frac], wedgeprops={'width': 0.025}, startangle=90,
            colors=['lightgray', color], center=pos, radius=radius)
        plt.text(*pos, '%.1f%%' % (100. * frac), size='small', ha='center',
            va='center', color='black')
        # I am not sure why, but we do have to reset the canvas size to get the display right.
        plt.gca().set_xlim(0., 1.)
        plt.gca().set_ylim(0., 1.)
        # And, finally, the actual event display---since this is blocking,
        # it needs to go last.
        plt.sca(ax_display)
        file_name = '%s_%04d.%s' % (base_file_name, i, kwargs.get('imgformat'))
        display_event(event, grid, threshold, dbscan, file_name, **kwargs)
        # See the initial remark about the need to destroy the figure.
        plt.close(fig)
def main():
    """Entry point for the xpobsdisplay application."""
    arguments = vars(PARSER.parse_args())
    xpobsdisplay(**arguments)
if __name__ == '__main__':
    main()
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@ixpeobssim@bin@xpobsdisplay.py@.PATH_END.py
|
{
"filename": "_unselected.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/splom/_unselected.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
# Auto-generated plotly validator for the 'unselected' property of 'splom'
# traces; delegates all validation to the CompoundValidator base class.
class UnselectedValidator(_plotly_utils.basevalidators.CompoundValidator):
    def __init__(self, plotly_name="unselected", parent_name="splom", **kwargs):
        """Validate splom.unselected against the Unselected compound data class."""
        super(UnselectedValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Unselected"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            marker
                :class:`plotly.graph_objects.splom.unselected.M
                arker` instance or dict with compatible
                properties
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@splom@_unselected.py@.PATH_END.py
|
{
"filename": "FaultState.py",
"repo_name": "ACS-Community/ACS",
"repo_path": "ACS_extracted/ACS-master/LGPL/CommonSoftware/acsalarmpy/src/Acsalarmpy/FaultState.py",
"type": "Python"
}
|
#! /usr/bin/env python
#*******************************************************************************
# ALMA - Atacama Large Millimiter Array
# (c) National Research Council of Canada, 2008
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# "@(#) $Id: FaultState.py,v 1.2 2008/10/09 19:13:20 agrimstrup Exp $"
#
# who when what
# -------- -------- ----------------------------------------------
# agrimstrup 2008-09-30 created
#
class Properties(dict):
    """
    Dictionary of name/value pairs representing user-defined alarm properties.
    """
    def toXML(self, amountToIndent=6):
        """
        Render the properties as an indented XML fragment.

        Parameters: amountToIndent is the indentation level for this fragment
        Returns: the XML string, or '' when no properties are present
        """
        if not self:
            return ''
        # Property tags are indented three columns deeper than the wrapper.
        inner = '\t'.expandtabs(amountToIndent + 3)
        outer = '\t'.expandtabs(amountToIndent)
        tags = ['<user-properties>\n']
        for name in self:
            tags.append('<property name="%s" value="%s"/>\n' % (name, self[name]))
        body = inner.join(tags)
        return outer.join([body, '</user-properties>\n'])
# FaultState constants
# Descriptor values for the FaultState.descriptor attribute.
ACTIVE_STRING = "ACTIVE"
TERMINATE_STRING = "TERMINATE"
CHANGE_STRING = "CHANGE"
# NOTE(review): unlike the other descriptors, this value keeps the "_STRING"
# suffix — presumably intentional to mirror the Java-side constant, but it
# breaks the ACTIVE/TERMINATE/CHANGE naming pattern; confirm upstream.
INSTANT_STRING = "INSTANT_STRING";
ASI_PREFIX_PROPERTY_STRING = "ASI_PREFIX_PROPERTY";
ASI_SUFFIX_PROPERTY_STRING = "ASI_SUFFIX_PROPERTY";
class FaultState(object):
    """
    Class representing a single fault to be sent from an alarm source to the LASER
    alarm server.
    """
    def __init__(self, family=None, member=None, code=None):
        """
        Create a fault state object.

        Parameters: family is the name of the alarm family
                    member is the name of the alarm family member
                    code is the error code being reported
        Returns: initialized FaultState object
        """
        # The (family, member, code) triplet is only stored when fully
        # specified; a partial triplet would identify no valid alarm.
        if family is not None and member is not None and code is not None:
            self.family = family
            self.member = member
            self.code = code
        else:
            self.family = None
            self.member = None
            self.code = None
        self.userProperties = Properties()   # optional user name/value pairs
        self.userTimestamp = None            # optional timestamp object with a toXML() method
        self.descriptor = None               # one of the *_STRING descriptor constants
        self.activatedByBackup = None
        self.terminatedByBackup = None
    def toXML(self, amountToIndent=3):
        """
        Create an XML fragment representing the fault's state.

        Parameter: amountToIndent is the indentation level for this fragment
        Returns: an indented XML string
        Raises: TypeError if the family, member or code has not been set correctly
        """
        # Validate before doing any work.  Note the parenthesised raise form:
        # the original "raise TypeError, ..." statement is Python-2-only
        # syntax; this form is valid in both Python 2 and Python 3.
        if self.family is None or self.member is None or self.code is None:
            raise TypeError("Family, member and code information must be provided")
        taglist = []
        pad = '\t'.expandtabs(amountToIndent)
        taglist.append('<fault-state family="%s" member="%s" code="%d">\n' % (self.family, self.member, self.code))
        if self.descriptor is not None:
            taglist.append('<descriptor>%s</descriptor>\n' % self.descriptor)
        if len(self.userProperties) > 0:
            taglist.append(self.userProperties.toXML(amountToIndent))
        if self.userTimestamp is not None:
            taglist.append(self.userTimestamp.toXML(amountToIndent=0))
        rtn = pad.join(taglist)
        return rtn + '</fault-state>\n'
#
# ___oOo___
|
ACS-CommunityREPO_NAMEACSPATH_START.@ACS_extracted@ACS-master@LGPL@CommonSoftware@acsalarmpy@src@Acsalarmpy@FaultState.py@.PATH_END.py
|
{
"filename": "modulation.py",
"repo_name": "jtdinsmore/leakagelib",
"repo_path": "leakagelib_extracted/leakagelib-main/src/modulation.py",
"type": "Python"
}
|
import numpy as np
from scipy.interpolate import interp1d
from .settings import LEAKAGE_DATA_DIRECTORY
def load_generic_modf_file(filename):
    """Build an energy -> modulation-factor interpolator from *filename*.

    The .npy file is expected to hold a (2, N) array whose rows are the
    tabulated energies and modulation factors; energies outside the tabulated
    range map to 1.
    """
    energies, modfs = np.load(filename)
    return interp1d(
        energies,
        modfs,
        bounds_error=False,
        fill_value=1,
    )
def get_mom_modf(energies):
    '''Return the moments-analysis modulation factor evaluated at *energies*.

    The underlying table was generated with the IXPEobssim load_modf function
    using ixpe:obssim:v12 data, averaged over all three detectors.  Credit to
    the ixpeobssim team for making the code available; the ixpeobssim function
    is deliberately not imported so that LeakageLib does not depend on it.'''
    modf_path = f"{LEAKAGE_DATA_DIRECTORY}/modulation/mom.npy"
    return load_generic_modf_file(modf_path)(energies)
def get_nn_modf(energies):
    '''Return the neural-net modulation factor evaluated at *energies*.

    The underlying table was generated by averaging the spectrum of weights
    across several distributions, exploiting the fact that neural-net weights
    are nearly optimal and optimal weights equal the instrument's modulation
    factor.'''
    print("WARNING: The NN modulation factor is experimental and is in active development by the authors. Please contact them (jtd@stanford.edu) for more information.")
    modf_path = f"{LEAKAGE_DATA_DIRECTORY}/modulation/nn.npy"
    return load_generic_modf_file(modf_path)(energies)
|
jtdinsmoreREPO_NAMEleakagelibPATH_START.@leakagelib_extracted@leakagelib-main@src@modulation.py@.PATH_END.py
|
{
"filename": "run.py",
"repo_name": "aasensio/hazel",
"repo_path": "hazel_extracted/hazel-master/run/run.py",
"type": "Python"
}
|
#!/usr/bin/env python
from __future__ import print_function
from configobj import ConfigObj
import sys
import os
from subprocess import call
def lower_to_sep(string, separator='='):
    """Lower-case the text before the first `separator`; keep the rest intact.

    Used to normalize configuration keys (left of '=') to lowercase while
    preserving the case of the value. If the separator is absent, the whole
    string is lower-cased (partition returns empty sep/tail).
    """
    head, sep, tail = string.partition(separator)
    return head.lower() + sep + tail
# ---------------------------------------------------------------
# Read the user's .ini file, lower-casing all keys so that the
# parsing below is insensitive to upper/lower case.
# ---------------------------------------------------------------
if (len(sys.argv) < 2):
    print("Example usage: runHazel conf.ini")
    sys.exit()

print("Using configuration file = "+sys.argv[1])

# Transform all keys to lowercase to avoid problems with
# upper/lower case. A context manager guarantees the file is closed.
with open(sys.argv[1], 'r') as f:
    input_lines = f.readlines()

input_lower = ['']
for l in input_lines:
    input_lower.append(lower_to_sep(l))  # Convert keys to lowercase

# Parse configuration file
config = ConfigObj(input_lower)
#*********************************
# Save config_inversion.dat file
#*********************************
# A context manager guarantees the file is closed even when a required
# configuration key is missing (KeyError) or we abort via sys.exit();
# the original open()/close() pair leaked the handle on those paths.
with open('config_inversion.dat', 'w') as f:
    f.write('***********************************************\n')
    f.write('* Configuration file for the multi-term program\n')
    f.write('***********************************************\n')
    f.write('\n')
    f.write("# Input model file\n")
    f.write("'"+config['files']['input model file']+"'\n")
    f.write('\n')
    f.write('# Initial parameters file\n')
    f.write("'init_parameters.dat'\n")
    f.write('\n')
    f.write('# Range of parameters for the DIRECT method\n')
    f.write("'direct_range.dat'\n")
    f.write('\n')
    f.write("# Output file for the upper level rho^K_Q(J,J') in the reference frame of the vertical\n")
    f.write("'ATOMIC_POL/vertical_upper.rho'\n")
    f.write('\n')
    f.write("# Output file for the lower level rho^K_Q(J,J') in the reference frame of the vertical\n")
    f.write("'ATOMIC_POL/vertical_lower.rho'\n")
    f.write('\n')
    f.write("# Output file for the upper level rho^K_Q(J,J') in the reference frame of the magnetic field\n")
    f.write("'ATOMIC_POL/magnetic_upper.rho'\n")
    f.write('\n')
    f.write("# Output file for the lower level rho^K_Q(J,J') in the reference frame of the magnetic field\n")
    f.write("'ATOMIC_POL/magnetic_lower.rho'\n")
    f.write('\n')
    f.write("# Output absorption/emission coefficients\n")
    f.write("'INVERTED/rtcoef.emer'\n")
    f.write('\n')
    f.write("# Output absorption/emission coefficients neglecting atomic polarization\n")
    f.write("'INVERTED/rtcoef_noatompol.emer'\n")
    f.write('\n')
    f.write("# File with the observed profiles\n")
    f.write("'"+config['files']['file with observations']+"'\n")
    f.write('\n')
    f.write("# File with the inverted profiles\n")
    f.write("'"+config['files']['file with inverted profiles']+"'\n")
    f.write('\n')
    f.write("# File with the parameters from the inversion\n")
    f.write("'"+config['files']['file with inverted parameters']+"'\n")
    f.write('\n')
    f.write("# File with the final errors\n")
    f.write("'"+config['files']['file with errors in inverted parameters']+"'\n")
    f.write('\n')
    f.write("# File that sets the parameters to invert\n")
    f.write("'invert_parameters.dat'\n")
    f.write('\n')
    f.write("# Verbose mode (0-> no, 1-> yes)\n")
    # The downstream code expects 1/0 flags rather than yes/no strings
    if (config['working mode']['verbose'] == 'yes'):
        f.write('1\n')
    else:
        f.write('0\n')
    f.write('\n')
    f.write("# Linear system solver (0-> LU, 1-> CG)\n")
    if (config['working mode']['linear system solver'] == 'LU'):
        f.write('0\n')
    else:
        f.write('1\n')
    f.write('\n')
    f.write("# Optically thin (0), slab no-MO (1), M-E (2), slab DELOPAR (3), simplified slab (4), exact slab (5)\n")
    if (config['general parameters']['synthesis mode'] == 'exact'):
        f.write('5\n')
    elif (config['general parameters']['synthesis mode'] == 'thin'):
        f.write('0\n')
    else:
        # Unsupported mode: report and abort; the with-block closes the file
        print("Synthesis mode not supported : {0}".format(config['general parameters']['synthesis mode']))
        sys.exit()
    f.write('\n')
    f.write("# Synthesis mode -> 0 , Inversion mode -> 1\n")
    if (config['working mode']['action'] == 'synthesis'):
        f.write('0')
    elif (config['working mode']['action'] == 'inversion'):
        f.write('1')
    else:
        print("Action mode not supported : {0}".format(config['working mode']['action']))
        sys.exit()
#*********************************
# Save direct_range.dat file
#*********************************
# Parameter ranges explored by the DIRECT global optimizer. The context
# manager closes the file even if a config key lookup raises.
with open('direct_range.dat', 'w') as f:
    f.write("***********************************************\n")
    f.write("* Ranges for the DIRECT method\n")
    f.write("***********************************************\n")
    f.write("\n")
    f.write("# Maximum number of function evaluations (<0 -> don't use this criteria)\n")
    f.write("-1\n")
    f.write("\n")
    f.write("# Reduction in the volume (<0 -> don't use this criteria, typically 0.01)\n")
    f.write(config['working mode']['stopping volume for direct']+"\n")
    f.write("\n")
    f.write("# Magnetic field (0-Bmax)\n")
    f.write(" ".join(config['ranges']['slab 1']['b'])+"\n")
    f.write("\n")
    f.write("# thetab (0 .. 180)\n")
    f.write(" ".join(config['ranges']['slab 1']['thetab'])+"\n")
    f.write("\n")
    f.write("# chib (0 .. 180)\n")
    f.write(" ".join(config['ranges']['slab 1']['chib'])+"\n")
    f.write("\n")
    f.write("# vdopp (0 .. 20)\n")
    f.write(" ".join(config['ranges']['slab 1']['vdopp'])+"\n")
    f.write("\n")
    f.write("# dtau (0 .. 5)\n")
    f.write(" ".join(config['ranges']['slab 1']['tau'])+"\n")
    f.write("\n")
    f.write("# delta_collision (0 .. 18)\n")
    # This range is hard-wired; it is not exposed in the .ini interface
    f.write("0.d0 18.d0\n")
    f.write("\n")
    f.write("# vmacro (-10 .. 10)\n")
    f.write(" ".join(config['ranges']['slab 1']['vmac'])+"\n")
    f.write("\n")
    f.write("# damping (0 .. 4)\n")
    f.write(" ".join(config['ranges']['a'])+"\n")
    f.write("\n")
    f.write("# beta (0 .. 10)\n")
    f.write(" ".join(config['ranges']['slab 1']['beta'])+"\n")
    f.write("\n")
    f.write("# height (0 .. 100)\n")
    # Hard-wired range, not exposed in the .ini interface
    f.write("0.d0 100.d0\n")
    f.write("\n")
    f.write("# dtau2 (0 .. 5)\n")
    f.write(" ".join(config['ranges']['slab 2']['tau'])+"\n")
    f.write("\n")
    f.write("# vmacro2 (-10 .. 10)\n")
    f.write(" ".join(config['ranges']['slab 2']['vmac'])+"\n")
    f.write("\n")
    f.write("# Magnetic field 2 (0-Bmax)\n")
    f.write(" ".join(config['ranges']['slab 2']['b'])+"\n")
    f.write("\n")
    f.write("# thetab 2 (0 .. 180)\n")
    f.write(" ".join(config['ranges']['slab 2']['thetab'])+"\n")
    f.write("\n")
    f.write("# chib 2 (0 .. 180)\n")
    f.write(" ".join(config['ranges']['slab 2']['chib'])+"\n")
    f.write("\n")
    f.write("# vdopp 2 (0 .. 20)\n")
    f.write(" ".join(config['ranges']['slab 2']['vdopp'])+"\n")
    f.write("\n")
    f.write("# ff\n")
    f.write(" ".join(config['ranges']['ff'])+"\n")
    f.write("\n")
    f.write("# beta2 (0 .. 10)\n")
    f.write(" ".join(config['ranges']['slab 2']['beta'])+"\n")
#*********************************
# Save invert_parameters.dat file
#*********************************
# Map the human-readable optimizer names to the integer codes expected
# by the inversion code (see the comment written with the file below).
method = {'DIRECT': '2', 'LM': '1'}
# Context manager: the file is closed even if a config key is missing.
with open('invert_parameters.dat', 'w') as f:
    f.write("***********************************************\n")
    f.write("* File defining the parameters to invert\n")
    f.write("***********************************************\n")
    f.write("\n")
    f.write("# Maximum number of iterations\n")
    f.write(config['inversion']['iterations in lm']+"\n")
    f.write("\n")
    f.write("# Number of cycles\n")
    f.write(config['inversion']['number of cycles']+"\n")
    f.write("\n")
    f.write("# Invert the magnetic field strength\n")
    f.write(" ".join(config['inversion']['cycles']['slab 1']['b'])+"\n")
    f.write("\n")
    f.write("# Invert the magnetic field inclination\n")
    f.write(" ".join(config['inversion']['cycles']['slab 1']['thetab'])+"\n")
    f.write("\n")
    f.write("# Invert the magnetic field azimuth\n")
    f.write(" ".join(config['inversion']['cycles']['slab 1']['chib'])+"\n")
    f.write("\n")
    f.write("# Invert the Doppler width\n")
    f.write(" ".join(config['inversion']['cycles']['slab 1']['vdopp'])+"\n")
    f.write("\n")
    f.write("# Invert the optical depth or strength of the line\n")
    f.write(" ".join(config['inversion']['cycles']['slab 1']['tau'])+"\n")
    f.write("\n")
    f.write("# Invert the D^2 of the lower level\n")
    # D^2 is never inverted from the .ini interface: write one "0" per cycle
    f.write(int(config['inversion']['number of cycles'])*" 0" + "\n")
    f.write("\n")
    f.write("# Invert the macroscopic velocity\n")
    f.write(" ".join(config['inversion']['cycles']['slab 1']['vmac'])+"\n")
    f.write("\n")
    f.write("# Invert the damping\n")
    f.write(" ".join(config['inversion']['cycles']['a'])+"\n")
    f.write("\n")
    f.write("# Invert beta\n")
    # BUGFIX: this slot is the first slab's beta (the second slab's beta is
    # written below under "Invert beta2", matching the direct_range layout);
    # the original mistakenly wrote the 'slab 2' value here too.
    f.write(" ".join(config['inversion']['cycles']['slab 1']['beta'])+"\n")
    f.write("\n")
    f.write("# Invert the height of the He atoms\n")
    # Height is never inverted from the .ini interface: one "0" per cycle
    f.write(int(config['inversion']['number of cycles'])*" 0" + "\n")
    f.write("\n")
    f.write("# Invert the optical depth or strength of the line of component 2\n")
    f.write(" ".join(config['inversion']['cycles']['slab 2']['tau'])+"\n")
    f.write("\n")
    f.write("# Invert the macroscopic velocity of component 2\n")
    f.write(" ".join(config['inversion']['cycles']['slab 2']['vmac'])+"\n")
    f.write("\n")
    f.write("# Invert the magnetic field strength of component 2\n")
    f.write(" ".join(config['inversion']['cycles']['slab 2']['b'])+"\n")
    f.write("\n")
    f.write("# Invert the magnetic field inclination of component 2\n")
    f.write(" ".join(config['inversion']['cycles']['slab 2']['thetab'])+"\n")
    f.write("\n")
    f.write("# Invert the magnetic field azimuth of component 2\n")
    f.write(" ".join(config['inversion']['cycles']['slab 2']['chib'])+"\n")
    f.write("\n")
    f.write("# Invert the Doppler width of component 2\n")
    f.write(" ".join(config['inversion']['cycles']['slab 2']['vdopp'])+"\n")
    f.write("\n")
    f.write("# Invert filling factor\n")
    f.write(" ".join(config['inversion']['cycles']['ff'])+"\n")
    f.write("\n")
    f.write("# Invert beta2\n")
    f.write(" ".join(config['inversion']['cycles']['slab 2']['beta'])+"\n")
    f.write("\n")
    f.write("# Weights for Stokes I in each cycle\n")
    f.write(" ".join(config['inversion']['weights']['stokes i'])+"\n")
    f.write("\n")
    f.write("# Weights for Stokes Q in each cycle\n")
    f.write(" ".join(config['inversion']['weights']['stokes q'])+"\n")
    f.write("\n")
    f.write("# Weights for Stokes U in each cycle\n")
    f.write(" ".join(config['inversion']['weights']['stokes u'])+"\n")
    f.write("\n")
    f.write("# Weights for Stokes V in each cycle\n")
    f.write(" ".join(config['inversion']['weights']['stokes v'])+"\n")
    f.write("\n")
    f.write("# Inversion modes (1-> LM, 2-> DIRECT, 3-> PIKAIA)\n")
    f.write(" ".join([method[i] for i in config['inversion']['inversion modes']]))
#*********************************
# Save init_parameters.dat file
#*********************************
# Define some dictionaries to facilitate writing the file
yesno = {'yes': '1', 'no': '0'}
# Slab configuration codes understood by the code; '-2' selects two slabs
# combined with a filling factor
slabs = {'1': '1', '1+1': '2', '1+1B': '3', '2': '-2'}
multiplets = {'10830': '1', '3889': '2', '7065': '3', '5876': '4'}
# Central wavelength [A] of each supported multiplet
lambda0 = {'10830': '10829.0911', '3889': '3888.6046', '7065': '7065.7085', '5876': '5876.9663'}

# Any multi-slab option writes parameters for two components
nSlabs = 1 if config['synthesis']['number of slabs'] == '1' else 2
# The filling factor is only written for the '2' configuration
includeff = config['synthesis']['number of slabs'] == '2'

# Context manager: the file is closed even if a config key is missing.
with open('init_parameters.dat', 'w') as f:
    f.write("***********************************************\n")
    f.write("* File defining the specific experiment to solve\n")
    f.write("***********************************************\n")
    f.write("\n")
    f.write("# Include stimulated emission (0-> no, 1-> yes)\n")
    f.write(yesno[config['general parameters']['include stimulated emission']]+"\n")
    f.write("\n")
    f.write("# Include magnetic field (0-> no, 1-> yes)\n")
    f.write(yesno[config['general parameters']['include magnetic field']]+"\n")
    f.write("\n")
    f.write("# Include depolarization rates (0-> no, 1-> yes)\n")
    # Depolarization rates are not exposed in the .ini interface
    f.write("0\n")
    f.write("\n")
    f.write("# Value of delta if depolarization rates are included (not used if the previous value is 0)\n")
    f.write("0.0\n")
    f.write("\n")
    f.write("# Include Paschen-Back effect (0-> no, 1-> yes)\n")
    f.write(yesno[config['general parameters']['include paschen-back effect']]+"\n")
    f.write("\n")
    f.write("# Number of slabs (1-> 1 slab, 2-> 2 slabs with same B, 3-> 2 slabs with different B (add field below))\n")
    f.write(slabs[config['synthesis']['number of slabs']]+"\n")
    f.write("\n")
    f.write("# Magnetic field strength [G], thetaB [degrees], chiB [degrees]\n")
    if (nSlabs == 1):
        f.write(config['synthesis']['slab 1']['b']+" "+config['synthesis']['slab 1']['thetab']+" "+config['synthesis']['slab 1']['chib']+"\n")
    else:
        f.write(config['synthesis']['slab 1']['b']+" "+config['synthesis']['slab 1']['thetab']+" "+config['synthesis']['slab 1']['chib']+" " +
                config['synthesis']['slab 2']['b']+" "+config['synthesis']['slab 2']['thetab']+" "+config['synthesis']['slab 2']['chib']+"\n")
    f.write("\n")
    f.write("# Apparent height (if negative) or real height (if positive) of the atoms in arcsec\n")
    f.write(config['synthesis']['height']+"\n")
    f.write("\n")
    f.write("# Optical depth of the slab in the maximum of I (slab) or strength of the line (ME)\n")
    if (nSlabs == 1):
        f.write(config['synthesis']['slab 1']['tau']+"\n")
    else:
        if (includeff):
            f.write(config['synthesis']['slab 1']['tau']+" "+config['synthesis']['slab 2']['tau']+" "+config['synthesis']['ff']+"\n")
        else:
            f.write(config['synthesis']['slab 1']['tau']+" "+config['synthesis']['slab 2']['tau']+"\n")
    f.write("\n")
    f.write("# Source function enhancement\n")
    if (nSlabs == 1):
        f.write(config['synthesis']['slab 1']['beta']+"\n")
    else:
        f.write(config['synthesis']['slab 1']['beta']+" "+config['synthesis']['slab 2']['beta']+"\n")
    f.write("\n")
    f.write("# Boundary Stokes parameters (I0,Q0,U0,V0) 4.098093d-5 for 10830 A at disk center\n")
    # A list means an explicit (I0,Q0,U0,V0) quadruplet; a plain string is
    # interpreted as the name of a file with the boundary condition
    if (type(config['synthesis']['boundary condition']) is list):
        f.write("'single'\n")
        f.write(" ".join(config['synthesis']['boundary condition'])+"\n")
    else:
        f.write("'file'\n")
        f.write("'{0}'\n".format(config['synthesis']['boundary condition']))
    f.write("\n")
    f.write("# Transition where to compute the emergent Stokes profiles\n")
    f.write(multiplets[config['general parameters']['multiplet']]+"\n")
    f.write("\n")
    f.write("# Include atomic level polarization? (0-> no, 1-> yes)\n")
    f.write(yesno[config['general parameters']['include atomic level polarization']]+"\n")
    f.write("\n")
    f.write("# Observation angle with respect to the local solar vertical theta,chi,gamma [degrees]\n")
    f.write(" ".join(config['general parameters']['line-of-sight angles'])+"\n")
    f.write("\n")
    f.write("# Wavelength axis: minimum, maximum and number of grid points\n")
    # NOTE(review): the -1e8/lambda0**2 factor converts the axis limits and
    # the negation swaps min/max — confirm the exact units against the
    # Fortran reader before changing anything here.
    rightLambda = -float(config['general parameters']['wavelength axis'][0]) * 1e8 / float(lambda0[config['general parameters']['multiplet']])**2
    leftLambda = -float(config['general parameters']['wavelength axis'][1]) * 1e8 / float(lambda0[config['general parameters']['multiplet']])**2
    axis_entries = [str(leftLambda), str(rightLambda), config['general parameters']['wavelength axis'][2]]
    f.write(" ".join(axis_entries)+"\n")
    f.write("\n")
    f.write("# Line wavelength [A], Doppler velocity [km/s] and damping [a]\n")
    if (nSlabs == 1):
        f.write(lambda0[config['general parameters']['multiplet']]+" "+config['synthesis']['slab 1']['vdopp']+" "+config['synthesis']['a']+"\n")
    else:
        f.write(lambda0[config['general parameters']['multiplet']]+" "+config['synthesis']['slab 1']['vdopp']+" "+config['synthesis']['slab 2']['vdopp']+" "+config['synthesis']['a']+"\n")
    f.write("\n")
    f.write("# Macroscopic velocity [km/s] (>0 is a redshift)\n")
    if (nSlabs == 1):
        f.write(config['synthesis']['slab 1']['vmac']+"\n")
    else:
        f.write(config['synthesis']['slab 1']['vmac']+" "+config['synthesis']['slab 2']['vmac']+"\n")
    f.write("\n")
    f.write("# Include magneto-optical effects in the RT\n")
    f.write(yesno[config['general parameters']['include magneto-optical effects in the rt']]+"\n")
    f.write("\n")
    f.write("# Include stimulated emission in the RT\n")
    f.write(yesno[config['general parameters']['include stimulated emission in the rt']])
# Run the code
try:
    call(['./hazel'])
except OSError:
    # Only a failure to launch the executable (missing file, no execute
    # permission) is handled; the original bare 'except' also swallowed
    # KeyboardInterrupt and SystemExit, hiding real problems.
    print("A problem occurred when calling Hazel. Exiting...")
|
aasensioREPO_NAMEhazelPATH_START.@hazel_extracted@hazel-master@run@run.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "bradkav/EarthShadow",
"repo_path": "EarthShadow_extracted/EarthShadow-master/scripts/README.md",
"type": "Markdown"
}
|
### Plotting scripts
Here, I will add python scripts for processing and plotting the `EarthShadow` results.
At the moment, the script `PlotRates_CRESST_Earth.py` can be used to plot a whole-Earth map of the CRESST-II rate for low mass DM.
More scripts (for plotting orthographic projections, animations etc.) will be added soon.
|
bradkavREPO_NAMEEarthShadowPATH_START.@EarthShadow_extracted@EarthShadow-master@scripts@README.md@.PATH_END.py
|
{
"filename": "UniquelyIdentifyingParticlesWithHashes.ipynb",
"repo_name": "tigerchenlu98/rebound",
"repo_path": "rebound_extracted/rebound-main/ipython_examples/UniquelyIdentifyingParticlesWithHashes.ipynb",
"type": "Jupyter Notebook"
}
|
# Uniquely Identifying Particles With Hashes
In many cases, one can just identify particles by their position in the particle array, e.g. using ``sim.particles[5]``. However, in cases where particles might get reordered in the particle array finding a particle might be difficult. This is why we added a *hash* attribute to particles.
In REBOUND particles might get rearranged when a tree code is used for the gravity or collision routine, when particles merge, when a particle leaves the simulation box, or when you manually remove or add particles. In general, therefore, the user should not assume that particles stay at the same index or in the same location in memory. The reliable way to access particles is to assign them hashes and to access particles through them. Assigning hashes makes ``sim.particles`` behave like Python's `dict` while keeping list-like integer-based indexing at the same time.
**Note**: When you don't assign particles a hash, they automatically get set to 0. The user is responsible for making sure hashes are unique, so if you set up particles without a hash and later set a particle's hash to 0, you don't know which one you'll get back when you access hash 0. See [Possible Pitfalls](#Possible-Pitfalls) below.
In this example, we show the basic usage of the *hash* attribute.
```python
import rebound
sim = rebound.Simulation()
sim.add(m=1., hash=999)
sim.add(a=0.4, hash="mercury")
sim.add(a=1., hash="earth")
sim.add(a=5., hash="jupiter")
sim.add(a=7.)
```
We can now not only access the Earth particle with:
```python
sim.particles[2]
```
<rebound.Particle object, m=0.0 x=1.0 y=0.0 z=0.0 vx=0.0 vy=1.0 vz=0.0>
but also with
```python
sim.particles["earth"]
```
<rebound.Particle object, m=0.0 x=1.0 y=0.0 z=0.0 vx=0.0 vy=1.0 vz=0.0>
We can access particles with negative indices like a list. We can get the last particle with
```python
sim.particles[-1]
```
<rebound.Particle object, m=0.0 x=7.0 y=0.0 z=0.0 vx=0.0 vy=0.3779644730092272 vz=0.0>
We can also set the hash after a particle is added.
```python
sim.particles[-1].hash = 'pluto'
sim.particles['pluto']
```
<rebound.Particle object, m=0.0 x=7.0 y=0.0 z=0.0 vx=0.0 vy=0.3779644730092272 vz=0.0>
### Details
We usually use strings as hashes; however, under the hood a hash is an unsigned integer (`c_uint`). There is a function `rebound.hash` that calculates the actual hash of a string.
```python
from rebound import hash as h
h("earth")
```
c_uint(1424801690)
The same function can be applied to integers. In this case it just casts the value to the underlying C datatype (`c_uint`).
```python
h(999)
```
c_uint(999)
```python
h(-2)
```
c_uint(4294967294)
When we above set the hash to some value, REBOUND converted this value to an unsigned integer using the same `rebound.hash` function.
```python
sim.particles[0].hash # particle was created with sim.add(m=1., hash=999)
```
c_uint(999)
```python
sim.particles[2].hash
# particle was created with sim.add(a=1., hash="earth")
# so the hash is the same as h("earth") above
```
c_uint(1424801690)
When we use string as an index to access particle, function `rebound.hash` is applied to the index and a particle with this hash is returned. On the other hand, if we use integer index, it is not treated as a hash, REBOUND just returns a particle with given position in array, i.e. `sim.particles[0]` is the first particle, etc.
We can access particles through their hash directly. However, to differentiate from passing an integer index, we have to first cast the hash to the underlying C datatype by using `rebound.hash` manually.
```python
sim.particles[h(999)]
```
<rebound.Particle object, m=1.0 x=0.0 y=0.0 z=0.0 vx=0.0 vy=0.0 vz=0.0>
which corresponds to `particles[0]` as it should. `sim.particles[999]` would try to access index 999, which doesn't exist in the simulation, and REBOUND would raise an AttributeError.
The hash attribute always returns the appropriate unsigned integer ctypes type. (Depending on your computer architecture, `ctypes.c_uint32` can be an alias for another `ctypes` type).
So we could also access the earth with:
```python
sim.particles[h(1424801690)]
```
<rebound.Particle object, m=0.0 x=1.0 y=0.0 z=0.0 vx=0.0 vy=1.0 vz=0.0>
The numeric hashes could be useful in cases where you have a lot of particles you don't want to assign individual names, but you still need to keep track of them individually as they get rearranged:
```python
for i in range(1,100):
sim.add(m=0., a=i, hash=i)
```
```python
sim.particles[99].a
```
95.0
```python
sim.particles[h(99)].a
```
98.99999999999999
### Possible Pitfalls
The user is responsible for making sure the hashes are unique. If two particles share the same hash, you could get either one when you access them using their hash (in most cases the first hit in the `particles` array). Two random strings used for hashes have a $\sim 10^{-9}$ chance of clashing. The most common case is setting a hash to 0:
```python
sim = rebound.Simulation()
sim.add(m=1., hash=0)
sim.add(a=1., hash="earth")
sim.add(a=5.)
sim.particles[h(0)]
```
<rebound.Particle object, m=0.0 x=5.0 y=0.0 z=0.0 vx=0.0 vy=0.4472135954999579 vz=0.0>
Here we expected to get back the first particle, but instead got the last one. This is because we didn't assign a hash to the last particle and it got automatically set to 0. If we give hashes to all the particles in the simulation, then there's no clash:
```python
sim = rebound.Simulation()
sim.add(m=1., hash=0)
sim.add(a=1., hash="earth")
sim.add(a=5., hash="jupiter")
sim.particles[h(0)]
```
<rebound.Particle object, m=1.0 x=0.0 y=0.0 z=0.0 vx=0.0 vy=0.0 vz=0.0>
Due to details of the `ctypes` library, comparing two `ctypes.c_uint32` instances for equality fails:
```python
h(32) == h(32)
```
False
You have to compare the value
```python
h(32).value == h(32).value
```
True
See the docs for further information: https://docs.python.org/3/library/ctypes.html
```python
```
|
tigerchenlu98REPO_NAMEreboundPATH_START.@rebound_extracted@rebound-main@ipython_examples@UniquelyIdentifyingParticlesWithHashes.ipynb@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.