metadata
dict | text
stringlengths 0
40.6M
| id
stringlengths 14
255
|
|---|---|---|
{
"filename": "test_opt.py",
"repo_name": "mikecokina/elisa",
"repo_path": "elisa_extracted/elisa-master/unittests/test_opt.py",
"type": "Python"
}
|
# keep it first
# due to stupid astropy units/constants implementation
from unittests import set_astropy_units
import numpy as np
from elisa.opt.newton import newton
from unittests.utils import ElisaTestCase
# Configure astropy units once, before any test in this module runs.
set_astropy_units()
class NewtonSolverTestCase(ElisaTestCase):
    """Tests for the elisa Newton root solver on scalar and vector inputs."""

    @staticmethod
    def x_square(x, *args):
        # Cubic test function f(x) = a * (x + 1)^3 - shift, whose only real
        # root is x = -1 when shift == 0.  The coefficient `a` arrives
        # packed inside args[0].
        (coefficient,) = args[0]
        shift = args[1]
        return coefficient * np.power(x + 1, 3) - shift

    @staticmethod
    def d_x_square(x, *args):
        # Analytic derivative f'(x) = 3 * a * (x + 1)^2.
        # NOTE(review): this unpacks `a` straight from args rather than
        # args[0] as x_square does -- presumably the solver forwards a
        # different args layout to fprime; confirm against elisa.opt.newton.
        (coefficient,) = args
        return 3.0 * coefficient * np.power(x + 1, 2)

    def test_solver(self):
        # Scalar problem: a = 1.0, shift = 0.0, root expected at -1.
        initial_guess = 1.001
        solver_args = ((1.0,), 0.0)
        root = newton(self.x_square, initial_guess, fprime=self.d_x_square,
                      args=solver_args, maxiter=100, rtol=1e-10)
        self.assertEqual(-1.0, round(root, 7))

    def test_return_matrix(self):
        # Vectorized problem: two coefficients solved simultaneously,
        # both converging to the shared root at -1.
        initial_guess = np.array([1.001, 1.002])
        solver_args = ((np.array([1.0, 3.0]),), 0.0)
        roots = np.round(newton(self.x_square, initial_guess, fprime=self.d_x_square,
                                args=solver_args, maxiter=100, rtol=1e-10), 7)
        self.assertTrue(np.all(roots == np.array([-1., -1.])))
|
mikecokinaREPO_NAMEelisaPATH_START.@elisa_extracted@elisa-master@unittests@test_opt.py@.PATH_END.py
|
{
"filename": "tensorflow_datasets.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/community/langchain_community/document_loaders/tensorflow_datasets.py",
"type": "Python"
}
|
from typing import Callable, Dict, Iterator, Optional
from langchain_core.documents import Document
from langchain_community.document_loaders.base import BaseLoader
from langchain_community.utilities.tensorflow_datasets import TensorflowDatasets
class TensorflowDatasetLoader(BaseLoader):
    """Document loader backed by a `TensorFlow Dataset`.

    Attributes:
        dataset_name: name of the dataset to load.
        split_name: name of the split to load.
        load_max_docs: upper bound on the number of loaded documents.
            Defaults to 100.
        sample_to_document_function: callable that converts one dataset
            sample into a Document.

    Example:
        .. code-block:: python

            from langchain_community.document_loaders import TensorflowDatasetLoader

            def mlqaen_example_to_document(example: dict) -> Document:
                return Document(
                    page_content=decode_to_str(example["context"]),
                    metadata={
                        "id": decode_to_str(example["id"]),
                        "title": decode_to_str(example["title"]),
                        "question": decode_to_str(example["question"]),
                        "answer": decode_to_str(example["answers"]["text"][0]),
                    },
                )

            tsds_client = TensorflowDatasetLoader(
                dataset_name="mlqa/en",
                split_name="test",
                load_max_docs=100,
                sample_to_document_function=mlqaen_example_to_document,
            )
    """

    def __init__(
        self,
        dataset_name: str,
        split_name: str,
        load_max_docs: Optional[int] = 100,
        sample_to_document_function: Optional[Callable[[Dict], Document]] = None,
    ):
        """Initialize the TensorflowDatasetLoader.

        Args:
            dataset_name: name of the dataset to load.
            split_name: name of the split to load.
            load_max_docs: upper bound on the number of loaded documents.
                Defaults to 100.
            sample_to_document_function: callable that converts one dataset
                sample into a Document.
        """
        # Which dataset/split the underlying utility should stream.
        self.dataset_name: str = dataset_name
        self.split_name: str = split_name
        # Maximum number of documents produced by lazy_load().
        self.load_max_docs = load_max_docs
        # Optional converter from a raw dataset sample to a Document.
        self.sample_to_document_function: Optional[Callable[[Dict], Document]] = (
            sample_to_document_function
        )
        # All the actual loading work is delegated to this utility client.
        self._tfds_client = TensorflowDatasets(  # type: ignore[call-arg]
            dataset_name=dataset_name,
            split_name=split_name,
            load_max_docs=load_max_docs,  # type: ignore[arg-type]
            sample_to_document_function=sample_to_document_function,
        )

    def lazy_load(self) -> Iterator[Document]:
        """Yield documents one at a time from the wrapped TFDS client."""
        yield from self._tfds_client.lazy_load()
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@community@langchain_community@document_loaders@tensorflow_datasets.py@.PATH_END.py
|
{
"filename": "_orientation.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/barpolar/marker/colorbar/_orientation.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OrientationValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``barpolar.marker.colorbar.orientation`` attribute."""

    def __init__(
        self,
        plotly_name="orientation",
        parent_name="barpolar.marker.colorbar",
        **kwargs,
    ):
        # Pull overridable defaults out of kwargs before delegating upward.
        edit_type = kwargs.pop("edit_type", "colorbars")
        values = kwargs.pop("values", ["h", "v"])
        super(OrientationValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            values=values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@barpolar@marker@colorbar@_orientation.py@.PATH_END.py
|
{
"filename": "_linewidth.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/scene/yaxis/_linewidth.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LinewidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``layout.scene.yaxis.linewidth`` number attribute."""

    def __init__(
        self, plotly_name="linewidth", parent_name="layout.scene.yaxis", **kwargs
    ):
        # Extract overridable defaults before forwarding to the base class.
        edit_type = kwargs.pop("edit_type", "plot")
        minimum = kwargs.pop("min", 0)
        super(LinewidthValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@scene@yaxis@_linewidth.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/layout/shape/line/__init__.py",
"type": "Python"
}
|
import sys

# On Python < 3.7 there is no module-level __getattr__ hook, so the
# validator classes must be imported eagerly.
if sys.version_info < (3, 7):
    from ._width import WidthValidator
    from ._dash import DashValidator
    from ._color import ColorValidator
else:
    # On Python >= 3.7, defer the submodule imports until first attribute
    # access: relative_import returns the lazy __getattr__/__dir__ pair.
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__,
        [],
        ["._width.WidthValidator", "._dash.DashValidator", "._color.ColorValidator"],
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@layout@shape@line@__init__.py@.PATH_END.py
|
{
"filename": "_variantsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/hoverlabel/font/_variantsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class VariantsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``surface.hoverlabel.font.variantsrc`` attribute."""

    def __init__(
        self, plotly_name="variantsrc", parent_name="surface.hoverlabel.font", **kwargs
    ):
        # Callers may override the default edit_type through kwargs.
        kwargs.setdefault("edit_type", "none")
        super(VariantsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@hoverlabel@font@_variantsrc.py@.PATH_END.py
|
{
"filename": "_font.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/streamtube/hoverlabel/_font.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class FontValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Compound validator: coerces dict / Font inputs into the generated
    # streamtube.hoverlabel.Font data class.
    def __init__(
        self, plotly_name="font", parent_name="streamtube.hoverlabel", **kwargs
    ):
        super(FontValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the generated data class this validator instantiates.
            data_class_str=kwargs.pop("data_class_str", "Font"),
            # Per-property help text surfaced in the data class docstring.
            data_docs=kwargs.pop(
                "data_docs",
                """
            color

            colorsrc
                Sets the source reference on Chart Studio Cloud
                for `color`.
            family
                HTML font family - the typeface that will be
                applied by the web browser. The web browser
                will only be able to apply a font if it is
                available on the system which it operates.
                Provide multiple font families, separated by
                commas, to indicate the preference in which to
                apply fonts if they aren't available on the
                system. The Chart Studio Cloud (at
                https://chart-studio.plotly.com or on-premise)
                generates images on a server, where only a
                select number of fonts are installed and
                supported. These include "Arial", "Balto",
                "Courier New", "Droid Sans", "Droid Serif",
                "Droid Sans Mono", "Gravitas One", "Old
                Standard TT", "Open Sans", "Overpass", "PT Sans
                Narrow", "Raleway", "Times New Roman".
            familysrc
                Sets the source reference on Chart Studio Cloud
                for `family`.
            lineposition
                Sets the kind of decoration line(s) with text,
                such as an "under", "over" or "through" as well
                as combinations e.g. "under+over", etc.
            linepositionsrc
                Sets the source reference on Chart Studio Cloud
                for `lineposition`.
            shadow
                Sets the shape and color of the shadow behind
                text. "auto" places minimal shadow and applies
                contrast text font color. See
                https://developer.mozilla.org/en-
                US/docs/Web/CSS/text-shadow for additional
                options.
            shadowsrc
                Sets the source reference on Chart Studio Cloud
                for `shadow`.
            size

            sizesrc
                Sets the source reference on Chart Studio Cloud
                for `size`.
            style
                Sets whether a font should be styled with a
                normal or italic face from its family.
            stylesrc
                Sets the source reference on Chart Studio Cloud
                for `style`.
            textcase
                Sets capitalization of text. It can be used to
                make text appear in all-uppercase or all-
                lowercase, or with each word capitalized.
            textcasesrc
                Sets the source reference on Chart Studio Cloud
                for `textcase`.
            variant
                Sets the variant of the font.
            variantsrc
                Sets the source reference on Chart Studio Cloud
                for `variant`.
            weight
                Sets the weight (or boldness) of the font.
            weightsrc
                Sets the source reference on Chart Studio Cloud
                for `weight`.
""",
            ),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@streamtube@hoverlabel@_font.py@.PATH_END.py
|
{
"filename": "_ticktextsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scattermapbox/marker/colorbar/_ticktextsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicktextsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the ``scattermapbox.marker.colorbar.ticktextsrc`` attribute."""

    def __init__(
        self,
        plotly_name="ticktextsrc",
        parent_name="scattermapbox.marker.colorbar",
        **kwargs,
    ):
        # Resolve the overridable default before delegating to the base class.
        edit_type = kwargs.pop("edit_type", "none")
        super(TicktextsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scattermapbox@marker@colorbar@_ticktextsrc.py@.PATH_END.py
|
{
"filename": "_delaunayaxis.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/_delaunayaxis.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class DelaunayaxisValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``mesh3d.delaunayaxis`` enumerated attribute."""

    def __init__(self, plotly_name="delaunayaxis", parent_name="mesh3d", **kwargs):
        # Fill in defaults only when the caller did not override them.
        kwargs.setdefault("edit_type", "calc")
        kwargs.setdefault("values", ["x", "y", "z"])
        super(DelaunayaxisValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@_delaunayaxis.py@.PATH_END.py
|
{
"filename": "icecube_dom_acceptance.py",
"repo_name": "claudiok/clsim",
"repo_path": "clsim_extracted/clsim-master/resources/plots/icecube_dom_acceptance.py",
"type": "Python"
}
|
#!/usr/bin/env python
#--------------------------------------------
# icecube_dom_acceptance.py
#
# A script to plot the wavelength dependent
# acceptance of the IceCube DOM as implemented
# in python/GetIceCubeDOMAcceptance.py
#--------------------------------------------
import matplotlib
matplotlib.use("PDF")
import matplotlib.pylab as plt
from icecube import icetray, dataclasses
from icecube.clsim import I3CLSimFunctionFromTable
from icecube.clsim.GetIceCubeDOMAcceptance import *

# Global plot styling.  The original used the rcParam 'text.fontsize',
# which was renamed to 'font.size' in matplotlib 1.3 and later removed
# entirely; 'font.size' works on both old and modern matplotlib.
params = {'backend': 'pdf',
          'axes.labelsize': 10,
          'font.size': 10,
          'legend.fontsize': 10,
          'xtick.labelsize': 8,
          'ytick.labelsize': 8,
          'text.usetex': True}
matplotlib.rcParams.update(params)
matplotlib.rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})

# Get the wavelength-dependent DOM acceptance function.
dom_acceptance = GetIceCubeDOMAcceptance()

# Evaluate the acceptance on a 250-700 nm grid in 0.1 nm steps.
x = [float(i)/10. for i in range(2500, 7001)]
dom_acc = [dom_acceptance.GetValue(i*I3Units.nanometer) for i in x]

# Make the plot.
fig = plt.figure(1, figsize=[10, 7])
# Figure.canvas.set_window_title() was removed in matplotlib 3.6; go through
# the figure manager instead (the PDF backend may not provide one).
if getattr(fig.canvas, "manager", None) is not None:
    fig.canvas.manager.set_window_title("Acceptance of an IceCube DOM")
plt.subplot(111)
plt.xlabel("Wavelength $\\lambda$ [nm]")
plt.ylabel("Acceptance")
plt.title("Acceptance of an IceCube DOM as function of wavelength")
plt.plot(x, dom_acc)
plt.grid()
fig.savefig("icecube_dom_acceptance.pdf")
|
claudiokREPO_NAMEclsimPATH_START.@clsim_extracted@clsim-master@resources@plots@icecube_dom_acceptance.py@.PATH_END.py
|
{
"filename": "BackyardWorldsCafe2.ipynb",
"repo_name": "astro-datalab/notebooks-latest",
"repo_path": "notebooks-latest_extracted/notebooks-latest-master/06_EPO/e-TeenAstronomyCafe/12_Discovering_New_Neighbors_of_the_Sun/BackyardWorldsCafe2.ipynb",
"type": "Jupyter Notebook"
}
|
<img src="Figures/logo154.svg" alt="to Go logo" width="100" align="right"/>
<br>
<br>
<font size='6'><u><b>Discovering New Neighbors of the Sun</b></u></font>
<br>
_**Written by Aaron Meisner, NSF's NOIRLab**_
# Table of Contents
* [How to Use This Notebook](#How-to-Use-This-Notebook)
* [Pre-Activity Setup](#Pre-Activity-Setup)
* [Objective: Discovering Brown Dwarfs](#Objective:-Discovering-Brown-Dwarfs)
* [Activity 1: Motions of stars (and brown dwarfs)](#Activity-1:-Motions-of-stars-(and-brown-dwarfs))
* [Activity 2: Colors of stars and brown dwarfs](#Activity-2:-Colors-of-stars-and-brown-dwarfs)
* [Looking for brown dwarfs based on motion & color](#Looking-for-brown-dwarfs-based-on-motion-&-color)
* [Activity 3: Discover the Brown Dwarfs!](#Activity-3:-Discover-the-Brown-Dwarfs!)
# How to Use This Notebook
The webpage you are in is actually an app - much like the ones on your cellphone. This app consists of cells.
An *input* cell looks like a light grey box with an `In [ ]:` on its left. Input cells each contain code - instructions to make the computer do something.
To activate or select a cell, click anywhere inside of it.
<div class='alert alert-info'>
<font size='3'><b>Select the cell below and read its contents.</b></font>
</div>
```python
# Text that follows a "#" is known as a comment.
# Comments do not affect your code in any way.
# You should always read the comments at the top of each cell you interact with.
# Comments will be used to describe what the cell's code is actually doing.
```
To execute or run a selected cell, hit `[Shift + Enter]` on your keyboard.
<div class='alert alert-info'>
<font size='3'><b>Select the cell below and read its contents. Then, run the cell.</b></font>
<br> If a warning appears, just click <em>"Run Anyway"</em>, this code is safe ;)
<br> Also, if you want to save your progress, click the <em>"Copy to Drive"</em> button at the top.
</div>
```python
# Text that DOESN'T follow a "#" is considered code.
# Lines of code are instructions given to your computer.
# The line of code below is a "print" statement.
# A print statement literally prints out the text between its quotes.
print("Congrats! You have successfully run your first cell!")
```
Congrats! You have successfully run your first cell!
Running a cell creates an *output* directly below it. An output can be some text, a graph, an interactive slider, or even nothing at all! For that last case, you know you have run a cell when the `In [ ]:` becomes `In [#]:`, where "#" is any number.
You can learn more about how Python notebooks work at https://try.jupyter.org/
___
# Pre-Activity Setup
In order for any of the activities to work properly, you must import the libraries needed for the code in this notebook.
Go to the “Runtime” menu and select the option to “Run all.” Running all helps to ensure a cell was not skipped and all libraries are imported to help the activities work properly. As you work through the Python Notebook, you may also re-run each cell individually.
<div class='alert alert-info'>
<font size='3'><b>Select and run the cell below.</b></font>
</div>
```python
# Here, you are importing the libraries needed for this notebook.
# These libraries set up the plotting environment in your browser.
%matplotlib inline
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.collections import PatchCollection
plt.style.use('ggplot')
from collections import OrderedDict
import numpy as np
import astropy.units as u
from astropy.units import imperial
from IPython.display import display
from ipywidgets import interactive, interact, FloatSlider
print("\nDone! You have successfully imported the libraries.")
```
Done! You have successfully imported the libraries.
**ANY QUESTIONS?**
# Objective: Discovering Brown Dwarfs
In this notebook we'll explore a bit of the physics, astronomy and math behind how we can discover new nearby neighbors of the Sun referred to as [brown dwarfs](https://en.wikipedia.org/wiki/Brown_dwarf). Then we'll look through some telescope data to identify real brown dwarfs hidden among other stars, galaxies and detector noise.
Brown dwarfs are celestial objects with masses in between those of stars and those of giant planets like Jupiter. Below is an artist rendering of a brown dwarf. You can think of them like oversized versions of Jupiter, but floating around all alone in interstellar space.
<img src="https://upload.wikimedia.org/wikipedia/commons/thumb/e/e0/Artist%E2%80%99s_conception_of_a_brown_dwarf_like_2MASSJ22282889-431026.jpg/1920px-Artist%E2%80%99s_conception_of_a_brown_dwarf_like_2MASSJ22282889-431026.jpg" width="800">
You might wonder: why should we care about brown dwarfs? There are many different ways to answer this. The atmospheres of brown dwarfs are very similar to those of giant exoplanets, but can be characterized in detail without the glare of a much brighter host star interfering. These brown dwarf atmospheres show strong signatures of water and methane. Some brown dwarfs may in fact be rogue planets ejected from the star system in which they were originally born.
Because brown dwarfs are so dim, it still remains possible that there could be an as-yet overlooked brown dwarf closer to us than [Proxima Centauri](https://en.wikipedia.org/wiki/Proxima_Centauri), awaiting discovery in existing telescope data. This would be a historic discovery and makes the search for new brown dwarfs very exciting!
In the [Backyard Worlds](https://en.wikipedia.org/wiki/Backyard_Worlds) project, we search for brown dwarfs in [infrared](https://en.wikipedia.org/wiki/Infrared) images taken by NASA's [Wide-field Infrared Survey Explorer](https://en.wikipedia.org/wiki/Wide-field_Infrared_Survey_Explorer) (WISE) telescope.
# Activity 1: Motions of stars (and brown dwarfs)
Brown dwarfs are cold and dim, so they can only be detected when nearby to us. All inhabitants of our Milky Way Galaxy, such as stars and brown dwarfs, are moving relative to the Sun. But we perceive the objects close to us as having a high apparent motion across the sky. This can be seen in the Barnard's Star example image blink from today's lecture.
<center><img src="https://upload.wikimedia.org/wikipedia/commons/6/6c/Barnard2005.gif" width="400"></center>
Note that the above animation is on a loop; Barnard's star doesn't actually "jump backward" at any point, it just keeps moving along essentially a straight line trajectory.
Mathematically, for a star of fixed speed $v$ (in the plane perpendicular to the line of sight from us to the star), we have:
$\mu \propto v/d$
Here $v$ would be in units of distance over time (like m/s or km/s), $d$ is the distance between us and the star (in units like light years) and $\mu$ is the rate of apparent motion across the sky, which astronomers refer to as **proper motion**. Proper motion has units of angular displacement per unit time. It turns out that the convenient unit of proper motion for nearby stars is [arcseconds](https://en.wikipedia.org/wiki/Minute_and_second_of_arc) per year, where one arcsecond is 1/3600 of a degree. Barnard's Star has the highest proper motion of any currently known star or brown dwarf, at roughly 10.4 arcseconds/year. It is possible that through searches like Backyard Worlds, we could discover a brown dwarf that breaks this proper motion record!
Let's take a look at how the fast apparent motion of Barnard's Star plays out in a modern astronomical catalog (from the [Gaia](https://en.wikipedia.org/wiki/Gaia_(spacecraft)) mission). The code below shows the locations of stars in a 40 [arcminute](https://en.wikipedia.org/wiki/Minute_and_second_of_arc) diameter patch of the sky, similar in size to the full Moon.
```python
# set up Python environment...
from ipywidgets import *
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
```
```python
url = 'https://raw.githubusercontent.com/ameisner/cafe/master/etc/gaia_barnards_star.csv'
df_gaia = pd.read_csv(url)
```
```python
def plot_stars(dt_yr=0):
global df_gaia
xmin = np.min(df_gaia['RA'])
xmax = np.max(df_gaia['RA'])
ymin = np.min(df_gaia['DEC'])
ymax = np.max(df_gaia['DEC'])
ra = df_gaia['RA'] + dt_yr*np.cos(df_gaia['DEC']/(180/np.pi))*df_gaia['PMRA']/(3600.0*1000.0)
dec = df_gaia['DEC'] + dt_yr*df_gaia['PMDEC']/(3600.0*1000.0)
plt.figure(figsize=(8, 8))
plt.scatter(ra, dec, edgecolor='black', s=10, c='k', marker='.') #needed to avoid a deprecation warning in matplotlib.pyplot (2023-07-10, Marco Moreno)
###plt.scatter(ra[400], dec[400], marker='o', facecolor='none',
### s=100, edgecolor='r')
plt.xlim((xmax, xmin)) # east to the left...
plt.ylim((ymax, ymin)) # to match the Barnard's Star GIF shown earlier...
plt.xlabel('right ascension (degrees)')
plt.ylabel('declination (degrees)')
plt.show() # This line is important due to an update of the ipywidgets library (2023-07-10, Marco Moreno)
print('NOTE: CLICKING THE SLIDER THEN HOLDING THE RIGHT ARROW WILL GIVE YOU A CONTINUOUS "MOVIE" OF THE MOTIONS')
interact(plot_stars, dt_yr=(0, 100, 1))
```
NOTE: CLICKING THE SLIDER THEN HOLDING THE RIGHT ARROW WILL GIVE YOU A CONTINUOUS "MOVIE" OF THE MOTIONS
interactive(children=(IntSlider(value=0, description='dt_yr'), Output()), _dom_classes=('widget-interact',))
<function __main__.plot_stars>
Most stars in this sky patch are a few thousand light years from the Sun. For comparison, Barnard's Star is just under 6 light years away from the Sun! Notice that during one century, a star many thousands of light years away appears to stay at essentially a fixed position.
**Can you find Barnard's Star by eye?** Hint: Barnard's Star has the largest proper motion of any star on the entire sky, so it will look like it's moving a lot faster than any other star in this small sky region.
**ANY QUESTIONS?**
**GOING FURTHER (OPTIONAL)**
What if we look at the motions on longer timescales, say 0 to 10,000 years in increments of 100 years? Here's what we see:
```python
interact(plot_stars, dt_yr=(0, 10000, 100))
```
interactive(children=(IntSlider(value=0, description='dt_yr', max=10000, step=100), Output()), _dom_classes=('…
<function __main__.plot_stars>
So over ~10,000 years, all the stars, even the distant ones, appear to move noticeably.
**Bonus Question**: why don't we see the motion of Barnard's Star using the above slider with 100 year time increments?
**GOING FURTHER (OPTIONAL)**
What about even longer time scales, 1 million years in increments of 1,000 years? That's shown here:
```python
interact(plot_stars, dt_yr=(0, 1000000, 1000))
```
interactive(children=(IntSlider(value=0, description='dt_yr', max=1000000, step=1000), Output()), _dom_classes…
<function __main__.plot_stars>
**Bonus Question**: after a long time (millions of years), all the stars in this visualization "go away". If we were to actually look at a patch of sky for millions of years, would all the stars "go away"?
# Activity 2: Colors of stars and brown dwarfs
If you've looked at the night sky, you've probably noticed that stars come in different colors. The color of a star is indicative of its temperature. A blue/white looking star is relatively hot, while a red star (say, a red dwarf like Barnard's Star) will look more yellow/orange in color.
We can think of this in a simplified way, approximating the [spectrum](https://en.wikipedia.org/wiki/Spectrum) -- light emission as a function of [wavelength](https://en.wikipedia.org/wiki/Wavelength) -- of each star as a [blackbody curve](https://en.wikipedia.org/wiki/Planck%27s_law).
The following code makes plots illustrating how the color of a star shifts from blue to red as you go from the hottest stars ($\sim$50,000 Kelvin) to cold brown dwarfs (temperature = 700 Kelvin). [Kelvin](https://en.wikipedia.org/wiki/Kelvin) is a temperature scale used by physicists and astronomers. The transition from star to brown dwarf happens at roughly 2,200 Kelvin. For comparison, the Sun has a temperature of ~5,800 Kelvin.
The [peak wavelength](https://skyserver.sdss.org/dr1/en/proj/advanced/color/peakwavelength.asp) of the light emission shifts from short (blue) wavelengths to long (red) wavelengths as the temperature decreases.
```python
url = 'https://raw.githubusercontent.com/ameisner/cafe/master/etc/blackbody_wise_flux_ratios.csv'
df_bb = pd.read_csv(url)
def plot_planck(T):
# make sure to normalize
plt.figure(figsize=(18, 4))
plt.subplot(1, 2, 1)
lambda_values_nm = np.arange(100, 8000)
lambda_values_m = (1e-9)*lambda_values_nm
c = 3e8
nu_values = c/lambda_values_m
k_b = 1.380649e-23
h = 6.62607015e-34
B_nu_T = np.power(nu_values, 3)/(np.exp(h*nu_values/(k_b*T)) - 1)
B_nu_T /= np.max(B_nu_T) # normalize so that the peak has value 1
lambda_values_um = lambda_values_nm/1000
plt.plot(lambda_values_um, B_nu_T, c='k')
wav_lim_um_blue = (3, 3.9)
wav_lim_um_red = (4.1, 5)
plt.plot([wav_lim_um_blue[0]]*2, [0, 1], c='b')
plt.plot([3.9,3.9], [0, 1], c='b')
plt.plot([4.1,4.1], [0, 1], c='r')
plt.plot([5, 5], [0, 1], c='r')
plt.text(3.07, 0.55, 'WISE low\nwavelength\nchannel', c='b', fontsize=12,
rotation=90)
plt.text(4.17, 0.55, 'WISE long\nwavelength\nchannel', c='r', fontsize=12,
rotation=90)
plt.ylim((0, 1.025))
plt.xlim((0, 8))
title = 'temperature = ' + str(int(T)) + ' Kelvin ('
title += 'STAR' if T > 2200 else 'BROWN DWARF'
title += ')'
plt.title(title)
plt.xlabel('wavelength (microns)')
plt.ylabel('normalized flux per unit frequency')
plt.subplot(1, 2, 2)
flux_ratio_num = np.sum(B_nu_T[(lambda_values_um > wav_lim_um_red[0]) & (lambda_values_um < wav_lim_um_red[1])])
flux_ratio_den = np.sum(B_nu_T[(lambda_values_um > wav_lim_um_blue[0]) & (lambda_values_um < wav_lim_um_blue[1])])
flux_ratio = flux_ratio_num/flux_ratio_den
global df_bb
plt.plot(df_bb['t_vals'], df_bb['flux_ratio_vals'], c='k')
plt.xlim((700, 30000))
plt.ylim((0, 2))
plt.scatter([T], [flux_ratio], marker='o', s=100, c='k')
plt.xlabel('temperature', fontsize=16)
plt.ylabel('(red flux) / (blue flux) ratio', fontsize=16)
plt.title(title)
ax = plt.gca()
ax.tick_params(axis='both', which='major', labelsize=14)
ax.tick_params(axis='both', which='minor', labelsize=14)
plt.show() # This line is important due to an update of the ipywidgets library (2023-07-10, Marco Moreno)
layout = {'width':'initial'}
box_layout = {'display':'flex', 'flex_flow':'column', 'align-items':'center', 'border':'1px solid grey', 'width':'initial'}
# start at 5800 K because that's roughly the temperature of the Sun
T = FloatSlider(value=5800, min=700, max=30000, step=50, continuous_update=True, layout=layout)
plot4 = interactive(plot_planck, T=T)
plot4.layout = box_layout
display(plot4)
```
interactive(children=(FloatSlider(value=5800.0, description='T', layout=Layout(width='initial'), max=30000.0, …
The black line of the left hand plot shows how the overall spectrum and its peak location shift toward longer wavelengths as you go from a hotter to a colder star (assuming simplified blackbody emission for now). The right hand plot shows the ratio of amount of light emitted in two infrared wavelength ranges, one centered at $\sim$3.5 microns and the other centered at $\sim$4.5 microns. A [micron](https://en.wikipedia.org/wiki/Micrometre) is a unit of length, equal to 1000 [nanometers](https://en.wikipedia.org/wiki/Nanometre). For a sense of scale, the typical width of a strand of spider web silk is a few microns. You may have noticed the word ["flux"](https://en.wikipedia.org/wiki/Radiative_flux) in the plot labels. Flux is basically just another word for amount of energy emitted per unit time per unit area.
Most stars have a fairly similar ratio of light at 4.5 microns to light at 3.5 microns. But when we reach the coldest brown dwarfs, this ratio becomes dramatically higher. Thus, we can search for cold nearby brown dwarfs not only based on their proper motion, but also based on their "color" at infrared wavelengths. In this context, by "color" astronomers mean the ratio of amount of light emitted at two different wavelengths. Objects that look very bright at 4.5 microns relative to how bright they are at 3.5 microns will be strong brown dwarf candidates and are said to have a very "red color" because they emit more strongly at the longer wavelength of these two wavelengths. The plotted wavelength intervals have been chosen because these are the wavelengths at which the WISE telescope maps the sky, and we'll be looking at images from the WISE telescope later in this activity.
Incidentally, the detailed spectra of brown dwarfs look much weirder than a simple blackbody, primarily due to methane and water absorption that carve out deep valleys and sharp peaks in the profile of light as a function of wavelength. An example is shown below:
<img src="https://astrobites.org/wp-content/uploads/2017/05/fig2.png" width="800">
That's right -- there's water in the atmospheres of brown dwarfs, either in the form of steam / water vapor or, in the case of the coldest brown dwarfs, water ice clouds!
The temperature of [this object](https://en.wikipedia.org/wiki/WISEA_1101%2B5400) (the first brown dwarf discovered by Backyard Worlds citizen scientists!) is $\sim$1,200 Kelvin. For comparison, the Sun's temperature is a much hotter $\sim$5,800 Kelvin and Jupiter's temperature is significantly cooler, $\sim$150 Kelvin.
**ANY QUESTIONS?**
**GOING FURTHER (EXTRA INFO)**
Looking at this spectrum, there are a couple of features that tell us it's a brown dwarf:
(1) If we imagine a smooth blackbody-like envelope that ignores the deep water/methane absorption troughs, it would peak somewhere in the ~1.3 micron range. For comparison, the Sun (temperature ~5,800 K) peaks at roughly 0.5 microns (roughly 500 nanometers). From [Wien's law](https://en.wikipedia.org/wiki/Wien%27s_displacement_law), we know that the peak of the blackbody spectrum shifts redward as temperature gets colder. An object with spectrum peaking near 1.3 microns would need to be a few times cooler than the Sun, cold enough to be a brown dwarf.
(2) The extreme water/methane absorption features are a distinctive hallmark of brown dwarfs -- the spectrum of a [star](https://www.esa.int/ESA_Multimedia/Images/2017/12/Solar_spectrum) or galaxy just doesn't look like that at all. This type of absorption signature gets imprinted when a relatively cool gas (the brown dwarf upper atmosphere) is sitting in front of a relatively hot source (the deeper, hotter layers of the brown dwarf atmosphere). This absorption band creation is a consequence of [Kirchhoff's laws](https://www.e-education.psu.edu/astro801/content/l3_p6.html).
# Looking for brown dwarfs based on motion & color
Now that we have some background on the distinctive motions and colors of brown dwarfs, let's discuss how Backyard Worlds uses both of these traits to discover new brown dwarfs nearby to the Sun. Backyard Worlds looks at images from NASA's [Wide-field Infrared Survey Explorer](https://en.wikipedia.org/wiki/Wide-field_Infrared_Survey_Explorer) (WISE) telescope. These images have square [pixels](https://en.wikipedia.org/wiki/Pixel) that are 2.75 arcseconds on a side, and the data set spans a $\sim$10 year time period (from roughly 2010 to 2020). So Barnard's Star would appear to move by
10.4 arcseconds/year $\times$ 10 years / (2.75 arcseconds/pixel) $\sim$ 38 WISE pixels
over the course of the available data.
What is the limit of motions we can detect with the WISE data? Roughly speaking, we can 'see' a movement of about 1 pixel, which would correspond to:
2.75 arcseconds / 10 years = 0.275 arcseconds per year.
Let's visually examine a few examples of what recent brown dwarf discoveries made by Backyard Worlds citizen scientists look like in the actual WISE data used to discover them. Here's what a brown dwarf moving at roughly 1.5 arcsecond per year looks like.
<img src="https://raw.githubusercontent.com/ameisner/cafe/master/etc/w1930.gif" width="200">
Note that the above animation is on a loop. This animation and the others like it in the remainder of this notebook have a sidelength of 2 arcminutes.
The brown dwarf moves toward the bottom right corner of the image over the ~2010-2020 time period, then jumps back upward/leftward to its starting position when the loop resets. This brown dwarf was first discovered by Backyard Worlds citizen scientists in October 2017. The Backyard Worlds science team has subsequently observed it with the [Spitzer Space Telescope](https://en.wikipedia.org/wiki/Spitzer_Space_Telescope), finding that it is one of the coldest known brown dwarfs, with a temperature of 350 +/- 80 Kelvin. Notice that in addition to the motion of this object, it has a distinctive orange color very different from the black color of much more distant stars and galaxies in the image.
Here's a brown dwarf moving significantly slower ($\sim$0.5 arcseconds/year, roughly 20$\times$ slower than Barnard's star), but still in a way that's visually perceptible relative to the much more distant background stars and galaxies that appear to stay fixed.
<img src="https://raw.githubusercontent.com/ameisner/cafe/master/etc/w2243.gif" width="200">
This brown dwarf was first discovered by Backyard Worlds citizen scientists in April 2018. **Question**: Which direction do you see this brown dwarf moving over time?
Let's keep going on the theme of tricky cases. In addition to blending/confusion, detector noise and defects can also masquerade as red and/or moving sources, and therefore show up as "bogus" brown dwarf candidates. Here's one example, a so-called "ghost" in the WISE data:
<img src="https://raw.githubusercontent.com/ameisner/cafe/master/etc/ghost.gif" width="200">
It looks very red, but it has an odd donut-shaped appearance which is different from the shape of stars in the imagery, hence we know it can't be a real brown dwarf. If you follow [this link](https://www.legacysurvey.org/viewer/?ra=12.4397&dec=-13.5258&layer=unwise-neo6&zoom=13), you can see that this ghost is sourced by a bright star that's just "off the screen" in this animation.
Lastly, "crowded fields" -- areas of the sky such as the plane of our Milky Way Galaxy with lots of stars -- can be more difficult to comprehend, both for computer programs and for humans. Here's a crowded field example:
<img src="https://raw.githubusercontent.com/ameisner/cafe/master/etc/w1936.gif" width="200">
**ANY QUESTIONS?**
# Activity 3: Discover the brown dwarfs!
Here's a <a href="https://docs.google.com/spreadsheets/d/1bDp2B-GO7O-GdfMz68-Jv63HXdU0LQQu_NxMhcXCr8o/edit?usp=sharing">Google Sheet</a> with a couple dozen brown dwarf candidates. The candidate in each case is at the center of the field of view (but keep your eyes out for celestial objects that might be moving in other parts of each movie!). Each team should collaborate on its own copy of the spreadsheet, filling out the "NOTES" column with the team members' thoughts about whether each candidate is a brown dwarf or not. Then we will reconvene the whole group to discuss. By the way, several of the brown dwarfs in this example data set were first discovered by Backyard Worlds volunteers within the last few years!
**ANY QUESTIONS?**
## Like staring at movies of brown dwarf candidates?
Anyone can participate in the Backyard Worlds project, which is accessible online for free and with no registration required at:
https://backyardworlds.org/
|
astro-datalabREPO_NAMEnotebooks-latestPATH_START.@notebooks-latest_extracted@notebooks-latest-master@06_EPO@e-TeenAstronomyCafe@12_Discovering_New_Neighbors_of_the_Sun@BackyardWorldsCafe2.ipynb@.PATH_END.py
|
{
"filename": "hist.py",
"repo_name": "itseez/opencv",
"repo_path": "opencv_extracted/opencv-master/samples/python/hist.py",
"type": "Python"
}
|
#!/usr/bin/env python
''' This is a sample for histogram plotting for RGB images and grayscale images for better understanding of colour distribution
Benefit : Learn how to draw histogram of images
Get familiar with cv.calcHist, cv.equalizeHist, cv.normalize and some drawing functions
Level : Beginner or Intermediate
Functions : 1) hist_curve : returns histogram of an image drawn as curves
2) hist_lines : return histogram of an image drawn as bins ( only for grayscale images )
Usage : python hist.py <image_file>
Abid Rahman 3/14/12 debug Gary Bradski
'''
# Python 2/3 compatibility
from __future__ import print_function
import numpy as np
import cv2 as cv
bins = np.arange(256).reshape(256,1)
def hist_curve(im):
    """Return a 300x256x3 image with the histogram of ``im`` drawn as curves.

    One curve is drawn per channel: white for a 2-D (grayscale) input,
    blue/green/red for a 3-channel input.

    Parameters
    ----------
    im : numpy.ndarray
        Input image, either 2-D grayscale or HxWx3 color.

    Returns
    -------
    numpy.ndarray
        300x256x3 float image with the histogram curves, flipped vertically
        so that bin counts grow upward.

    Raises
    ------
    ValueError
        If the image has a channel count other than 1 or 3.
    """
    h = np.zeros((300, 256, 3))
    if len(im.shape) == 2:
        color = [(255, 255, 255)]
    elif im.shape[2] == 3:
        color = [(255, 0, 0), (0, 255, 0), (0, 0, 255)]
    else:
        # Previously `color` was left unbound here, so e.g. RGBA input
        # crashed with an UnboundLocalError. Fail with a clear message.
        raise ValueError('hist_curve supports 1- or 3-channel images, got shape %r' % (im.shape,))
    for ch, col in enumerate(color):
        hist_item = cv.calcHist([im], [ch], None, [256], [0, 256])
        # Scale counts into the 0..255 drawing range of the canvas.
        cv.normalize(hist_item, hist_item, 0, 255, cv.NORM_MINMAX)
        hist = np.int32(np.around(hist_item))
        pts = np.int32(np.column_stack((bins, hist)))
        cv.polylines(h, [pts], False, col)
    # Flip so the origin of the plot is bottom-left instead of top-left.
    y = np.flipud(h)
    return y
def hist_lines(im):
    """Return a 300x256x3 image showing the histogram of ``im`` as vertical bars.

    This bin-style rendering only makes sense for a single channel, so color
    input is first converted to grayscale (with a console notice).
    """
    canvas = np.zeros((300, 256, 3))
    if im.ndim != 2:
        print("hist_lines applicable only for grayscale images")
        im = cv.cvtColor(im, cv.COLOR_BGR2GRAY)
    counts = cv.calcHist([im], [0], None, [256], [0, 256])
    # Scale counts into the 0..255 drawing range of the canvas.
    cv.normalize(counts, counts, 0, 255, cv.NORM_MINMAX)
    heights = np.int32(np.around(counts))
    for bin_idx, bar in enumerate(heights):
        cv.line(canvas, (bin_idx, 0), (bin_idx, bar[0]), (255, 255, 255))
    # Flip so that bars grow upward from the bottom of the plot.
    return np.flipud(canvas)
def main():
    """Interactive demo: load an image and show histograms on key presses.

    Reads the image path from argv[1] (falling back to the bundled
    'lena.jpg'), then loops on cv.waitKey dispatching on the pressed key:
    a/b/c/d/e draw different histogram views, Esc exits.
    """
    import sys
    if len(sys.argv)>1:
        fname = sys.argv[1]
    else :
        fname = 'lena.jpg'
        print("usage : python hist.py <image_file>")
    # findFile resolves the name against OpenCV's bundled sample data.
    im = cv.imread(cv.samples.findFile(fname))
    if im is None:
        print('Failed to load image file:', fname)
        sys.exit(1)
    gray = cv.cvtColor(im,cv.COLOR_BGR2GRAY)
    print(''' Histogram plotting \n
    Keymap :\n
    a - show histogram for color image in curve mode \n
    b - show histogram in bin mode \n
    c - show equalized histogram (always in bin mode) \n
    d - show histogram for gray image in curve mode \n
    e - show histogram for a normalized image in curve mode \n
    Esc - exit \n
    ''')
    cv.imshow('image',im)
    # Event loop: waitKey(0) blocks until a key press; each branch updates
    # both the 'histogram' window and the 'image' window it describes.
    while True:
        k = cv.waitKey(0)
        if k == ord('a'):
            curve = hist_curve(im)
            cv.imshow('histogram',curve)
            cv.imshow('image',im)
            print('a')
        elif k == ord('b'):
            print('b')
            lines = hist_lines(im)
            cv.imshow('histogram',lines)
            cv.imshow('image',gray)
        elif k == ord('c'):
            print('c')
            # Histogram equalization works on the grayscale copy.
            equ = cv.equalizeHist(gray)
            lines = hist_lines(equ)
            cv.imshow('histogram',lines)
            cv.imshow('image',equ)
        elif k == ord('d'):
            print('d')
            curve = hist_curve(gray)
            cv.imshow('histogram',curve)
            cv.imshow('image',gray)
        elif k == ord('e'):
            print('e')
            # In-place min-max normalization of the grayscale image to 0..255.
            norm = cv.normalize(gray, gray, alpha = 0,beta = 255,norm_type = cv.NORM_MINMAX)
            lines = hist_lines(norm)
            cv.imshow('histogram',lines)
            cv.imshow('image',norm)
        elif k == 27:
            print('ESC')
            cv.destroyAllWindows()
            break
    print('Done')
# Script entry point: print the module usage text, run the interactive demo,
# then make sure any remaining OpenCV windows are torn down.
if __name__ == '__main__':
    print(__doc__)
    main()
    cv.destroyAllWindows()
|
itseezREPO_NAMEopencvPATH_START.@opencv_extracted@opencv-master@samples@python@hist.py@.PATH_END.py
|
{
"filename": "correct.ipynb",
"repo_name": "j-faria/kima",
"repo_path": "kima_extracted/kima-master/celerite/paper/figures/simulated/correct.ipynb",
"type": "Jupyter Notebook"
}
|
```python
%matplotlib inline
%config IPython.matplotlib.backend = "retina"
from matplotlib import rcParams
rcParams["savefig.dpi"] = 300
rcParams["figure.dpi"] = 300
from celerite import plot_setup
plot_setup.setup(auto=False)
```
## Recovery of a celerite process
```python
import numpy as np
import matplotlib.pyplot as plt
import celerite
from celerite import terms
np.random.seed(123)
# Simulate some data
kernel = terms.SHOTerm(log_S0=0.0, log_omega0=2.0, log_Q=2.0,
bounds=[(-10, 10), (-10, 10), (-10, 10)])
gp = celerite.GP(kernel)
true_params = np.array(gp.get_parameter_vector())
omega = 2*np.pi*np.exp(np.linspace(-np.log(10.0), -np.log(0.1), 5000))
true_psd = gp.kernel.get_psd(omega)
N = 200
t = np.sort(np.random.uniform(0, 10, N))
yerr = 2.5
gp.compute(t, yerr)
y = gp.sample()
fig, ax = plt.subplots(1, 1)
ax.errorbar(t, y, yerr=yerr, fmt=".k", lw=1)
ax.set_ylim(-26, 26)
ax.set_xlim(0, 10)
ax.set_xlabel("time [day]")
ax.set_ylabel("relative flux [ppm]");
```
```python
import copy
from scipy.optimize import minimize
def nll(params, gp, y):
gp.set_parameter_vector(params)
if not np.isfinite(gp.log_prior()):
return 1e10
ll = gp.log_likelihood(y)
return -ll if np.isfinite(ll) else 1e10
p0 = true_params + 1e-4*np.random.randn(len(true_params))
soln = minimize(nll, p0, method="L-BFGS-B", args=(gp, y))
gp.set_parameter_vector(soln.x)
ml_psd = gp.kernel.get_psd(omega)
ml_gp = copy.deepcopy(gp)
ml_gp.log_likelihood(y)
```
```python
import emcee
def log_probability(params):
gp.set_parameter_vector(params)
lp = gp.log_prior()
if not np.isfinite(lp):
return -np.inf
ll = gp.log_likelihood(y)
return ll + lp if np.isfinite(ll) else -np.inf
ndim = len(soln.x)
nwalkers = 32
coords = soln.x + 1e-4 * np.random.randn(nwalkers, ndim)
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_probability)
coords, _, _ = sampler.run_mcmc(coords, 500)
sampler.reset()
coords, _, _ = sampler.run_mcmc(coords, 2000)
```
```python
# Compute the posterior PSD inference
samples = sampler.flatchain[::15, :]
post_psd = np.empty((len(samples), len(omega)))
for i, s in enumerate(samples):
gp.set_parameter_vector(s)
post_psd[i] = gp.kernel.get_psd(omega)
q = np.percentile(post_psd, [16, 50, 84], axis=0)
```
```python
x = np.linspace(-0.5, 10.5, 500)
mu, var = ml_gp.predict(y, x, return_var=True)
std = np.sqrt(var)
fig = plt.figure(figsize=plot_setup.get_figsize(1, 2.3))
ax1 = plt.subplot2grid((3, 2), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 2), (2, 0), rowspan=1)
ax3 = plt.subplot2grid((3, 2), (0, 1), rowspan=3)
fig.subplots_adjust(hspace=0, wspace=0.4)
ax1.errorbar(t, y, yerr=yerr, fmt=".k", lw=1)
ax1.plot(x, mu)
ax1.fill_between(x, mu+std, mu-std, alpha=0.5, edgecolor="none", zorder=100)
ax1.set_xticklabels([])
ax1.annotate("simulated data", xy=(0, 1), xycoords="axes fraction",
xytext=(5, -5), textcoords="offset points",
ha="left", va="top")
ax1.annotate("N = {0}".format(len(t)), xy=(0, 0),
xycoords="axes fraction",
xytext=(5, 5), textcoords="offset points",
ha="left", va="bottom")
pred_mu, pred_var = ml_gp.predict(y, return_var=True)
std = np.sqrt(yerr**2 + pred_var)
ax2.errorbar(t, y - pred_mu, yerr=std, fmt=".k", lw=1)
ax2.axhline(0.0, color="k", lw=0.75)
ax1.set_ylim(-26, 26)
ax1.set_xlim(-0.5, 10.5)
ax2.set_ylim(-9, 9)
ax2.set_xlim(-0.5, 10.5)
ax2.set_xlabel("time [day]")
ax1.set_ylabel("relative flux [ppm]")
ax2.set_ylabel("residuals")
for ax in [ax1, ax2]:
ax.yaxis.set_label_coords(-0.2, 0.5)
# plot the PSD comparison
factor = 1.0 / (2*np.pi)
f = omega * factor
ax3.plot(f, q[1] * factor)
ax3.fill_between(f, q[0] * factor, q[2] * factor, alpha=0.3)
ax3.plot(f, true_psd * factor, "--k")
ax3.set_xlim(f[0], f[-1])
ax3.set_yscale("log")
ax3.set_xscale("log")
ax3.set_xlabel("frequency [day$^{-1}$]")
ax3.set_ylabel("power [ppm$^2$ day]")
ax2.xaxis.set_label_coords(0.5, -0.3)
ax3.xaxis.set_label_coords(0.5, -0.1)
fig.savefig("correct.pdf", bbox_inches="tight")
```
```python
from scipy.linalg import cho_solve, cho_factor
p0 = gp.get_parameter_vector()
fast_timing = %timeit -o log_probability(p0)
def _time_this():
K = gp.get_matrix(include_diagonal=True)
factor = cho_factor(K, overwrite_a=True)
ld = 2.0 * np.sum(np.log(np.diag(factor[0])))
ll = -0.5*(np.dot(y, cho_solve(factor, y))+ld) + gp.log_prior()
slow_timing = %timeit -o _time_this()
```
```python
tau = np.mean(sampler.get_autocorr_time(c=3))
neff = len(sampler.flatchain) / tau
```
```python
import json
c = gp.kernel.coefficients
with open("correct.json", "w") as f:
json.dump(dict(
N=len(t),
J=len(c[0]) + len(c[2]),
tau=tau,
neff=neff,
time=fast_timing.average,
direct_time=slow_timing.average,
ndim=ndim,
nwalkers=nwalkers,
nburn=500,
nsteps=2000,
), f)
```
```python
name_map = {
"kernel:log_S0": r"$\ln(S_0)$",
"kernel:log_Q": r"$\ln(Q)$",
"kernel:log_omega0": r"$\ln(\omega_0)$",
}
params = list(zip(
(name_map[n] for n in gp.get_parameter_names()),
gp.get_parameter_bounds()
))
with open("correct-params.json", "w") as f:
json.dump(params, f)
```
```python
```
|
j-fariaREPO_NAMEkimaPATH_START.@kima_extracted@kima-master@celerite@paper@figures@simulated@correct.ipynb@.PATH_END.py
|
{
"filename": "test_transforms.py",
"repo_name": "pytorch/vision",
"repo_path": "vision_extracted/vision-main/test/test_transforms.py",
"type": "Python"
}
|
import math
import os
import random
import re
import sys
from functools import partial
import numpy as np
import pytest
import torch
import torchvision.transforms as transforms
import torchvision.transforms._functional_tensor as F_t
import torchvision.transforms.functional as F
from PIL import Image
from torch._utils_internal import get_file_path_2
try:
import accimage
except ImportError:
accimage = None
try:
from scipy import stats
except ImportError:
stats = None
from common_utils import assert_equal, cycle_over, float_dtypes, int_dtypes
GRACE_HOPPER = get_file_path_2(
os.path.dirname(os.path.abspath(__file__)), "assets", "encode_jpeg", "grace_hopper_517x606.jpg"
)
def _get_grayscale_test_image(img, fill=None):
img = img.convert("L")
fill = (fill[0],) if isinstance(fill, tuple) else fill
return img, fill
class TestConvertImageDtype:
    """Tests for transforms.ConvertImageDtype / F.convert_image_dtype.

    Every test also runs a torch.jit-scripted F.convert_image_dtype and
    checks that eager and scripted outputs agree.
    """
    @pytest.mark.parametrize("input_dtype, output_dtype", cycle_over(float_dtypes()))
    def test_float_to_float(self, input_dtype, output_dtype):
        """Float -> float conversion keeps values in [0.0, 1.0] (up to precision)."""
        input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
        transform = transforms.ConvertImageDtype(output_dtype)
        transform_script = torch.jit.script(F.convert_image_dtype)
        output_image = transform(input_image)
        output_image_script = transform_script(input_image, output_dtype)
        torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
        actual_min, actual_max = output_image.tolist()
        desired_min, desired_max = 0.0, 1.0
        assert abs(actual_min - desired_min) < 1e-7
        assert abs(actual_max - desired_max) < 1e-7
    @pytest.mark.parametrize("input_dtype", float_dtypes())
    @pytest.mark.parametrize("output_dtype", int_dtypes())
    def test_float_to_int(self, input_dtype, output_dtype):
        """Float -> int maps [0.0, 1.0] onto [0, iinfo(out).max].

        Conversions that cannot be performed safely (float32 -> int32/int64,
        float64 -> int64) are expected to raise RuntimeError instead.
        """
        input_image = torch.tensor((0.0, 1.0), dtype=input_dtype)
        transform = transforms.ConvertImageDtype(output_dtype)
        transform_script = torch.jit.script(F.convert_image_dtype)
        if (input_dtype == torch.float32 and output_dtype in (torch.int32, torch.int64)) or (
            input_dtype == torch.float64 and output_dtype == torch.int64
        ):
            with pytest.raises(RuntimeError):
                transform(input_image)
        else:
            output_image = transform(input_image)
            output_image_script = transform_script(input_image, output_dtype)
            torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
            actual_min, actual_max = output_image.tolist()
            desired_min, desired_max = 0, torch.iinfo(output_dtype).max
            assert actual_min == desired_min
            assert actual_max == desired_max
    @pytest.mark.parametrize("input_dtype", int_dtypes())
    @pytest.mark.parametrize("output_dtype", float_dtypes())
    def test_int_to_float(self, input_dtype, output_dtype):
        """Int -> float maps [0, iinfo(in).max] into [0.0, 1.0] without overshoot."""
        input_image = torch.tensor((0, torch.iinfo(input_dtype).max), dtype=input_dtype)
        transform = transforms.ConvertImageDtype(output_dtype)
        transform_script = torch.jit.script(F.convert_image_dtype)
        output_image = transform(input_image)
        output_image_script = transform_script(input_image, output_dtype)
        torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6)
        actual_min, actual_max = output_image.tolist()
        desired_min, desired_max = 0.0, 1.0
        assert abs(actual_min - desired_min) < 1e-7
        assert actual_min >= desired_min
        assert abs(actual_max - desired_max) < 1e-7
        assert actual_max <= desired_max
    @pytest.mark.parametrize("input_dtype, output_dtype", cycle_over(int_dtypes()))
    def test_dtype_int_to_int(self, input_dtype, output_dtype):
        """Int -> int maps full input range onto the output range, allowing for
        the documented rounding error when upscaling to a wider dtype."""
        input_max = torch.iinfo(input_dtype).max
        input_image = torch.tensor((0, input_max), dtype=input_dtype)
        output_max = torch.iinfo(output_dtype).max
        transform = transforms.ConvertImageDtype(output_dtype)
        transform_script = torch.jit.script(F.convert_image_dtype)
        output_image = transform(input_image)
        output_image_script = transform_script(input_image, output_dtype)
        torch.testing.assert_close(
            output_image_script,
            output_image,
            rtol=0.0,
            atol=1e-6,
            msg=f"{output_image_script} vs {output_image}",
        )
        actual_min, actual_max = output_image.tolist()
        desired_min, desired_max = 0, output_max
        # see https://github.com/pytorch/vision/pull/2078#issuecomment-641036236 for details
        if input_max >= output_max:
            error_term = 0
        else:
            error_term = 1 - (torch.iinfo(output_dtype).max + 1) // (torch.iinfo(input_dtype).max + 1)
        assert actual_min == desired_min
        assert actual_max == (desired_max + error_term)
    @pytest.mark.parametrize("input_dtype, output_dtype", cycle_over(int_dtypes()))
    def test_int_to_int_consistency(self, input_dtype, output_dtype):
        """Converting to a wider int dtype and back must round-trip exactly."""
        input_max = torch.iinfo(input_dtype).max
        input_image = torch.tensor((0, input_max), dtype=input_dtype)
        output_max = torch.iinfo(output_dtype).max
        # Only the widening direction can round-trip losslessly.
        if output_max <= input_max:
            return
        transform = transforms.ConvertImageDtype(output_dtype)
        inverse_transfrom = transforms.ConvertImageDtype(input_dtype)
        output_image = inverse_transfrom(transform(input_image))
        actual_min, actual_max = output_image.tolist()
        desired_min, desired_max = 0, input_max
        assert actual_min == desired_min
        assert actual_max == desired_max
@pytest.mark.skipif(accimage is None, reason="accimage not available")
class TestAccImage:
    """Check that accimage-backed images go through the transforms pipeline
    the same way PIL images do (skipped when accimage is not installed)."""
    def test_accimage_to_tensor(self):
        """PILToTensor must give identical tensors for PIL and accimage input."""
        trans = transforms.PILToTensor()
        expected_output = trans(Image.open(GRACE_HOPPER).convert("RGB"))
        output = trans(accimage.Image(GRACE_HOPPER))
        torch.testing.assert_close(output, expected_output)
    def test_accimage_pil_to_tensor(self):
        """Same as above, additionally asserting matching tensor sizes."""
        trans = transforms.PILToTensor()
        expected_output = trans(Image.open(GRACE_HOPPER).convert("RGB"))
        output = trans(accimage.Image(GRACE_HOPPER))
        assert expected_output.size() == output.size()
        torch.testing.assert_close(output, expected_output)
    def test_accimage_resize(self):
        """Resize results from PIL and accimage agree within loose tolerances
        (the two backends use slightly different interpolation code paths)."""
        trans = transforms.Compose(
            [
                transforms.Resize(256, interpolation=Image.LINEAR),
                transforms.PILToTensor(),
                transforms.ConvertImageDtype(dtype=torch.float),
            ]
        )
        # Checking if Compose, Resize and ToTensor can be printed as string
        trans.__repr__()
        expected_output = trans(Image.open(GRACE_HOPPER).convert("RGB"))
        output = trans(accimage.Image(GRACE_HOPPER))
        assert expected_output.size() == output.size()
        assert np.abs((expected_output - output).mean()) < 1e-3
        assert (expected_output - output).var() < 1e-5
        # note the high absolute tolerance
        torch.testing.assert_close(output.numpy(), expected_output.numpy(), rtol=1e-5, atol=5e-2)
    def test_accimage_crop(self):
        """CenterCrop must give identical results for PIL and accimage input."""
        trans = transforms.Compose(
            [transforms.CenterCrop(256), transforms.PILToTensor(), transforms.ConvertImageDtype(dtype=torch.float)]
        )
        # Checking if Compose, CenterCrop and ToTensor can be printed as string
        trans.__repr__()
        expected_output = trans(Image.open(GRACE_HOPPER).convert("RGB"))
        output = trans(accimage.Image(GRACE_HOPPER))
        assert expected_output.size() == output.size()
        torch.testing.assert_close(output, expected_output)
class TestToTensor:
    """Tests for transforms.ToTensor and transforms.PILToTensor covering
    tensor, ndarray and PIL inputs (including mode '1' bilevel images)."""
    @pytest.mark.parametrize("channels", [1, 3, 4])
    def test_to_tensor(self, channels):
        """ToTensor round-trips tensors/ndarrays through PIL and scales
        uint8 data into [0, 1] floats."""
        height, width = 4, 4
        trans = transforms.ToTensor()
        np_rng = np.random.RandomState(0)
        input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        torch.testing.assert_close(output, input_data)
        # uint8 HWC ndarray: converted to CHW and divided by 255.
        ndarray = np_rng.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
        output = trans(ndarray)
        expected_output = ndarray.transpose((2, 0, 1)) / 255.0
        torch.testing.assert_close(output.numpy(), expected_output, check_dtype=False)
        # float32 HWC ndarray: only transposed, values untouched.
        ndarray = np_rng.rand(height, width, channels).astype(np.float32)
        output = trans(ndarray)
        expected_output = ndarray.transpose((2, 0, 1))
        torch.testing.assert_close(output.numpy(), expected_output, check_dtype=False)
        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert("1")
        output = trans(img)
        torch.testing.assert_close(input_data, output, check_dtype=False)
    def test_to_tensor_errors(self):
        """ToTensor rejects lists and ndarrays of unsupported dimensionality."""
        height, width = 4, 4
        trans = transforms.ToTensor()
        np_rng = np.random.RandomState(0)
        with pytest.raises(TypeError):
            trans(np_rng.rand(1, height, width).tolist())
        with pytest.raises(ValueError):
            trans(np_rng.rand(height))
        with pytest.raises(ValueError):
            trans(np_rng.rand(1, 1, height, width))
    @pytest.mark.parametrize("dtype", [torch.float16, torch.float, torch.double])
    def test_to_tensor_with_other_default_dtypes(self, dtype):
        """The output dtype follows torch.get_default_dtype()."""
        np_rng = np.random.RandomState(0)
        # Save the current default so it can be restored at the end.
        current_def_dtype = torch.get_default_dtype()
        t = transforms.ToTensor()
        np_arr = np_rng.randint(0, 255, (32, 32, 3), dtype=np.uint8)
        img = Image.fromarray(np_arr)
        torch.set_default_dtype(dtype)
        res = t(img)
        assert res.dtype == dtype, f"{res.dtype} vs {dtype}"
        torch.set_default_dtype(current_def_dtype)
    @pytest.mark.parametrize("channels", [1, 3, 4])
    def test_pil_to_tensor(self, channels):
        """PILToTensor round-trips data without the /255 rescaling ToTensor does."""
        height, width = 4, 4
        trans = transforms.PILToTensor()
        np_rng = np.random.RandomState(0)
        input_data = torch.ByteTensor(channels, height, width).random_(0, 255)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        torch.testing.assert_close(input_data, output)
        input_data = np_rng.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8)
        img = transforms.ToPILImage()(input_data)
        output = trans(img)
        expected_output = input_data.transpose((2, 0, 1))
        torch.testing.assert_close(output.numpy(), expected_output)
        input_data = torch.as_tensor(np_rng.rand(channels, height, width).astype(np.float32))
        img = transforms.ToPILImage()(input_data)  # CHW -> HWC and (* 255).byte()
        output = trans(img)  # HWC -> CHW
        expected_output = (input_data * 255).byte()
        torch.testing.assert_close(output, expected_output)
        # separate test for mode '1' PIL images
        input_data = torch.ByteTensor(1, height, width).bernoulli_()
        img = transforms.ToPILImage()(input_data.mul(255)).convert("1")
        output = trans(img).view(torch.uint8).bool().to(torch.uint8)
        torch.testing.assert_close(input_data, output)
    def test_pil_to_tensor_errors(self):
        """PILToTensor only accepts PIL images — lists and ndarrays raise."""
        height, width = 4, 4
        trans = transforms.PILToTensor()
        np_rng = np.random.RandomState(0)
        with pytest.raises(TypeError):
            trans(np_rng.rand(1, height, width).tolist())
        with pytest.raises(TypeError):
            trans(np_rng.rand(1, height, width))
def test_randomresized_params():
    """RandomResizedCrop.get_params returns ints and honors the ratio bounds."""
    height = random.randint(24, 32) * 2
    width = random.randint(24, 32) * 2
    pil_img = transforms.ToPILImage()(torch.ones(3, height, width))
    size = 100
    epsilon = 0.05
    min_scale = 0.25
    for _ in range(10):
        # Build random but valid scale / aspect-ratio ranges.
        lo_scale = max(round(random.random(), 2), min_scale)
        scale = (lo_scale, lo_scale + round(random.random(), 2))
        lo_ratio = max(round(random.random(), 2), epsilon)
        ratio = (lo_ratio, lo_ratio + round(random.random(), 2))
        crop = transforms.RandomResizedCrop(size, scale, ratio, antialias=True)
        i, j, h, w = crop.get_params(pil_img, scale, ratio)
        obtained_ratio = w / h
        # The fallback (whole-image) crop may legitimately have ratio 1.0.
        within_bounds = min(ratio) - epsilon <= obtained_ratio <= max(ratio) + epsilon
        assert within_bounds or obtained_ratio == 1.0
        for value in (i, j, h, w):
            assert isinstance(value, int)
@pytest.mark.parametrize(
    "height, width",
    [
        # height, width
        # square image
        (28, 28),
        (27, 27),
        # rectangular image: h < w
        (28, 34),
        (29, 35),
        # rectangular image: h > w
        (34, 28),
        (35, 29),
    ],
)
@pytest.mark.parametrize(
    "osize",
    [
        # single integer
        22,
        27,
        28,
        36,
        # single integer in tuple/list
        [
            22,
        ],
        (27,),
    ],
)
@pytest.mark.parametrize("max_size", (None, 37, 1000))
def test_resize(height, width, osize, max_size):
    """Resize with a scalar size matches the smaller edge to `osize`,
    preserving aspect ratio, and caps the larger edge at `max_size`."""
    img = Image.new("RGB", size=(width, height), color=127)
    t = transforms.Resize(osize, max_size=max_size, antialias=True)
    result = t(img)
    msg = f"{height}, {width} - {osize} - {max_size}"
    # Normalize 1-element sequences to the scalar they wrap.
    osize = osize[0] if isinstance(osize, (list, tuple)) else osize
    # If size is an int, smaller edge of the image will be matched to this number.
    # i.e, if height > width, then image will be rescaled to (size * height / width, size).
    if height < width:
        exp_w, exp_h = (int(osize * width / height), osize)  # (w, h)
        if max_size is not None and max_size < exp_w:
            exp_w, exp_h = max_size, int(max_size * exp_h / exp_w)
        assert result.size == (exp_w, exp_h), msg
    elif width < height:
        exp_w, exp_h = (osize, int(osize * height / width))  # (w, h)
        if max_size is not None and max_size < exp_h:
            exp_w, exp_h = int(max_size * exp_w / exp_h), max_size
        assert result.size == (exp_w, exp_h), msg
    else:
        exp_w, exp_h = (osize, osize)  # (w, h)
        if max_size is not None and max_size < osize:
            exp_w, exp_h = max_size, max_size
        assert result.size == (exp_w, exp_h), msg
@pytest.mark.parametrize(
    "height, width",
    [
        # height, width
        # square image
        (28, 28),
        (27, 27),
        # rectangular image: h < w
        (28, 34),
        (29, 35),
        # rectangular image: h > w
        (34, 28),
        (35, 29),
    ],
)
@pytest.mark.parametrize(
    "osize",
    [
        # two integers sequence output
        [22, 22],
        [22, 28],
        [22, 36],
        [27, 22],
        [36, 22],
        [28, 28],
        [28, 37],
        [37, 27],
        [37, 37],
    ],
)
def test_resize_sequence_output(height, width, osize):
    """A two-element size sequence resizes to exactly (h, w), ignoring aspect ratio."""
    img = Image.new("RGB", size=(width, height), color=127)
    oheight, owidth = osize
    resized = transforms.Resize(osize, antialias=True)(img)
    # PIL reports size as (width, height).
    assert resized.size == (owidth, oheight)
def test_resize_antialias_error():
    """PIL input always gets anti-aliasing, so antialias=False must warn."""
    img = Image.new("RGB", size=(35, 29), color=127)
    with pytest.warns(UserWarning, match=r"Anti-alias option is always applied for PIL Image input"):
        resize = transforms.Resize([37, 37], antialias=False)
        resize(img)
@pytest.mark.parametrize("height, width", ((32, 64), (64, 32)))
def test_resize_size_equals_small_edge_size(height, width):
    """Non-regression test for https://github.com/pytorch/vision/issues/5405:
    max_size must be honored even when size equals the small edge size."""
    max_size = 40
    img = Image.new("RGB", size=(width, height), color=127)
    small_edge = min(height, width)
    resized = transforms.Resize(small_edge, max_size=max_size, antialias=True)(img)
    assert max(resized.size) == max_size
def test_resize_equal_input_output_sizes():
    """Regression test for https://github.com/pytorch/vision/issues/7518:
    resizing a PIL image to its current size must return the input itself."""
    height, width = 28, 27
    img = Image.new("RGB", size=(width, height))
    resized = transforms.Resize((height, width), antialias=True)(img)
    assert resized is img
class TestPad:
    """Tests for transforms.Pad and F.pad: constant fill, tuple paddings,
    non-constant padding modes, argument validation, and mode 'F' images."""
    @pytest.mark.parametrize("fill", [85, 85.0])
    def test_pad(self, fill):
        """Symmetric constant padding grows the image and fills with `fill`."""
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = torch.ones(3, height, width, dtype=torch.uint8)
        padding = random.randint(1, 20)
        result = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.Pad(padding, fill=fill),
                transforms.PILToTensor(),
            ]
        )(img)
        assert result.size(1) == height + 2 * padding
        assert result.size(2) == width + 2 * padding
        # check that all elements in the padded region correspond
        # to the pad value
        h_padded = result[:, :padding, :]
        w_padded = result[:, :, :padding]
        torch.testing.assert_close(h_padded, torch.full_like(h_padded, fill_value=fill), rtol=0.0, atol=0.0)
        torch.testing.assert_close(w_padded, torch.full_like(w_padded, fill_value=fill), rtol=0.0, atol=0.0)
        # A 2-tuple fill is invalid for a 3-channel image.
        pytest.raises(ValueError, transforms.Pad(padding, fill=(1, 2)), transforms.ToPILImage()(img))
    def test_pad_with_tuple_of_pad_values(self):
        """2-tuple padding means (left/right, top/bottom); 4-list means
        (left, top, right, bottom)."""
        height = random.randint(10, 32) * 2
        width = random.randint(10, 32) * 2
        img = transforms.ToPILImage()(torch.ones(3, height, width))
        padding = tuple(random.randint(1, 20) for _ in range(2))
        output = transforms.Pad(padding)(img)
        assert output.size == (width + padding[0] * 2, height + padding[1] * 2)
        padding = [random.randint(1, 20) for _ in range(4)]
        output = transforms.Pad(padding)(img)
        assert output.size[0] == width + padding[0] + padding[2]
        assert output.size[1] == height + padding[1] + padding[3]
        # Checking if Padding can be printed as string
        transforms.Pad(padding).__repr__()
    def test_pad_with_non_constant_padding_modes(self):
        """Unit tests for edge, reflect, symmetric padding"""
        img = torch.zeros(3, 27, 27).byte()
        img[:, :, 0] = 1  # Constant value added to leftmost edge
        img = transforms.ToPILImage()(img)
        # Surround with a known constant ring so the pad source is unambiguous.
        img = F.pad(img, 1, (200, 200, 200))
        # pad 3 to all sidess
        edge_padded_img = F.pad(img, 3, padding_mode="edge")
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0
        edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6]
        assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0], dtype=np.uint8))
        assert transforms.PILToTensor()(edge_padded_img).size() == (3, 35, 35)
        # Pad 3 to left/right, 2 to top/bottom
        reflect_padded_img = F.pad(img, (3, 2), padding_mode="reflect")
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0
        reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6]
        assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0], dtype=np.uint8))
        assert transforms.PILToTensor()(reflect_padded_img).size() == (3, 33, 35)
        # Pad 3 to left, 2 to top, 2 to right, 1 to bottom
        symmetric_padded_img = F.pad(img, (3, 2, 2, 1), padding_mode="symmetric")
        # First 6 elements of leftmost edge in the middle of the image, values are in order:
        # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0
        symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6]
        assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0], dtype=np.uint8))
        assert transforms.PILToTensor()(symmetric_padded_img).size() == (3, 32, 34)
        # Check negative padding explicitly for symmetric case, since it is not
        # implemented for tensor case to compare to
        # Crop 1 to left, pad 2 to top, pad 3 to right, crop 3 to bottom
        symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode="symmetric")
        symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3]
        symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:]
        assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0], dtype=np.uint8))
        assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0], dtype=np.uint8))
        assert transforms.PILToTensor()(symmetric_padded_img_neg).size() == (3, 28, 31)
    def test_pad_raises_with_invalid_pad_sequence_len(self):
        """Only 1-, 2- or 4-element paddings are valid."""
        with pytest.raises(ValueError):
            transforms.Pad(())
        with pytest.raises(ValueError):
            transforms.Pad((1, 2, 3))
        with pytest.raises(ValueError):
            transforms.Pad((1, 2, 3, 4, 5))
    def test_pad_with_mode_F_images(self):
        """Pad works on 32-bit float ('F') PIL images too."""
        pad = 2
        transform = transforms.Pad(pad)
        img = Image.new("F", (10, 10))
        padded_img = transform(img)
        assert_equal(padded_img.size, [edge_size + 2 * pad for edge_size in img.size])
@pytest.mark.parametrize(
    "fn, trans, kwargs",
    [
        (F.invert, transforms.RandomInvert, {}),
        (F.posterize, transforms.RandomPosterize, {"bits": 4}),
        (F.solarize, transforms.RandomSolarize, {"threshold": 192}),
        (F.adjust_sharpness, transforms.RandomAdjustSharpness, {"sharpness_factor": 2.0}),
        (F.autocontrast, transforms.RandomAutocontrast, {}),
        (F.equalize, transforms.RandomEqualize, {}),
        (F.vflip, transforms.RandomVerticalFlip, {}),
        (F.hflip, transforms.RandomHorizontalFlip, {}),
        (partial(F.to_grayscale, num_output_channels=3), transforms.RandomGrayscale, {}),
    ],
)
@pytest.mark.parametrize("seed", range(10))
@pytest.mark.parametrize("p", (0, 1))
def test_randomness(fn, trans, kwargs, seed, p):
    """Each Random* transform is a no-op at p=0 and equals the underlying
    functional op `fn` at p=1, across several seeds."""
    torch.manual_seed(seed)
    img = transforms.ToPILImage()(torch.rand(3, 16, 18))
    expected_transformed_img = fn(img, **kwargs)
    randomly_transformed_img = trans(p=p, **kwargs)(img)
    if p == 0:
        assert randomly_transformed_img == img
    elif p == 1:
        assert randomly_transformed_img == expected_transformed_img
    # Smoke-check that the transform can be rendered as a string.
    trans(**kwargs).__repr__()
def test_autocontrast_equal_minmax():
    """Tensor and PIL autocontrast must agree on a constant-per-channel image (min == max)."""
    tensor_img = torch.tensor([[[10]], [[128]], [[245]]], dtype=torch.uint8).expand(3, 32, 32)
    pil_img = F.to_pil_image(tensor_img)
    torch.testing.assert_close(F.autocontrast(tensor_img), F.pil_to_tensor(F.autocontrast(pil_img)))
class TestToPil:
    """Exercise ``transforms.ToPILImage`` over 1/2/3/4-channel tensors and ndarrays.

    The ``_get_*_various_types`` helpers are plain generators evaluated at
    class-body time to feed ``@pytest.mark.parametrize``, which is why they
    are written without ``self``.
    """

    def _get_1_channel_tensor_various_types():
        # Yields (input, expected post-round-trip array, expected PIL mode).
        img_data_float = torch.Tensor(1, 4, 4).uniform_()
        # Float input is quantized to uint8 during conversion, hence mul/int/div.
        expected_output = img_data_float.mul(255).int().float().div(255).numpy()
        yield img_data_float, expected_output, "L"
        img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255)
        expected_output = img_data_byte.float().div(255.0).numpy()
        yield img_data_byte, expected_output, "L"
        img_data_short = torch.ShortTensor(1, 4, 4).random_()
        expected_output = img_data_short.numpy()
        # 16-bit PIL mode letter depends on the host byte order.
        yield img_data_short, expected_output, "I;16" if sys.byteorder == "little" else "I;16B"
        img_data_int = torch.IntTensor(1, 4, 4).random_()
        expected_output = img_data_int.numpy()
        yield img_data_int, expected_output, "I"

    def _get_2d_tensor_various_types():
        # Same cases as above, but for 2D inputs (no channel dimension).
        img_data_float = torch.Tensor(4, 4).uniform_()
        expected_output = img_data_float.mul(255).int().float().div(255).numpy()
        yield img_data_float, expected_output, "L"
        img_data_byte = torch.ByteTensor(4, 4).random_(0, 255)
        expected_output = img_data_byte.float().div(255.0).numpy()
        yield img_data_byte, expected_output, "L"
        img_data_short = torch.ShortTensor(4, 4).random_()
        expected_output = img_data_short.numpy()
        yield img_data_short, expected_output, "I;16" if sys.byteorder == "little" else "I;16B"
        img_data_int = torch.IntTensor(4, 4).random_()
        expected_output = img_data_int.numpy()
        yield img_data_int, expected_output, "I"

    @pytest.mark.parametrize("with_mode", [False, True])
    @pytest.mark.parametrize("img_data, expected_output, expected_mode", _get_1_channel_tensor_various_types())
    def test_1_channel_tensor_to_pil_image(self, with_mode, img_data, expected_output, expected_mode):
        """1xHxW tensors convert with and without an explicit mode; values round-trip."""
        transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
        to_tensor = transforms.ToTensor()
        img = transform(img_data)
        assert img.mode == expected_mode
        torch.testing.assert_close(expected_output, to_tensor(img).numpy())

    def test_1_channel_float_tensor_to_pil_image(self):
        """Explicit 'F' mode keeps float values (no uint8 quantization)."""
        img_data = torch.Tensor(1, 4, 4).uniform_()
        # 'F' mode for torch.FloatTensor
        img_F_mode = transforms.ToPILImage(mode="F")(img_data)
        assert img_F_mode.mode == "F"
        torch.testing.assert_close(
            np.array(Image.fromarray(img_data.squeeze(0).numpy(), mode="F")), np.array(img_F_mode)
        )

    @pytest.mark.parametrize("with_mode", [False, True])
    @pytest.mark.parametrize(
        "img_data, expected_mode",
        [
            (torch.Tensor(4, 4, 1).uniform_().numpy(), "L"),
            (torch.ByteTensor(4, 4, 1).random_(0, 255).numpy(), "L"),
            (torch.ShortTensor(4, 4, 1).random_().numpy(), "I;16" if sys.byteorder == "little" else "I;16B"),
            (torch.IntTensor(4, 4, 1).random_().numpy(), "I"),
        ],
    )
    def test_1_channel_ndarray_to_pil_image(self, with_mode, img_data, expected_mode):
        """HxWx1 ndarrays convert; pixel values survive the round trip."""
        transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
        img = transform(img_data)
        assert img.mode == expected_mode
        if np.issubdtype(img_data.dtype, np.floating):
            img_data = (img_data * 255).astype(np.uint8)
        # note: we explicitly convert img's dtype because pytorch doesn't support uint16
        # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array
        torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype))

    @pytest.mark.parametrize("expected_mode", [None, "LA"])
    def test_2_channel_ndarray_to_pil_image(self, expected_mode):
        """HxWx2 byte ndarrays become 'LA' images; each band matches its channel."""
        img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
        if expected_mode is None:
            img = transforms.ToPILImage()(img_data)
            assert img.mode == "LA"  # default should assume LA
        else:
            img = transforms.ToPILImage(mode=expected_mode)(img_data)
            assert img.mode == expected_mode
        split = img.split()
        for i in range(2):
            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))

    def test_2_channel_ndarray_to_pil_image_error(self):
        """Non-'LA' modes are rejected for 2-channel ndarray input."""
        img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy()
        transforms.ToPILImage().__repr__()
        # should raise if we try a mode for 4 or 1 or 3 channel images
        with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
            transforms.ToPILImage(mode="RGBA")(img_data)
        with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
            transforms.ToPILImage(mode="P")(img_data)
        with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
            transforms.ToPILImage(mode="RGB")(img_data)

    @pytest.mark.parametrize("expected_mode", [None, "LA"])
    def test_2_channel_tensor_to_pil_image(self, expected_mode):
        """2xHxW float tensors become 'LA' images; each band matches its channel."""
        img_data = torch.Tensor(2, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        if expected_mode is None:
            img = transforms.ToPILImage()(img_data)
            assert img.mode == "LA"  # default should assume LA
        else:
            img = transforms.ToPILImage(mode=expected_mode)(img_data)
            assert img.mode == expected_mode
        split = img.split()
        for i in range(2):
            torch.testing.assert_close(expected_output[i].numpy(), F.to_tensor(split[i]).squeeze(0).numpy())

    def test_2_channel_tensor_to_pil_image_error(self):
        """Non-'LA' modes are rejected for 2-channel tensor input."""
        img_data = torch.Tensor(2, 4, 4).uniform_()
        # should raise if we try a mode for 4 or 1 or 3 channel images
        with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
            transforms.ToPILImage(mode="RGBA")(img_data)
        with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
            transforms.ToPILImage(mode="P")(img_data)
        with pytest.raises(ValueError, match=r"Only modes \['LA'\] are supported for 2D inputs"):
            transforms.ToPILImage(mode="RGB")(img_data)

    @pytest.mark.parametrize("with_mode", [False, True])
    @pytest.mark.parametrize("img_data, expected_output, expected_mode", _get_2d_tensor_various_types())
    def test_2d_tensor_to_pil_image(self, with_mode, img_data, expected_output, expected_mode):
        """2D tensors (no channel dim) convert and round-trip correctly."""
        transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
        to_tensor = transforms.ToTensor()
        img = transform(img_data)
        assert img.mode == expected_mode
        torch.testing.assert_close(expected_output, to_tensor(img).numpy()[0])

    @pytest.mark.parametrize("with_mode", [False, True])
    @pytest.mark.parametrize(
        "img_data, expected_mode",
        [
            (torch.Tensor(4, 4).uniform_().numpy(), "L"),
            (torch.ByteTensor(4, 4).random_(0, 255).numpy(), "L"),
            (torch.ShortTensor(4, 4).random_().numpy(), "I;16" if sys.byteorder == "little" else "I;16B"),
            (torch.IntTensor(4, 4).random_().numpy(), "I"),
        ],
    )
    def test_2d_ndarray_to_pil_image(self, with_mode, img_data, expected_mode):
        """2D ndarrays convert; pixel values match the input array."""
        transform = transforms.ToPILImage(mode=expected_mode) if with_mode else transforms.ToPILImage()
        img = transform(img_data)
        assert img.mode == expected_mode
        if np.issubdtype(img_data.dtype, np.floating):
            img_data = (img_data * 255).astype(np.uint8)
        np.testing.assert_allclose(img_data, img)

    @pytest.mark.parametrize("expected_mode", [None, "RGB", "HSV", "YCbCr"])
    def test_3_channel_tensor_to_pil_image(self, expected_mode):
        """3xHxW tensors convert; default mode is RGB; bands match channels."""
        img_data = torch.Tensor(3, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        if expected_mode is None:
            img = transforms.ToPILImage()(img_data)
            assert img.mode == "RGB"  # default should assume RGB
        else:
            img = transforms.ToPILImage(mode=expected_mode)(img_data)
            assert img.mode == expected_mode
        split = img.split()
        for i in range(3):
            torch.testing.assert_close(expected_output[i].numpy(), F.to_tensor(split[i]).squeeze(0).numpy())

    def test_3_channel_tensor_to_pil_image_error(self):
        """Modes for other channel counts — and 4D inputs — are rejected."""
        img_data = torch.Tensor(3, 4, 4).uniform_()
        error_message_3d = r"Only modes \['RGB', 'YCbCr', 'HSV'\] are supported for 3D inputs"
        # should raise if we try a mode for 4 or 1 or 2 channel images
        with pytest.raises(ValueError, match=error_message_3d):
            transforms.ToPILImage(mode="RGBA")(img_data)
        with pytest.raises(ValueError, match=error_message_3d):
            transforms.ToPILImage(mode="P")(img_data)
        with pytest.raises(ValueError, match=error_message_3d):
            transforms.ToPILImage(mode="LA")(img_data)
        with pytest.raises(ValueError, match=r"pic should be 2/3 dimensional. Got \d+ dimensions."):
            transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_())

    @pytest.mark.parametrize("expected_mode", [None, "RGB", "HSV", "YCbCr"])
    def test_3_channel_ndarray_to_pil_image(self, expected_mode):
        """HxWx3 byte ndarrays convert; default mode is RGB; bands match channels."""
        img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
        if expected_mode is None:
            img = transforms.ToPILImage()(img_data)
            assert img.mode == "RGB"  # default should assume RGB
        else:
            img = transforms.ToPILImage(mode=expected_mode)(img_data)
            assert img.mode == expected_mode
        split = img.split()
        for i in range(3):
            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))

    def test_3_channel_ndarray_to_pil_image_error(self):
        """Modes for other channel counts are rejected for 3-channel ndarray input."""
        img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy()
        # Checking if ToPILImage can be printed as string
        transforms.ToPILImage().__repr__()
        error_message_3d = r"Only modes \['RGB', 'YCbCr', 'HSV'\] are supported for 3D inputs"
        # should raise if we try a mode for 4 or 1 or 2 channel images
        with pytest.raises(ValueError, match=error_message_3d):
            transforms.ToPILImage(mode="RGBA")(img_data)
        with pytest.raises(ValueError, match=error_message_3d):
            transforms.ToPILImage(mode="P")(img_data)
        with pytest.raises(ValueError, match=error_message_3d):
            transforms.ToPILImage(mode="LA")(img_data)

    @pytest.mark.parametrize("expected_mode", [None, "RGBA", "CMYK", "RGBX"])
    def test_4_channel_tensor_to_pil_image(self, expected_mode):
        """4xHxW tensors convert; default mode is RGBA; bands match channels."""
        img_data = torch.Tensor(4, 4, 4).uniform_()
        expected_output = img_data.mul(255).int().float().div(255)
        if expected_mode is None:
            img = transforms.ToPILImage()(img_data)
            assert img.mode == "RGBA"  # default should assume RGBA
        else:
            img = transforms.ToPILImage(mode=expected_mode)(img_data)
            assert img.mode == expected_mode
        split = img.split()
        for i in range(4):
            torch.testing.assert_close(expected_output[i].numpy(), F.to_tensor(split[i]).squeeze(0).numpy())

    def test_4_channel_tensor_to_pil_image_error(self):
        """Modes for other channel counts are rejected for 4-channel tensor input."""
        img_data = torch.Tensor(4, 4, 4).uniform_()
        error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs"
        # should raise if we try a mode for 3 or 1 or 2 channel images
        with pytest.raises(ValueError, match=error_message_4d):
            transforms.ToPILImage(mode="RGB")(img_data)
        with pytest.raises(ValueError, match=error_message_4d):
            transforms.ToPILImage(mode="P")(img_data)
        with pytest.raises(ValueError, match=error_message_4d):
            transforms.ToPILImage(mode="LA")(img_data)

    @pytest.mark.parametrize("expected_mode", [None, "RGBA", "CMYK", "RGBX"])
    def test_4_channel_ndarray_to_pil_image(self, expected_mode):
        """HxWx4 byte ndarrays convert; default mode is RGBA; bands match channels."""
        img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
        if expected_mode is None:
            img = transforms.ToPILImage()(img_data)
            assert img.mode == "RGBA"  # default should assume RGBA
        else:
            img = transforms.ToPILImage(mode=expected_mode)(img_data)
            assert img.mode == expected_mode
        split = img.split()
        for i in range(4):
            torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]))

    def test_4_channel_ndarray_to_pil_image_error(self):
        """Modes for other channel counts are rejected for 4-channel ndarray input."""
        img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy()
        error_message_4d = r"Only modes \['RGBA', 'CMYK', 'RGBX'\] are supported for 4D inputs"
        # should raise if we try a mode for 3 or 1 or 2 channel images
        with pytest.raises(ValueError, match=error_message_4d):
            transforms.ToPILImage(mode="RGB")(img_data)
        with pytest.raises(ValueError, match=error_message_4d):
            transforms.ToPILImage(mode="P")(img_data)
        with pytest.raises(ValueError, match=error_message_4d):
            transforms.ToPILImage(mode="LA")(img_data)

    def test_ndarray_bad_types_to_pil_image(self):
        """Unsupported dtypes, extra dims, and >4 channels raise for ndarray input."""
        trans = transforms.ToPILImage()
        reg_msg = r"Input type \w+ is not supported"
        with pytest.raises(TypeError, match=reg_msg):
            trans(np.ones([4, 4, 1], np.int64))
        with pytest.raises(TypeError, match=reg_msg):
            trans(np.ones([4, 4, 1], np.uint16))
        with pytest.raises(TypeError, match=reg_msg):
            trans(np.ones([4, 4, 1], np.uint32))
        with pytest.raises(ValueError, match=r"pic should be 2/3 dimensional. Got \d+ dimensions."):
            transforms.ToPILImage()(np.ones([1, 4, 4, 3]))
        with pytest.raises(ValueError, match=r"pic should not have > 4 channels. Got \d+ channels."):
            transforms.ToPILImage()(np.ones([4, 4, 6]))

    def test_tensor_bad_types_to_pil_image(self):
        """Extra dims and >4 channels raise for tensor input."""
        with pytest.raises(ValueError, match=r"pic should be 2/3 dimensional. Got \d+ dimensions."):
            transforms.ToPILImage()(torch.ones(1, 3, 4, 4))
        with pytest.raises(ValueError, match=r"pic should not have > 4 channels. Got \d+ channels."):
            transforms.ToPILImage()(torch.ones(6, 4, 4))
def test_adjust_brightness():
    """adjust_brightness on PIL: identity at factor 1, halving at 0.5, saturating doubling at 2."""
    shape = [2, 2, 3]
    base = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1], dtype=np.uint8).reshape(shape)
    img = Image.fromarray(base, mode="RGB")
    checks = [
        (1, base),
        (0.5, np.array([0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0], dtype=np.uint8).reshape(shape)),
        (2, np.array([0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2], dtype=np.uint8).reshape(shape)),
    ]
    for factor, want in checks:
        torch.testing.assert_close(np.array(F.adjust_brightness(img, factor)), want)
def test_adjust_contrast():
    """adjust_contrast on PIL: identity at factor 1; spot-check factors 0.5 and 2."""
    shape = [2, 2, 3]
    base = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1], dtype=np.uint8).reshape(shape)
    img = Image.fromarray(base, mode="RGB")
    checks = [
        (1, base),
        (0.5, np.array([43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43], dtype=np.uint8).reshape(shape)),
        (2, np.array([0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0], dtype=np.uint8).reshape(shape)),
    ]
    for factor, want in checks:
        torch.testing.assert_close(np.array(F.adjust_contrast(img, factor)), want)
def test_adjust_hue():
    """adjust_hue on PIL: out-of-range factors raise; spot-check shifts at 0, +0.25, -0.25."""
    shape = [2, 2, 3]
    base = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1], dtype=np.uint8).reshape(shape)
    img = Image.fromarray(base, mode="RGB")
    with pytest.raises(ValueError):
        F.adjust_hue(img, -0.7)
        F.adjust_hue(img, 1)
    # hue=0 is almost — but not exactly — the identity: the hsv<->rgb round trip
    # uses floating point and introduces small off-by-one differences.
    checks = [
        (0, [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1]),
        (0.25, [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255]),
        (-0.25, [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1]),
    ]
    for factor, want in checks:
        got = np.array(F.adjust_hue(img, factor))
        torch.testing.assert_close(got, np.array(want, dtype=np.uint8).reshape(shape))
def test_adjust_sharpness():
    """adjust_sharpness on PIL: identity at 1, blur at 0.5, sharpen at 2; tensor path matches PIL."""
    shape = [4, 4, 3]
    base = np.array(
        [
            75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114,
            99, 104, 97, 0, 0, 65, 108, 101, 120, 97, 110, 100,
            101, 114, 32, 86, 114, 121, 110, 105, 111, 116, 105, 115,
            0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117,
        ],
        dtype=np.uint8,
    ).reshape(shape)
    img = Image.fromarray(base, mode="RGB")
    # factor 1 is the identity
    torch.testing.assert_close(np.array(F.adjust_sharpness(img, 1)), base)
    # factor 0.5 smooths the interior (border pixels are untouched by PIL)
    smoothed = np.array(
        [
            75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114,
            99, 104, 97, 30, 30, 74, 103, 96, 114, 97, 110, 100,
            101, 114, 32, 81, 103, 108, 102, 101, 107, 116, 105, 115,
            0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117,
        ],
        dtype=np.uint8,
    ).reshape(shape)
    torch.testing.assert_close(np.array(F.adjust_sharpness(img, 0.5)), smoothed)
    # factor 2 sharpens the interior
    sharpened = np.array(
        [
            75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114,
            99, 104, 97, 0, 0, 46, 118, 111, 132, 97, 110, 100,
            101, 114, 32, 95, 135, 146, 126, 112, 119, 116, 105, 115,
            0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117,
        ],
        dtype=np.uint8,
    ).reshape(shape)
    torch.testing.assert_close(np.array(F.adjust_sharpness(img, 2)), sharpened)
    # tensor input must agree with the PIL result
    small = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1], dtype=np.uint8).reshape([2, 2, 3])
    small_pil = Image.fromarray(small, mode="RGB")
    small_th = torch.tensor(small.transpose(2, 0, 1))
    y_np = np.array(F.adjust_sharpness(small_pil, 2)).transpose(2, 0, 1)
    torch.testing.assert_close(y_np, F.adjust_sharpness(small_th, 2).numpy())
def test_adjust_gamma():
    """adjust_gamma on PIL: identity at gamma 1; spot-check gamma 0.5 and 2."""
    shape = [2, 2, 3]
    base = np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1], dtype=np.uint8).reshape(shape)
    img = Image.fromarray(base, mode="RGB")
    checks = [
        (1, base),
        (0.5, np.array([0, 35, 57, 117, 186, 241, 97, 45, 245, 152, 255, 16], dtype=np.uint8).reshape(shape)),
        (2, np.array([0, 0, 0, 11, 71, 201, 5, 0, 215, 31, 255, 0], dtype=np.uint8).reshape(shape)),
    ]
    for gamma, want in checks:
        torch.testing.assert_close(np.array(F.adjust_gamma(img, gamma)), want)
def test_adjusts_L_mode():
    """Every photometric adjust_* op must keep a grayscale ('L') image in 'L' mode."""
    rgb = Image.fromarray(
        np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1], dtype=np.uint8).reshape([2, 2, 3]),
        mode="RGB",
    )
    gray = rgb.convert("L")
    adjustments = [
        (F.adjust_brightness, 2),
        (F.adjust_saturation, 2),
        (F.adjust_contrast, 2),
        (F.adjust_hue, 0.4),
        (F.adjust_sharpness, 2),
        (F.adjust_gamma, 0.5),
    ]
    for op, factor in adjustments:
        assert op(gray, factor).mode == "L"
def test_rotate():
    """rotate rejects ndarrays, maps a lit pixel as expected with/without expand/center, and 90 == -270."""
    arr = np.zeros((100, 100, 3), dtype=np.uint8)
    arr[40, 40] = [255, 255, 255]
    with pytest.raises(TypeError, match=r"img should be PIL Image"):
        F.rotate(arr, 10)
    img = F.to_pil_image(arr)
    rotated = F.rotate(img, 45)
    assert rotated.size == (100, 100)
    rows, cols, chans = np.where(rotated)
    assert all(v in rows for v in [49, 50])
    assert all(v in cols for v in [36])
    assert all(v in chans for v in [0, 1, 2])
    rotated = F.rotate(img, 45, expand=True)
    assert rotated.size == (142, 142)
    rows, cols, chans = np.where(rotated)
    assert all(v in rows for v in [70, 71])
    assert all(v in cols for v in [57])
    assert all(v in chans for v in [0, 1, 2])
    # Rotating about the lit pixel itself keeps it in place.
    rotated = F.rotate(img, 45, center=(40, 40))
    assert rotated.size == (100, 100)
    rows, cols, chans = np.where(rotated)
    assert all(v in rows for v in [40])
    assert all(v in cols for v in [40])
    assert all(v in chans for v in [0, 1, 2])
    # Angles congruent modulo 360 produce identical output.
    assert_equal(np.array(F.rotate(img, 90)), np.array(F.rotate(img, -270)))
@pytest.mark.parametrize("mode", ["L", "RGB", "F"])
def test_rotate_fill(mode):
    """rotate with a scalar fill paints uncovered corners; a fill tuple of wrong length raises."""
    fill = 127
    img = F.to_pil_image(np.ones((100, 100, 3), dtype=np.uint8) * 255, "RGB").convert(mode)
    num_bands = len(mode)
    corner = F.rotate(img, 45.0, fill=fill).getpixel((0, 0))
    if not isinstance(corner, tuple):
        corner = (corner,)
    assert corner == tuple([fill] * num_bands)
    with pytest.raises(ValueError):
        F.rotate(img, 45.0, fill=tuple([fill] * (num_bands + 1)))
def test_gaussian_blur_asserts():
    """Functional gaussian_blur and transforms.GaussianBlur both validate kernel_size and sigma."""
    np_img = np.ones((100, 100, 3), dtype=np.uint8) * 255
    img = F.to_pil_image(np_img, "RGB")
    # (expected exception, expected message pattern, offending call)
    cases = [
        (ValueError, r"If kernel_size is a sequence its length should be 2", lambda: F.gaussian_blur(img, [3])),
        (ValueError, r"If kernel_size is a sequence its length should be 2", lambda: F.gaussian_blur(img, [3, 3, 3])),
        (ValueError, r"Kernel size should be a tuple/list of two integers", lambda: transforms.GaussianBlur([3, 3, 3])),
        (ValueError, r"kernel_size should have odd and positive integers", lambda: F.gaussian_blur(img, [4, 4])),
        (ValueError, r"Kernel size value should be an odd and positive number", lambda: transforms.GaussianBlur([4, 4])),
        (ValueError, r"kernel_size should have odd and positive integers", lambda: F.gaussian_blur(img, [-3, -3])),
        (ValueError, r"Kernel size value should be an odd and positive number", lambda: transforms.GaussianBlur([-3, -3])),
        (ValueError, r"If sigma is a sequence, its length should be 2", lambda: F.gaussian_blur(img, 3, [1, 1, 1])),
        (ValueError, r"sigma should be a single number or a list/tuple with length 2", lambda: transforms.GaussianBlur(3, [1, 1, 1])),
        (ValueError, r"sigma should have positive values", lambda: F.gaussian_blur(img, 3, -1.0)),
        (ValueError, r"If sigma is a single number, it must be positive", lambda: transforms.GaussianBlur(3, -1.0)),
        (TypeError, r"kernel_size should be int or a sequence of integers", lambda: F.gaussian_blur(img, "kernel_size_string")),
        (ValueError, r"Kernel size should be a tuple/list of two integers", lambda: transforms.GaussianBlur("kernel_size_string")),
        (TypeError, r"sigma should be either float or sequence of floats", lambda: F.gaussian_blur(img, 3, "sigma_string")),
        (ValueError, r"sigma should be a single number or a list/tuple with length 2", lambda: transforms.GaussianBlur(3, "sigma_string")),
    ]
    for exc, pattern, call in cases:
        with pytest.raises(exc, match=pattern):
            call()
def test_lambda():
    """Lambda applies the wrapped callable; in-place callables return the very same tensor."""
    add_ten = transforms.Lambda(lambda t: t.add(10))
    x = torch.randn(10)
    assert_equal(add_ten(x), torch.add(x, 10))
    add_ten_inplace = transforms.Lambda(lambda t: t.add_(10))
    x = torch.randn(10)
    assert_equal(add_ten_inplace(x), x)
    # Checking if Lambda can be printed as string
    add_ten_inplace.__repr__()
def test_to_grayscale():
    """Grayscale: RGB or L input, 1 or 3 output channels; values match PIL's 'L' conversion."""
    shape = [2, 2, 3]
    rgb = Image.fromarray(
        np.array([0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1], dtype=np.uint8).reshape(shape), mode="RGB"
    )
    gray = rgb.convert("L")
    reference = np.array(gray)
    # (input image, requested number of output channels)
    cases = [(rgb, 1), (rgb, 3), (gray, 1), (gray, 3)]
    for src, n_channels in cases:
        trans = transforms.Grayscale(num_output_channels=n_channels)
        out = trans(src)
        out_np = np.array(out)
        if n_channels == 1:
            assert out.mode == "L", "mode should be L"
            assert out_np.shape == tuple(shape[0:2]), "should be 1 channel"
            assert_equal(reference, out_np)
        else:
            assert out.mode == "RGB", "mode should be RGB"
            assert out_np.shape == tuple(shape), "should be 3 channel"
            # All three channels carry the same grayscale values.
            assert_equal(out_np[:, :, 0], out_np[:, :, 1])
            assert_equal(out_np[:, :, 1], out_np[:, :, 2])
            assert_equal(reference, out_np[:, :, 0])
    # Checking if Grayscale can be printed as string
    trans.__repr__()
@pytest.mark.parametrize("seed", range(10))
@pytest.mark.parametrize("p", (0, 1))
def test_random_apply(p, seed):
    """RandomApply: p=0 never applies the wrapped transform, p=1 always does."""
    torch.manual_seed(seed)
    transform = transforms.RandomApply([transforms.RandomRotation((45, 50))], p=p)
    img = transforms.ToPILImage()(torch.rand(3, 30, 40))
    result = transform(img)
    if p == 0:
        assert result == img
    elif p == 1:
        assert result != img
    # Checking if RandomApply can be printed as string
    transform.__repr__()
@pytest.mark.parametrize("seed", range(10))
@pytest.mark.parametrize("proba_passthrough", (0, 1))
def test_random_choice(proba_passthrough, seed):
    """RandomChoice with a degenerate probability vector deterministically picks one transform."""
    # RandomChoice relies on python builtin random.choice, not pytorch
    random.seed(seed)
    transform = transforms.RandomChoice(
        [
            lambda x: x,  # passthrough
            transforms.RandomRotation((45, 50)),
        ],
        p=[proba_passthrough, 1 - proba_passthrough],
    )
    img = transforms.ToPILImage()(torch.rand(3, 30, 40))
    result = transform(img)
    if proba_passthrough == 1:
        assert result == img
    elif proba_passthrough == 0:
        assert result != img
    # Checking if RandomChoice can be printed as string
    transform.__repr__()
@pytest.mark.skipif(stats is None, reason="scipy.stats not available")
def test_random_order():
    """RandomOrder applies its two transforms in either order roughly half the time each."""
    saved_state = random.getstate()
    random.seed(42)
    transform = transforms.RandomOrder([transforms.Resize(20, antialias=True), transforms.CenterCrop(10)])
    img = transforms.ToPILImage()(torch.rand(3, 25, 25))
    num_samples = 250
    # Output produced when Resize runs before CenterCrop (the "normal" order).
    resize_then_crop = transforms.CenterCrop(10)(transforms.Resize(20, antialias=True)(img))
    num_normal_order = sum(1 for _ in range(num_samples) if transform(img) == resize_then_crop)
    p_value = stats.binomtest(num_normal_order, num_samples, p=0.5).pvalue
    random.setstate(saved_state)
    assert p_value > 0.0001
    # Checking if RandomOrder can be printed as string
    transform.__repr__()
def test_linear_transformation():
    """LinearTransformation with a ZCA-whitening matrix yields ~unit covariance and ~zero mean.

    Builds the whitening matrix U diag(1/sqrt(s)) U^T from the sample covariance,
    then checks the whitened samples empirically (weak law of large numbers).
    """
    num_samples = 1000
    x = torch.randn(num_samples, 3, 10, 10)
    flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3))
    # compute principal components of the (uncentered) sample covariance
    sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0)
    u, s, _ = np.linalg.svd(sigma.numpy())
    zca_epsilon = 1e-10 # avoid division by 0
    d = torch.Tensor(np.diag(1.0 / np.sqrt(s + zca_epsilon)))
    u = torch.Tensor(u)
    principal_components = torch.mm(torch.mm(u, d), u.t())
    mean_vector = torch.sum(flat_x, dim=0) / flat_x.size(0)
    # initialize whitening matrix
    whitening = transforms.LinearTransformation(principal_components, mean_vector)
    # estimate covariance and mean using weak law of large number
    num_features = flat_x.size(1)
    cov = 0.0
    mean = 0.0
    for i in x:
        xwhite = whitening(i)
        xwhite = xwhite.view(1, -1).numpy()
        cov += np.dot(xwhite, xwhite.T) / num_features
        mean += np.sum(xwhite) / num_features
    # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov
    torch.testing.assert_close(
        cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, msg="cov not close to 1"
    )
    torch.testing.assert_close(
        mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, msg="mean not close to 0"
    )
    # Checking if LinearTransformation can be printed as string
    whitening.__repr__()
@pytest.mark.parametrize("dtype", int_dtypes())
def test_max_value(dtype):
    """_max_value must agree with torch.iinfo for every integer dtype."""
    assert F_t._max_value(dtype) == torch.iinfo(dtype).max
    # remove float testing as it can lead to errors such as
    # runtime error: 5.7896e+76 is outside the range of representable values of type 'float'
    # for dtype in float_dtypes():
    #     self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max)
@pytest.mark.xfail(
    reason="torch.iinfo() is not supported by torchscript. See https://github.com/pytorch/pytorch/issues/41492."
)
def test_max_value_iinfo():
    """Scripting a function that calls torch.iinfo is expected to fail (tracked upstream)."""
    @torch.jit.script
    def max_value(image: torch.Tensor) -> int:
        return 1 if image.is_floating_point() else torch.iinfo(image.dtype).max
@pytest.mark.parametrize("should_vflip", [True, False])
@pytest.mark.parametrize("single_dim", [True, False])
def test_ten_crop(should_vflip, single_dim):
    """TenCrop equals FiveCrop(img) + FiveCrop(flipped img); flip axis set by vertical_flip."""
    to_pil_image = transforms.ToPILImage()
    h = random.randint(5, 25)
    w = random.randint(5, 25)
    crop_h = random.randint(1, h)
    crop_w = random.randint(1, w)
    if single_dim:
        # A single int crop size means a square crop.
        crop_h = min(crop_h, crop_w)
        crop_w = crop_h
        size = crop_h
    else:
        size = (crop_h, crop_w)
    transform = transforms.TenCrop(size, vertical_flip=should_vflip)
    five_crop = transforms.FiveCrop(size)
    img = to_pil_image(torch.FloatTensor(3, h, w).uniform_())
    results = transform(img)
    expected_output = five_crop(img)
    # Checking if FiveCrop and TenCrop can be printed as string
    transform.__repr__()
    five_crop.__repr__()
    flip_axis = Image.FLIP_TOP_BOTTOM if should_vflip else Image.FLIP_LEFT_RIGHT
    expected_output += five_crop(img.transpose(flip_axis))
    assert len(results) == 10
    assert results == expected_output
@pytest.mark.parametrize("single_dim", [True, False])
def test_five_crop(single_dim):
    """FiveCrop returns the four corner crops plus the center crop, each of the requested size."""
    to_pil_image = transforms.ToPILImage()
    h = random.randint(5, 25)
    w = random.randint(5, 25)
    crop_h = random.randint(1, h)
    crop_w = random.randint(1, w)
    if single_dim:
        crop_h = min(crop_h, crop_w)
        crop_w = crop_h
        transform = transforms.FiveCrop(crop_h)
    else:
        transform = transforms.FiveCrop((crop_h, crop_w))
    img = torch.FloatTensor(3, h, w).uniform_()
    results = transform(to_pil_image(img))
    assert len(results) == 5
    for crop in results:
        assert crop.size == (crop_w, crop_h)
    to_pil_image = transforms.ToPILImage()
    expected_output = (
        to_pil_image(img[:, 0:crop_h, 0:crop_w]),  # top-left
        to_pil_image(img[:, 0:crop_h, w - crop_w :]),  # top-right
        to_pil_image(img[:, h - crop_h :, 0:crop_w]),  # bottom-left
        to_pil_image(img[:, h - crop_h :, w - crop_w :]),  # bottom-right
        transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img)),  # center
    )
    assert results == expected_output
@pytest.mark.parametrize("policy", transforms.AutoAugmentPolicy)
@pytest.mark.parametrize("fill", [None, 85, (128, 128, 128)])
@pytest.mark.parametrize("grayscale", [True, False])
def test_autoaugment(policy, fill, grayscale):
    """Smoke test: AutoAugment runs repeatedly for every policy/fill/color-mode combination."""
    random.seed(42)
    img = Image.open(GRACE_HOPPER)
    if grayscale:
        img, fill = _get_grayscale_test_image(img, fill)
    aug = transforms.AutoAugment(policy=policy, fill=fill)
    for _ in range(100):
        img = aug(img)
    # Smoke-check the string representation.
    aug.__repr__()
@pytest.mark.parametrize("num_ops", [1, 2, 3])
@pytest.mark.parametrize("magnitude", [7, 9, 11])
@pytest.mark.parametrize("fill", [None, 85, (128, 128, 128)])
@pytest.mark.parametrize("grayscale", [True, False])
def test_randaugment(num_ops, magnitude, fill, grayscale):
    """Smoke test: RandAugment runs repeatedly for every parameter combination."""
    random.seed(42)
    img = Image.open(GRACE_HOPPER)
    if grayscale:
        img, fill = _get_grayscale_test_image(img, fill)
    aug = transforms.RandAugment(num_ops=num_ops, magnitude=magnitude, fill=fill)
    for _ in range(100):
        img = aug(img)
    # Smoke-check the string representation.
    aug.__repr__()
@pytest.mark.parametrize("fill", [None, 85, (128, 128, 128)])
@pytest.mark.parametrize("num_magnitude_bins", [10, 13, 30])
@pytest.mark.parametrize("grayscale", [True, False])
def test_trivialaugmentwide(fill, num_magnitude_bins, grayscale):
    """Smoke test: TrivialAugmentWide runs repeatedly for every parameter combination."""
    random.seed(42)
    img = Image.open(GRACE_HOPPER)
    if grayscale:
        img, fill = _get_grayscale_test_image(img, fill)
    aug = transforms.TrivialAugmentWide(fill=fill, num_magnitude_bins=num_magnitude_bins)
    for _ in range(100):
        img = aug(img)
    # Smoke-check the string representation.
    aug.__repr__()
@pytest.mark.parametrize("fill", [None, 85, (128, 128, 128)])
@pytest.mark.parametrize("severity", [1, 10])
@pytest.mark.parametrize("mixture_width", [1, 2])
@pytest.mark.parametrize("chain_depth", [-1, 2])
@pytest.mark.parametrize("all_ops", [True, False])
@pytest.mark.parametrize("grayscale", [True, False])
def test_augmix(fill, severity, mixture_width, chain_depth, all_ops, grayscale):
    """Smoke test: AugMix runs repeatedly for every parameter combination."""
    random.seed(42)
    img = Image.open(GRACE_HOPPER)
    if grayscale:
        img, fill = _get_grayscale_test_image(img, fill)
    aug = transforms.AugMix(
        fill=fill, severity=severity, mixture_width=mixture_width, chain_depth=chain_depth, all_ops=all_ops
    )
    for _ in range(100):
        img = aug(img)
    # Smoke-check the string representation.
    aug.__repr__()
def test_random_crop():
    """RandomCrop output size, padding, full-size identity and oversize error."""
    # Even random sizes; crop strictly smaller than the image.
    height = random.randint(10, 32) * 2
    width = random.randint(10, 32) * 2
    oheight = random.randint(5, (height - 2) // 2) * 2
    owidth = random.randint(5, (width - 2) // 2) * 2
    img = torch.ones(3, height, width, dtype=torch.uint8)

    def crop_pipeline(size, **crop_kwargs):
        # PIL round trip: ToPILImage -> RandomCrop -> PILToTensor.
        return transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.RandomCrop(size, **crop_kwargs),
                transforms.PILToTensor(),
            ]
        )(img)

    # Plain crop produces exactly the requested size.
    out = crop_pipeline((oheight, owidth))
    assert out.size(1) == oheight
    assert out.size(2) == owidth

    # Padding does not change the output size.
    pad = random.randint(1, 20)
    out = crop_pipeline((oheight, owidth), padding=pad)
    assert out.size(1) == oheight
    assert out.size(2) == owidth

    # Cropping to the full image size is the identity.
    out = crop_pipeline((height, width))
    assert out.size(1) == height
    assert out.size(2) == width
    torch.testing.assert_close(out, img)

    # pad_if_needed grows the image when the crop is one pixel too large.
    out = crop_pipeline((height + 1, width + 1), pad_if_needed=True)
    assert out.size(1) == height + 1
    assert out.size(2) == width + 1

    # Without pad_if_needed an oversize crop is an error.
    oversize_crop = transforms.RandomCrop(33)
    with pytest.raises(ValueError, match=r"Required crop size .+ is larger than input image size .+"):
        oversize_crop(torch.ones(3, 32, 32))
def test_center_crop():
    """CenterCrop hits the zeroed centre exactly; larger crops pick up ones."""
    # Even random sizes; crop strictly smaller than the image.
    height = random.randint(10, 32) * 2
    width = random.randint(10, 32) * 2
    oheight = random.randint(5, (height - 2) // 2) * 2
    owidth = random.randint(5, (width - 2) // 2) * 2

    img = torch.ones(3, height, width, dtype=torch.uint8)
    # Zero out the central region that an exact centre crop would extract.
    top = (height - oheight) // 2
    left = (width - owidth) // 2
    img[:, top : top + oheight, left : left + owidth].fill_(0)

    def cropped_sum(size):
        pipeline = transforms.Compose(
            [
                transforms.ToPILImage(),
                transforms.CenterCrop(size),
                transforms.PILToTensor(),
            ]
        )
        return pipeline(img).sum()

    # Exact crop contains only the zeroed centre.
    assert cropped_sum((oheight, owidth)) == 0
    # Growing the crop by one pixel per side starts including surrounding ones.
    sum_plus_one = cropped_sum((oheight + 1, owidth + 1))
    assert sum_plus_one > 1
    # Growing it further includes strictly more ones.
    sum_plus_two = cropped_sum((oheight + 2, owidth + 2))
    assert sum_plus_two > 0
    assert sum_plus_two > sum_plus_one
@pytest.mark.parametrize("odd_image_size", (True, False))
@pytest.mark.parametrize("delta", (1, 3, 5))
@pytest.mark.parametrize("delta_width", (-2, -1, 0, 1, 2))
@pytest.mark.parametrize("delta_height", (-2, -1, 0, 1, 2))
def test_center_crop_2(odd_image_size, delta, delta_width, delta_height):
    """Tests when center crop size is larger than image size, along any dimension"""
    # Since height is independent of width, we can ignore images with odd height and even width and vice-versa.
    input_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2)
    if odd_image_size:
        input_image_size = (input_image_size[0] + 1, input_image_size[1] + 1)
    # Scale the signed per-dimension deltas so crops can be both smaller and larger than the image.
    delta_height *= delta
    delta_width *= delta
    img = torch.ones(3, *input_image_size, dtype=torch.uint8)
    crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width)
    # Test both transforms, one with PIL input and one with tensor
    output_pil = transforms.Compose(
        [transforms.ToPILImage(), transforms.CenterCrop(crop_size), transforms.PILToTensor()],
    )(img)
    assert output_pil.size()[1:3] == crop_size
    output_tensor = transforms.CenterCrop(crop_size)(img)
    assert output_tensor.size()[1:3] == crop_size
    # Ensure output for PIL and Tensor are equal
    assert_equal(
        output_tensor,
        output_pil,
        msg=f"image_size: {input_image_size} crop_size: {crop_size}",
    )
    # Check if content in center of both image and cropped output is same.
    center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1]))
    # Top-left corner of the shared central region, in crop coordinates and
    # input-image coordinates respectively (only one is nonzero per axis).
    crop_center_tl, input_center_tl = [0, 0], [0, 0]
    for index in range(2):
        if crop_size[index] > input_image_size[index]:
            crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2
        else:
            input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2
    output_center = output_pil[
        :,
        crop_center_tl[0] : crop_center_tl[0] + center_size[0],
        crop_center_tl[1] : crop_center_tl[1] + center_size[1],
    ]
    img_center = img[
        :,
        input_center_tl[0] : input_center_tl[0] + center_size[0],
        input_center_tl[1] : input_center_tl[1] + center_size[1],
    ]
    assert_equal(output_center, img_center)
def test_color_jitter():
    """ColorJitter must preserve the input image mode for RGB and grayscale."""
    jitter = transforms.ColorJitter(2, 2, 2, 0.1)

    pixel_values = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1]
    arr = np.array(pixel_values, dtype=np.uint8).reshape([2, 2, 3])
    rgb_img = Image.fromarray(arr, mode="RGB")
    gray_img = rgb_img.convert("L")

    for _ in range(10):
        assert jitter(rgb_img).mode == rgb_img.mode
        assert jitter(gray_img).mode == gray_img.mode

    # Checking if ColorJitter can be printed as string
    repr(jitter)
@pytest.mark.parametrize("hue", [1, (-1, 1)])
def test_color_jitter_hue_out_of_bounds(hue):
    """Hue values outside (-0.5, 0.5) must be rejected at construction time."""
    expected_msg = re.escape("hue values should be between (-0.5, 0.5)")
    with pytest.raises(ValueError, match=expected_msg):
        transforms.ColorJitter(hue=hue)
@pytest.mark.parametrize("seed", range(10))
@pytest.mark.skipif(stats is None, reason="scipy.stats not available")
def test_random_erasing(seed):
    """Check the sampled erase-patch geometry and its aspect-ratio distribution."""
    torch.random.manual_seed(seed)
    img = torch.ones(3, 128, 128)
    # Fixed area scale so only the aspect ratio varies between samples.
    t = transforms.RandomErasing(scale=(0.1, 0.1), ratio=(1 / 3, 3.0))
    y, x, h, w, v = t.get_params(
        img,
        t.scale,
        t.ratio,
        [
            t.value,
        ],
    )
    aspect_ratio = h / w
    # Add some tolerance due to the rounding and int conversion used in the transform
    tol = 0.05
    assert 1 / 3 - tol <= aspect_ratio <= 3 + tol
    # Make sure that h > w and h < w are equally likely (log-scale sampling)
    aspect_ratios = []
    random.seed(42)
    trial = 1000
    for _ in range(trial):
        y, x, h, w, v = t.get_params(
            img,
            t.scale,
            t.ratio,
            [
                t.value,
            ],
        )
        aspect_ratios.append(h / w)
    count_bigger_then_ones = len([1 for aspect_ratio in aspect_ratios if aspect_ratio > 1])
    # Binomial test against p=0.5: fail only if the h>w frequency is wildly skewed.
    p_value = stats.binomtest(count_bigger_then_ones, trial, p=0.5).pvalue
    assert p_value > 0.0001
    # Checking if RandomErasing can be printed as string
    t.__repr__()
def test_random_rotation():
    """RandomRotation argument validation and parameter sampling."""
    # Degenerate degree specs are rejected: negative scalar, 1- and 3-element lists.
    for bad_degrees in (-0.7, [-0.7], [-0.7, 0, 0.7]):
        with pytest.raises(ValueError):
            transforms.RandomRotation(bad_degrees)

    # fill=None is normalized to 0.
    assert transforms.RandomRotation(0, fill=None).fill == 0

    # A scalar bound d samples from (-d, d).
    rot = transforms.RandomRotation(10)
    assert -10 < rot.get_params(rot.degrees) < 10

    # An explicit (lo, hi) range is respected.
    rot = transforms.RandomRotation((-10, 10))
    assert -10 < rot.get_params(rot.degrees) < 10

    # Checking if RandomRotation can be printed as string
    repr(rot)

    # Legacy PIL interpolation constants are converted to InterpolationMode.
    rot = transforms.RandomRotation((-10, 10), interpolation=Image.BILINEAR)
    assert rot.interpolation == transforms.InterpolationMode.BILINEAR
def test_random_rotation_error():
    """fill must be a Sequence or a Number; a dict is rejected with TypeError."""
    with pytest.raises(TypeError):
        transforms.RandomRotation(0, fill={})
def test_randomperspective():
    """A perspective warp followed by its inverse should roughly restore the image."""
    for _ in range(10):
        height = random.randint(24, 32) * 2
        width = random.randint(24, 32) * 2
        img = torch.ones(3, height, width)
        to_pil_image = transforms.ToPILImage()
        img = to_pil_image(img)
        perp = transforms.RandomPerspective()
        startpoints, endpoints = perp.get_params(width, height, 0.5)
        # Warp forward, then warp back by swapping the two point lists.
        tr_img = F.perspective(img, startpoints, endpoints)
        tr_img2 = F.convert_image_dtype(F.pil_to_tensor(F.perspective(tr_img, endpoints, startpoints)))
        tr_img = F.convert_image_dtype(F.pil_to_tensor(tr_img))
        assert img.size[0] == width
        assert img.size[1] == height
        # The round-tripped image must be closer to the original than the
        # single-warp image; the 0.3 slack absorbs border fill from the warp.
        assert torch.nn.functional.mse_loss(
            tr_img, F.convert_image_dtype(F.pil_to_tensor(img))
        ) + 0.3 > torch.nn.functional.mse_loss(tr_img2, F.convert_image_dtype(F.pil_to_tensor(img)))
@pytest.mark.parametrize("seed", range(10))
@pytest.mark.parametrize("mode", ["L", "RGB", "F"])
def test_randomperspective_fill(mode, seed):
    """Fill-value handling of RandomPerspective across PIL image modes."""
    torch.random.manual_seed(seed)
    # assert fill being either a Sequence or a Number
    with pytest.raises(TypeError):
        transforms.RandomPerspective(fill={})
    # fill=None is normalized to 0.
    t = transforms.RandomPerspective(fill=None)
    assert t.fill == 0
    height = 100
    width = 100
    img = torch.ones(3, height, width)
    to_pil_image = transforms.ToPILImage()
    img = to_pil_image(img)
    fill = 127
    # len(mode) equals the band count for these modes ("L"=1, "RGB"=3, "F"=1).
    num_bands = len(mode)
    img_conv = img.convert(mode)
    perspective = transforms.RandomPerspective(p=1, fill=fill)
    tr_img = perspective(img_conv)
    # The (0, 0) corner ends up outside the warped quad, so it must hold the fill value.
    pixel = tr_img.getpixel((0, 0))
    if not isinstance(pixel, tuple):
        pixel = (pixel,)
    assert pixel == tuple([fill] * num_bands)
    # Same check through the functional API.
    startpoints, endpoints = transforms.RandomPerspective.get_params(width, height, 0.5)
    tr_img = F.perspective(img_conv, startpoints, endpoints, fill=fill)
    pixel = tr_img.getpixel((0, 0))
    if not isinstance(pixel, tuple):
        pixel = (pixel,)
    assert pixel == tuple([fill] * num_bands)
    # A fill tuple whose length mismatches the band count is rejected.
    wrong_num_bands = num_bands + 1
    with pytest.raises(ValueError):
        F.perspective(img_conv, startpoints, endpoints, fill=tuple([fill] * wrong_num_bands))
@pytest.mark.skipif(stats is None, reason="scipy.stats not available")
def test_normalize():
    """Normalize with per-channel stats yields ~standard-normal samples."""

    def looks_standard_normal(t):
        # Kolmogorov-Smirnov test against N(0, 1).
        return stats.kstest(list(t.view(-1)), "norm", args=(0, 1)).pvalue > 0.0001

    saved_state = random.getstate()
    random.seed(42)
    for channels in [1, 3]:
        img = torch.rand(channels, 10, 10)
        mean = [img[c].mean() for c in range(channels)]
        std = [img[c].std() for c in range(channels)]
        assert looks_standard_normal(transforms.Normalize(mean, std)(img))
    random.setstate(saved_state)

    # Checking if Normalize can be printed as string (uses the last loop's stats).
    repr(transforms.Normalize(mean, std))

    # Checking the optional in-place behaviour
    tensor = torch.rand((1, 16, 16))
    result = transforms.Normalize((0.5,), (0.5,), inplace=True)(tensor)
    assert_equal(tensor, result)
@pytest.mark.parametrize("dtype1", [torch.float32, torch.float64])
@pytest.mark.parametrize("dtype2", [torch.int64, torch.float32, torch.float64])
def test_normalize_different_dtype(dtype1, dtype2):
    """normalize must not crash when the image and stat dtypes differ."""
    image = torch.rand(3, 10, 10, dtype=dtype1)
    channel_mean = torch.tensor([1, 2, 3], dtype=dtype2)
    channel_std = torch.tensor([1, 2, 1], dtype=dtype2)
    transforms.functional.normalize(image, channel_mean, channel_std)
def test_normalize_3d_tensor():
    """1D per-channel stats and their broadcastable 3D forms must agree."""
    torch.manual_seed(28)
    n_channels, side = 3, 10
    mean = torch.rand(n_channels)
    std = torch.rand(n_channels)
    img = torch.rand(n_channels, side, side)

    expected = F.normalize(img, mean, std)

    # (C, 1, 1) stats broadcast to the same result as the 1D form...
    mean3 = mean.view(-1, 1, 1)
    std3 = std.view(-1, 1, 1)
    torch.testing.assert_close(expected, F.normalize(img, mean3, std3))
    # ...and so do fully materialized (C, H, W) stats.
    torch.testing.assert_close(
        expected,
        F.normalize(img, mean3.repeat(1, side, side), std3.repeat(1, side, side)),
    )
class TestAffine:
    """Validate F.affine and _get_inverse_affine_matrix against an explicit
    NumPy model of the affine transform (matrix check + brute-force warp)."""

    @pytest.fixture(scope="class")
    def input_img(self):
        # 40x40 RGB image with three small colored squares used as landmarks.
        input_img = np.zeros((40, 40, 3), dtype=np.uint8)
        for pt in [(16, 16), (20, 16), (20, 20)]:
            for i in range(-5, 5):
                for j in range(-5, 5):
                    input_img[pt[0] + i, pt[1] + j, :] = [255, 155, 55]
        return input_img

    def test_affine_translate_seq(self, input_img):
        # translate must be a sequence, not a bare scalar.
        with pytest.raises(TypeError, match=r"Argument translate should be a sequence"):
            F.affine(input_img, 10, translate=0, scale=1, shear=1)

    @pytest.fixture(scope="class")
    def pil_image(self, input_img):
        return F.to_pil_image(input_img)

    def _to_3x3_inv(self, inv_result_matrix):
        # Promote the flat 2x3 inverse affine matrix to 3x3 and invert it,
        # recovering the forward transform.
        result_matrix = np.zeros((3, 3))
        result_matrix[:2, :] = np.array(inv_result_matrix).reshape((2, 3))
        result_matrix[2, 2] = 1
        return np.linalg.inv(result_matrix)

    def _test_transformation(self, angle, translate, scale, shear, pil_image, input_img, center=None):
        """Compare F.affine output with a brute-force inverse-mapped reference.

        Builds the expected matrix T * C * RSS * C^-1 (translate, recenter,
        rotate/scale/shear) and warps the image pixel-by-pixel with it.
        """
        a_rad = math.radians(angle)
        s_rad = [math.radians(sh_) for sh_ in shear]
        cnt = [20, 20] if center is None else center
        cx, cy = cnt
        tx, ty = translate
        sx, sy = s_rad
        rot = a_rad
        # 1) Check transformation matrix:
        C = np.array([[1, 0, cx], [0, 1, cy], [0, 0, 1]])
        T = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
        Cinv = np.linalg.inv(C)
        RS = np.array(
            [
                [scale * math.cos(rot), -scale * math.sin(rot), 0],
                [scale * math.sin(rot), scale * math.cos(rot), 0],
                [0, 0, 1],
            ]
        )
        SHx = np.array([[1, -math.tan(sx), 0], [0, 1, 0], [0, 0, 1]])
        SHy = np.array([[1, 0, 0], [-math.tan(sy), 1, 0], [0, 0, 1]])
        RSS = np.matmul(RS, np.matmul(SHy, SHx))
        true_matrix = np.matmul(T, np.matmul(C, np.matmul(RSS, Cinv)))
        result_matrix = self._to_3x3_inv(
            F._get_inverse_affine_matrix(center=cnt, angle=angle, translate=translate, scale=scale, shear=shear)
        )
        assert np.sum(np.abs(true_matrix - result_matrix)) < 1e-10
        # 2) Perform inverse mapping:
        true_result = np.zeros((40, 40, 3), dtype=np.uint8)
        inv_true_matrix = np.linalg.inv(true_matrix)
        for y in range(true_result.shape[0]):
            for x in range(true_result.shape[1]):
                # Same as for PIL:
                # https://github.com/python-pillow/Pillow/blob/71f8ec6a0cfc1008076a023c0756542539d057ab/
                # src/libImaging/Geometry.c#L1060
                input_pt = np.array([x + 0.5, y + 0.5, 1.0])
                res = np.floor(np.dot(inv_true_matrix, input_pt)).astype(int)
                _x, _y = res[:2]
                if 0 <= _x < input_img.shape[1] and 0 <= _y < input_img.shape[0]:
                    true_result[y, x, :] = input_img[_y, _x, :]
        result = F.affine(pil_image, angle=angle, translate=translate, scale=scale, shear=shear, center=center)
        assert result.size == pil_image.size
        # Compute number of different pixels:
        np_result = np.array(result)
        n_diff_pixels = np.sum(np_result != true_result) / 3
        # Accept 3 wrong pixels
        error_msg = (
            f"angle={angle}, translate={translate}, scale={scale}, shear={shear}\nn diff pixels={n_diff_pixels}\n"
        )
        assert n_diff_pixels < 3, error_msg

    def test_transformation_discrete(self, pil_image, input_img):
        """Exercise each affine component (rotation, translation, scale, shear)
        individually, including non-default centers."""
        # Test rotation
        angle = 45
        self._test_transformation(
            angle=angle, translate=(0, 0), scale=1.0, shear=(0.0, 0.0), pil_image=pil_image, input_img=input_img
        )
        # Test rotation
        angle = 45
        self._test_transformation(
            angle=angle,
            translate=(0, 0),
            scale=1.0,
            shear=(0.0, 0.0),
            pil_image=pil_image,
            input_img=input_img,
            center=[0, 0],
        )
        # Test translation
        translate = [10, 15]
        self._test_transformation(
            angle=0.0, translate=translate, scale=1.0, shear=(0.0, 0.0), pil_image=pil_image, input_img=input_img
        )
        # Test scale
        scale = 1.2
        self._test_transformation(
            angle=0.0, translate=(0.0, 0.0), scale=scale, shear=(0.0, 0.0), pil_image=pil_image, input_img=input_img
        )
        # Test shear
        shear = [45.0, 25.0]
        self._test_transformation(
            angle=0.0, translate=(0.0, 0.0), scale=1.0, shear=shear, pil_image=pil_image, input_img=input_img
        )
        # Test shear with top-left as center
        shear = [45.0, 25.0]
        self._test_transformation(
            angle=0.0,
            translate=(0.0, 0.0),
            scale=1.0,
            shear=shear,
            pil_image=pil_image,
            input_img=input_img,
            center=[0, 0],
        )

    @pytest.mark.parametrize("angle", range(-90, 90, 36))
    @pytest.mark.parametrize("translate", range(-10, 10, 5))
    @pytest.mark.parametrize("scale", [0.77, 1.0, 1.27])
    @pytest.mark.parametrize("shear", range(-15, 15, 5))
    def test_transformation_range(self, angle, translate, scale, shear, pil_image, input_img):
        """Sweep combined parameter grids through the same reference check."""
        self._test_transformation(
            angle=angle,
            translate=(translate, translate),
            scale=scale,
            shear=(shear, shear),
            pil_image=pil_image,
            input_img=input_img,
        )
def test_random_affine():
    """Constructor validation and parameter sampling of RandomAffine."""
    # Invalid degree specifications: negative scalar, 1- and 3-element lists.
    with pytest.raises(ValueError):
        transforms.RandomAffine(-0.7)
    with pytest.raises(ValueError):
        transforms.RandomAffine([-0.7])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-0.7, 0, 0.7])
    # Invalid translate / scale / shear specifications.
    with pytest.raises(TypeError):
        transforms.RandomAffine([-90, 90], translate=2.0)
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[-1.0, 1.0])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[-1.0, 0.0, 1.0])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.0])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[-1.0, 1.0])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, -0.5])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 3.0, -0.5])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=-7)
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10])
    with pytest.raises(ValueError):
        transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10, 0, 10])
    # assert fill being either a Sequence or a Number
    with pytest.raises(TypeError):
        transforms.RandomAffine(0, fill={})
    # fill=None is normalized to 0.
    t = transforms.RandomAffine(0, fill=None)
    assert t.fill == 0
    x = np.zeros((100, 100, 3), dtype=np.uint8)
    img = F.to_pil_image(x)
    t = transforms.RandomAffine(10, translate=[0.5, 0.3], scale=[0.7, 1.3], shear=[-10, 10, 20, 40])
    # Sampled parameters must stay within the configured ranges.
    for _ in range(100):
        angle, translations, scale, shear = t.get_params(t.degrees, t.translate, t.scale, t.shear, img_size=img.size)
        assert -10 < angle < 10
        assert -img.size[0] * 0.5 <= translations[0] <= img.size[0] * 0.5
        assert -img.size[1] * 0.5 <= translations[1] <= img.size[1] * 0.5
        assert 0.7 < scale < 1.3
        assert -10 < shear[0] < 10
        assert -20 < shear[1] < 40
    # Checking if RandomAffine can be printed as string
    t.__repr__()
    t = transforms.RandomAffine(10, interpolation=transforms.InterpolationMode.BILINEAR)
    assert "bilinear" in t.__repr__()
    # Legacy PIL interpolation constants are converted to InterpolationMode.
    t = transforms.RandomAffine(10, interpolation=Image.BILINEAR)
    assert t.interpolation == transforms.InterpolationMode.BILINEAR
def test_elastic_transformation():
    """Constructor validation and PIL smoke tests for ElasticTransform."""
    # alpha validation: float or a length-2 sequence of floats.
    with pytest.raises(TypeError, match=r"alpha should be float or a sequence of floats"):
        transforms.ElasticTransform(alpha=True, sigma=2.0)
    with pytest.raises(TypeError, match=r"alpha should be a sequence of floats"):
        transforms.ElasticTransform(alpha=[1.0, True], sigma=2.0)
    with pytest.raises(ValueError, match=r"alpha is a sequence its length should be 2"):
        transforms.ElasticTransform(alpha=[1.0, 0.0, 1.0], sigma=2.0)
    # sigma validation mirrors alpha's.
    with pytest.raises(TypeError, match=r"sigma should be float or a sequence of floats"):
        transforms.ElasticTransform(alpha=2.0, sigma=True)
    with pytest.raises(TypeError, match=r"sigma should be a sequence of floats"):
        transforms.ElasticTransform(alpha=2.0, sigma=[1.0, True])
    with pytest.raises(ValueError, match=r"sigma is a sequence its length should be 2"):
        transforms.ElasticTransform(alpha=2.0, sigma=[1.0, 0.0, 1.0])
    # Legacy PIL interpolation constants are converted to InterpolationMode.
    t = transforms.transforms.ElasticTransform(alpha=2.0, sigma=2.0, interpolation=Image.BILINEAR)
    assert t.interpolation == transforms.InterpolationMode.BILINEAR
    with pytest.raises(TypeError, match=r"fill should be int or float"):
        transforms.ElasticTransform(alpha=1.0, sigma=1.0, fill={})
    x = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
    img = F.to_pil_image(x)
    # With zero displacement the transform is the identity.
    t = transforms.ElasticTransform(alpha=0.0, sigma=0.0)
    transformed_img = t(img)
    assert transformed_img == img
    # Smoke test on PIL images
    t = transforms.ElasticTransform(alpha=0.5, sigma=0.23)
    transformed_img = t(img)
    assert isinstance(transformed_img, Image.Image)
    # Checking if ElasticTransform can be printed as string
    t.__repr__()
def test_random_grayscale_with_grayscale_input():
    """RandomGrayscale(p=1) must leave an already-grayscale image unchanged."""
    to_gray = transforms.RandomGrayscale(p=1.0)
    gray_tensor = torch.randint(0, 256, (1, 16, 16), dtype=torch.uint8)

    # Tensor path: output equals input.
    torch.testing.assert_close(to_gray(gray_tensor), gray_tensor)

    # PIL path: round-trip back to a tensor and compare.
    gray_pil = F.to_pil_image(gray_tensor)
    torch.testing.assert_close(F.pil_to_tensor(to_gray(gray_pil)), gray_tensor)
# Allow running this test module directly (``python <file>.py``) instead of
# through an external pytest invocation.
if __name__ == "__main__":
    pytest.main([__file__])
|
pytorchREPO_NAMEvisionPATH_START.@vision_extracted@vision-main@test@test_transforms.py@.PATH_END.py
|
{
"filename": "extract_features.py",
"repo_name": "ahmedfgad/GeneticAlgorithmPython",
"repo_path": "GeneticAlgorithmPython_extracted/GeneticAlgorithmPython-master/examples/nn/extract_features.py",
"type": "Python"
}
|
"""Extract 360-bin hue histograms from a four-class Fruits-360 subset.

Walks one directory per fruit class, computes a hue-channel histogram per
JPG image, and saves the feature matrix and class labels as .npy files.
"""
import numpy
import skimage.io, skimage.color, skimage.feature
import os
# NOTE(review): paths are relative to the process working directory — confirm
# the script is run from its own folder.
dataset_dir = "../data/Fruit360"
fruits = ["apple", "raspberry", "mango", "lemon"]
# Number of samples in the dataset used = 492+490+490+490=1,962
# 360 is the length of the feature vector (one histogram bin per hue degree).
dataset_features = numpy.zeros(shape=(1962, 360))
outputs = numpy.zeros(shape=(1962))
idx = 0
class_label = 0
for fruit_dir in fruits:
    # NOTE(review): joining with os.path.sep yields e.g. "/apple", which is then
    # string-concatenated onto dataset_dir below; POSIX-specific — verify on Windows.
    curr_dir = os.path.join(os.path.sep, fruit_dir)
    all_imgs = os.listdir(os.path.join(dataset_dir+curr_dir))
    for img_file in all_imgs:
        if img_file.endswith(".jpg"): # Ensures reading only JPG files.
            fruit_data = skimage.io.imread(fname=os.path.sep.join([dataset_dir, curr_dir, img_file]), as_gray=False)
            # Feature = histogram of the hue channel (HSV channel 0) over 360 bins.
            fruit_data_hsv = skimage.color.rgb2hsv(rgb=fruit_data)
            hist = numpy.histogram(a=fruit_data_hsv[:, :, 0], bins=360)
            dataset_features[idx, :] = hist[0]
            outputs[idx] = class_label
            idx = idx + 1
    class_label = class_label + 1
# Saving the extracted features and the outputs as NumPy files.
numpy.save("../data/dataset_features.npy", dataset_features)
numpy.save("../data/outputs.npy", outputs)
|
ahmedfgadREPO_NAMEGeneticAlgorithmPythonPATH_START.@GeneticAlgorithmPython_extracted@GeneticAlgorithmPython-master@examples@nn@extract_features.py@.PATH_END.py
|
{
"filename": "colorable.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipython/py2/IPython/utils/colorable.py",
"type": "Python"
}
|
#*****************************************************************************
# Copyright (C) 2016 The IPython Team <ipython-dev@scipy.org>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from __future__ import absolute_import
"""
Color managing related utilities
"""
import pygments
from traitlets.config import Configurable
from traitlets import Unicode
def available_themes():
    """Return the names of all available highlighting themes.

    Combines every style shipped with Pygments with IPython's builtin
    color schemes ('NoColor', 'LightBG', 'Linux', 'Neutral').
    """
    # Was a lambda assignment (PEP 8 E731); a def keeps the same callable
    # interface while gaining a proper name and docstring.
    return [s for s in pygments.styles.get_all_styles()] + ['NoColor', 'LightBG', 'Linux', 'Neutral']
class Colorable(Configurable):
    """
    A subclass of Configurable for all the classes that have a ``default_style``
    (the name of the color scheme/Pygments style used when rendering output).
    """
    # Traitlets-configurable style name; see available_themes() for valid values.
    default_style=Unicode('lightbg').tag(config=True)
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipython@py2@IPython@utils@colorable.py@.PATH_END.py
|
{
"filename": "matrix_dft.py",
"repo_name": "spacetelescope/jwst",
"repo_path": "jwst_extracted/jwst-main/jwst/ami/matrix_dft.py",
"type": "Python"
}
|
"""
Summary
-------
MatrixDFT: Matrix-based discrete Fourier transforms for computing PSFs.
See Soummer et al. 2007 JOSA
The main user interface in this module is a class MatrixFourierTransform.
Internally this will call one of several subfunctions depending on the
specified centering type. These have to do with where the (0, 0) element of
the Fourier transform is located, i.e. where the PSF center ends up.
- 'FFTSTYLE' centered on one pixel
- 'SYMMETRIC' centered on crosshairs between middle pixels
- 'ADJUSTABLE', always centered in output array depending on
whether it is even or odd
'ADJUSTABLE' is the default.
This module was originally called "Slow Fourier Transform", and this
terminology still appears in some places in the code. Note that this is
'slow' only in the sense that if you perform the exact same calculation as
an FFT, the FFT algorithm is much faster. However this algorithm gives you
much more flexibility in choosing array sizes and sampling, and often lets
you replace "fast calculations on very large arrays" with "relatively slow
calculations on much smaller ones".
Example
-------
mf = matrixDFT.MatrixFourierTransform()
result = mf.perform(pupilArray, focalplane_size, focalplane_npix)
History
-------
Code originally by A. Sivaramakrishnan
2010-11-05 Revised normalizations for flux conservation consistent
with Soummer et al. 2007. Updated documentation. -- M. Perrin
2011-2012: Various enhancements, detailed history not kept, sorry.
2012-05-18: module renamed SFT.py -> matrixDFT.py
 2012-09-26: minor bug fixes
2015-01-21: Eliminate redundant code paths, correct parity flip,
PEP8 formatting pass (except variable names)-- J. Long
"""
__all__ = ['MatrixFourierTransform']
import numpy as np
import logging
_log = logging.getLogger('poppy')

# Supported centering conventions (see module docstring for their meaning).
# FFTRECT is a deprecated alias for FFTSTYLE kept for back compatibility.
FFTSTYLE = 'FFTSTYLE'
FFTRECT = 'FFTRECT'
SYMMETRIC = 'SYMMETRIC'
ADJUSTABLE = 'ADJUSTABLE'
CENTERING_CHOICES = (FFTSTYLE, SYMMETRIC, ADJUSTABLE, FFTRECT)


def matrix_dft(plane, nlamD, npix,
               offset=None, inverse=False, centering=FFTSTYLE):
    """
    Perform a matrix discrete Fourier transform with selectable output
    sampling and centering. Where parameters can be supplied as either
    scalars or 2-tuples, the first element of the 2-tuple is used for the
    Y dimension and the second for the X dimension. This ordering matches
    that of numpy.ndarray.shape attributes and that of Python indexing.

    To achieve exact correspondence to the FFT set nlamD and npix to the size
    of the input array in pixels and use 'FFTSTYLE' centering. (n.b. When
    using `numpy.fft.fft2` you must `numpy.fft.fftshift` the input pupil both
    before and after applying fft2 or else it will introduce a checkerboard
    pattern in the signs of alternating pixels!)

    Parameters
    ----------
    plane : 2D ndarray
        2D array (either real or complex) representing the input image plane or
        pupil plane to transform.
    nlamD : float or 2-tuple of floats (nlamDY, nlamDX)
        Size of desired output region in lambda / D units, assuming that the
        pupil fills the input array (corresponds to 'm' in
        Soummer et al. 2007 4.2). This is in units of the spatial frequency
        that is just Nyquist sampled by the input array.) If given as a tuple,
        interpreted as (nlamDY, nlamDX).
    npix : int or 2-tuple of ints (npixY, npixX)
        Number of pixels per side side of destination plane array (corresponds
        to 'N_B' in Soummer et al. 2007 4.2). This will be the # of pixels in
        the image plane for a forward transformation, in the pupil plane for an
        inverse. If given as a tuple, interpreted as (npixY, npixX).
    inverse : bool, optional
        Is this a forward or inverse transformation? (Default is False,
        implying a forward transformation.)
    centering : {'FFTSTYLE', 'SYMMETRIC', 'ADJUSTABLE'}, optional
        What type of centering convention should be used for this FFT?

        * ADJUSTABLE (the default) For an output array with ODD size n,
          the PSF center will be at the center of pixel (n-1)/2. For an output
          array with EVEN size n, the PSF center will be in the corner between
          pixel (n/2-1, n/2-1) and (n/2, n/2)
        * FFTSTYLE puts the zero-order term in a single pixel.
        * SYMMETRIC spreads the zero-order term evenly between the center
          four pixels
    offset : 2-tuple of floats (offsetY, offsetX)
        For ADJUSTABLE-style transforms, an offset in pixels by which the PSF
        will be displaced from the central pixel (or cross). Given as
        (offsetY, offsetX).

    Returns
    -------
    norm_coeff * t2; float, ndarray
        normalized FT coeffs

    Raises
    ------
    ValueError
        If `npix`, `nlamD` or `offset` cannot be interpreted as described
        above, or if `centering` is not a recognized convention.
    """
    npupY, npupX = plane.shape

    # Normalize npix / nlamD to per-axis (Y, X) pairs.
    if np.isscalar(npix):
        npixY, npixX = npix, npix
    else:
        try:
            npixY, npixX = npix
        except ValueError:
            raise ValueError(
                "'npix' must be supplied as a scalar (for square arrays) or as "
                "a 2-tuple of ints (npixY, npixX)"
            )

    if np.isscalar(nlamD):
        nlamDY, nlamDX = nlamD, nlamD
    else:
        try:
            nlamDY, nlamDX = nlamD
        except ValueError:
            raise ValueError(
                "'nlamD' must be supplied as a scalar (for square arrays) or"
                " as a 2-tuple of floats (nlamDY, nlamDX)"
            )

    centering = centering.upper()

    # In the following: X and Y are coordinates in the input plane
    #                   U and V are coordinates in the output plane
    if inverse:
        dX = nlamDX / float(npupX)
        dY = nlamDY / float(npupY)
        dU = 1.0 / float(npixX)
        dV = 1.0 / float(npixY)
    else:
        dU = nlamDX / float(npixX)
        dV = nlamDY / float(npixY)
        dX = 1.0 / float(npupX)
        dY = 1.0 / float(npupY)

    if centering == FFTSTYLE:
        # Zero-order term lands in a single pixel (integer half-size shift).
        Xs = (np.arange(npupX) - (npupX / 2)) * dX
        Ys = (np.arange(npupY) - (npupY / 2)) * dY

        Us = (np.arange(npixX) - npixX / 2) * dU
        Vs = (np.arange(npixY) - npixY / 2) * dV
    elif centering == ADJUSTABLE:
        if offset is None:
            offsetY, offsetX = 0.0, 0.0
        else:
            try:
                offsetY, offsetX = offset
            except ValueError:
                raise ValueError(
                    "'offset' must be supplied as a 2-tuple with "
                    "(y_offset, x_offset) as floating point values"
                )
        # Half-pixel shift centers the PSF in the array regardless of parity.
        Xs = (np.arange(npupX) - float(npupX) / 2.0 - offsetX + 0.5) * dX
        Ys = (np.arange(npupY) - float(npupY) / 2.0 - offsetY + 0.5) * dY

        Us = (np.arange(npixX) - float(npixX) / 2.0 - offsetX + 0.5) * dU
        Vs = (np.arange(npixY) - float(npixY) / 2.0 - offsetY + 0.5) * dV
    elif centering == SYMMETRIC:
        Xs = (np.arange(npupX) - float(npupX) / 2.0 + 0.5) * dX
        Ys = (np.arange(npupY) - float(npupY) / 2.0 + 0.5) * dY

        Us = (np.arange(npixX) - float(npixX) / 2.0 + 0.5) * dU
        Vs = (np.arange(npixY) - float(npixY) / 2.0 + 0.5) * dV
    else:
        raise ValueError("Invalid centering style")

    XU = np.outer(Xs, Us)
    YV = np.outer(Ys, Vs)

    # The transform is two matrix products: rows (Y/V axis) then columns (X/U).
    if inverse:
        expYV = np.exp(-2.0 * np.pi * -1j * YV).T
        expXU = np.exp(-2.0 * np.pi * -1j * XU)
        t1 = np.dot(expYV, plane)
        t2 = np.dot(t1, expXU)
    else:
        expXU = np.exp(-2.0 * np.pi * 1j * XU)
        expYV = np.exp(-2.0 * np.pi * 1j * YV).T
        t1 = np.dot(expYV, plane)
        t2 = np.dot(t1, expXU)

    # Normalization for flux conservation, per Soummer et al. 2007.
    norm_coeff = np.sqrt((nlamDY * nlamDX) / (npupY * npupX * npixY * npixX))

    return norm_coeff * t2
def matrix_idft(*args, **kwargs):
    # Thin wrapper: forward everything to matrix_dft with the inverse flag
    # forced on (overriding any caller-supplied value, as before).
    forwarded = dict(kwargs, inverse=True)
    return matrix_dft(*args, **forwarded)
# matrix_idft shares matrix_dft's docstring, with only the summary line
# adjusted to say "inverse". The type: ignore silences checkers that model
# __doc__ as Optional[str].
matrix_idft.__doc__ = matrix_dft.__doc__.replace(  # type: ignore
    'Perform a matrix discrete Fourier transform',
    'Perform an inverse matrix discrete Fourier transform'
)
class MatrixFourierTransform:
"""Implements a discrete matrix Fourier transform for optical propagation,
following the algorithms discussed in Soummer et al. 2007 JOSA 15 24.
Parameters
----------
centering : {'FFTSTYLE', 'SYMMETRIC', 'ADJUSTABLE'}, optional
What type of centering convention should be used for this FFT?
* ADJUSTABLE (the default) For an output array with ODD size n,
the PSF center will be at the center of pixel (n-1)/2. For an output
array with EVEN size n, the PSF center will be in the corner between
pixel (n/2-1, n/2-1) and (n/2, n/2)
* FFTSTYLE puts the zero-order term in a single pixel.
* SYMMETRIC spreads the zero-order term evenly between the center
four pixels
verbose : bool
Deprecated. Use poppy.conf.default_logging_level to set DEBUG level
logging.
History
-------
Code by Sivaramakrishnan based on Soummer et al.
2010-01 Documentation updated by Perrin
2013-01 'choice' keyword renamed to 'centering' for clarity. 'choice' is
retained as an option for back compatibility, however it
is deprecated.
2015-01-21: Internals updated to use refactored `matrix_dft` function,
docstrings made consistent with each other -- J. Long
"""
def __init__(self, centering="ADJUSTABLE", verbose=False):
self.verbose = verbose
centering = centering.upper()
if centering == FFTRECT: # for backwards compatibility
centering = FFTSTYLE
if centering not in CENTERING_CHOICES:
raise ValueError(
"'centering' must be one of [ADJUSTABLE, SYMMETRIC, FFTSTYLE]"
)
self.centering = centering
_log.debug("MatrixFourierTransform initialized using centering "
"type = {0}".format(centering))
def _validate_args(self, nlamD, npix, offset):
if self.centering == SYMMETRIC:
if not np.isscalar(nlamD) or not np.isscalar(npix):
raise RuntimeError(
'The selected centering mode, {}, does not support '
'rectangular arrays.'.format(self.centering)
)
if self.centering == FFTSTYLE or self.centering == SYMMETRIC:
if offset is not None:
raise RuntimeError(
'The selected centering mode, {}, does not support '
'position offsets.'.format(self.centering)
)
def perform(self, pupil, nlamD, npix, offset=None):
"""Forward matrix discrete Fourier Transform
Parameters
----------
pupil : 2D ndarray
2D array (either real or complex) representing the input pupil
plane to transform.
nlamD : float or 2-tuple of floats (nlamDY, nlamDX)
Size of desired output region in lambda / D units, assuming that
the pupil fills the input array (corresponds to 'm' in
Soummer et al. 2007 4.2). This is in units of the spatial
frequency that is just Nyquist sampled by the input array.) If
given as a tuple, interpreted as (nlamDY, nlamDX).
npix : int or 2-tuple of ints (npixY, npixX)
Number of pixels per side side of destination plane array
(corresponds to 'N_B' in Soummer et al. 2007 4.2). This will be the
# of pixels in the image plane for a forward transformation, in the
pupil plane for an inverse. If given as a tuple, interpreted as
(npixY, npixX).
offset : 2-tuple of floats (offsetY, offsetX)
For ADJUSTABLE-style transforms, an offset in pixels by which the
PSF will be displaced from the central pixel (or cross). Given as
(offsetY, offsetX).
Returns
-------
complex ndarray
The Fourier transform of the input
"""
self._validate_args(nlamD, npix, offset)
_log.debug(
"Forward MatrixFourierTransform: array shape {}, "
"centering style {}, "
"output region size {} in lambda / D units, "
"output array size {} pixels, "
"offset {}".format(pupil.shape, self.centering, nlamD, npix,
offset)
)
return matrix_dft(pupil, nlamD, npix,
centering=self.centering, offset=offset)
def inverse(self, image, nlamD, npix, offset=None):
"""Inverse matrix discrete Fourier Transform
Parameters
----------
image : 2D ndarray
2D array (either real or complex) representing the input image
plane to transform.
nlamD : float or 2-tuple of floats (nlamDY, nlamDX)
Size of desired output region in lambda / D units, assuming that
the pupil fills the input array (corresponds to 'm' in
Soummer et al. 2007 4.2). This is in units of the spatial frequency
that is just Nyquist sampled by the input array.) If given as a
tuple, interpreted as (nlamDY, nlamDX).
npix : int or 2-tuple of ints (npixY, npixX)
Number of pixels per side side of destination plane array
(corresponds to 'N_B' in Soummer et al. 2007 4.2). This will be the
# of pixels in the image plane for a forward transformation, in the
pupil plane for an inverse. If given as a tuple, interpreted as
(npixY, npixX).
offset : 2-tuple of floats (offsetY, offsetX)
For ADJUSTABLE-style transforms, an offset in pixels by which the
PSF will be displaced from the central pixel (or cross). Given as
(offsetY, offsetX).
Returns
-------
complex ndarray
The Fourier transform of the input
"""
self._validate_args(nlamD, npix, offset)
_log.debug(
"Inverse MatrixFourierTransform: array shape {}, "
"centering style {}, "
"output region size {} in lambda / D units, "
"output array size {} pixels, "
"offset {}".format(image.shape, self.centering, nlamD, npix,
offset)
)
return matrix_idft(image, nlamD, npix,
centering=self.centering, offset=offset)
|
spacetelescopeREPO_NAMEjwstPATH_START.@jwst_extracted@jwst-main@jwst@ami@matrix_dft.py@.PATH_END.py
|
{
"filename": "figure4-lsbi-prior-dependence.py",
"repo_name": "htjb/tension-networks",
"repo_path": "tension-networks_extracted/tension-networks-main/figure4-lsbi-prior-dependence.py",
"type": "Python"
}
|
from lsbi.model import LinearModel
from lsbi.stats import multivariate_normal
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from tensionnet.tensionnet import nre
import tensorflow as tf
from tensorflow.keras.optimizers.schedules import ExponentialDecay
import numpy as np
from random import shuffle
from scipy.stats import ecdf, norm
from tqdm import tqdm
import os
import matplotlib as mpl
from matplotlib import rc
mpl.rcParams['axes.prop_cycle'] = mpl.cycler('color',
['ff7f00', '984ea3', '999999', '377eb8', '4daf4a','f781bf', 'a65628', 'e41a1c', 'dede00'])
mpl.rcParams['text.usetex'] = True
rc('font', family='serif')
rc('font', serif='cm')
rc('savefig', pad_inches=0.05)
plt.rc('text.latex', preamble=r'\usepackage{amsmath} \usepackage{amssymb}')
def logR(A, B):
    # Log Bayes ratio log R = log Z_AB - log Z_A - log Z_B for the joint
    # versus independent fits of the two data sets.
    # NOTE: reads the module-level models model_AB, model_A, model_B, which
    # are (re)assigned inside the experiment loop below.
    return model_AB.evidence().logpdf(np.hstack([A, B])) - \
        model_A.evidence().logpdf(A) - model_B.evidence().logpdf(B)
def simulation_process(simsA, simsB):
    """Build a labelled train/test set for the NRE classifier.

    Pairs drawn jointly from the AB model keep their pairing and are
    labelled 1 (consistent); pairs where the B simulations are randomly
    permuted against the A simulations are labelled 0 (inconsistent).

    Parameters
    ----------
    simsA, simsB : ndarray, shapes (N, dA) and (N, dB)
        Joint simulations of the two data sets.

    Returns
    -------
    norm_data_train, norm_data_test : ndarray
        Standardised (zero mean, unit variance per feature, using the
        *training* statistics) train/test inputs.
    data_train, data_test : ndarray
        The unstandardised splits (the train split is needed later to
        normalise new simulations with the same statistics).
    labels_train, labels_test : ndarray
        Binary consistency labels.
    """
    # Break the A/B pairing by permuting B to create the "inconsistent" class.
    idx = np.arange(0, len(simsB), 1)
    shuffle(idx)
    mis_labeled_simsB = simsB[idx]
    data = []
    for i in range(len(simsA)):
        # label 1: jointly simulated (consistent); label 0: shuffled pairing
        data.append([*simsA[i], *simsB[i], 1])
        data.append([*simsA[i], *mis_labeled_simsB[i], 0])
    data = np.array(data)
    # Shuffle so consistent/inconsistent examples interleave.
    idx = np.arange(0, 2 * len(simsA), 1)
    shuffle(idx)
    labels = data[idx, -1]
    data = data[idx, :-1]
    print('Simulations built.')
    print('Splitting data and normalizing...')
    data_train, data_test, labels_train, labels_test = \
        train_test_split(data, labels, test_size=0.2)
    # Standardise each data set separately. Compute the training statistics
    # once up front (the original recomputed them four times, which only
    # worked because the test split happened to be normalised first).
    nA = len(simsA[0])
    data_trainA = data_train[:, :nA]
    data_trainB = data_train[:, nA:]
    data_testA = data_test[:, :nA]
    data_testB = data_test[:, nA:]
    meanA, stdA = data_trainA.mean(axis=0), data_trainA.std(axis=0)
    meanB, stdB = data_trainB.mean(axis=0), data_trainB.std(axis=0)
    data_testA = (data_testA - meanA) / stdA
    data_testB = (data_testB - meanB) / stdB
    data_trainA = (data_trainA - meanA) / stdA
    data_trainB = (data_trainB - meanB) / stdB
    norm_data_train = np.hstack([data_trainA, data_trainB])
    norm_data_test = np.hstack([data_testA, data_testB])
    return norm_data_train, norm_data_test, data_train, \
        data_test, labels_train, labels_test
# General model is
# D = m + M theta +/- sqrt(C)
# theta = mu +/- sqrt(Sigma)
# Parameters & priors
n = 3
# Data B
d = 50
MB = np.random.rand(d, n)
mB = np.random.rand(d)
CB = 0.01
# Data A
MA = np.random.rand(d, n)
mA = np.random.rand(d)
CA = 0.01
mu = np.random.rand(n)
theta_true = multivariate_normal(mu, 0.01).rvs()
#theta_true = [0.70325735, 0.56504433, 0.43477517]
print(theta_true)
"""for i in range(theta_true.shape[0]):
plt.hist(theta_true[i, :], bins=50, density=True, histtype='step')
plt.show()
sys.exit(1)"""
#theta_true = [0.8, 0.9, 0.95]
Sigmas = [0.1, 1, 100]
prior_label = [r'$\Sigma =$' + f'{Sigmas[0]}' + r'$\mathcal{I}$',
r'$\Sigma =$' + f'{Sigmas[1]}' + r'$\mathcal{I}$',
r'$\Sigma =$' + f'{Sigmas[2]}' + r'$\mathcal{I}$']
true_distributions, predicted_distributions, Rsss = [], [], []
trueTs, trueCs = [], []
predictedTs, predictedCs = [], []
# Repeat the whole experiment 5 times so the tension/concordance statistics
# can be averaged with standard errors.
for j in range(5):
    print('Iteration ', j)
    td, pd, truet, truec, Rs = [], [], [], [], []
    predt, predc = [], []
    for i, Sigma in enumerate(Sigmas):
        print('Iteration ', i, ' Sigma = ', Sigma)
        # build models and generate data
        model_A = LinearModel(M=MA, m=mA, C=CA,
                              mu=mu, Sigma=Sigma)
        model_B = LinearModel(M=MB, m=mB, C=CB,
                              mu=mu, Sigma=Sigma)
        # Data AB: stack A and B into one joint linear-Gaussian model.
        d = model_A.d + model_B.d
        M = np.vstack([model_A.M, model_B.M])
        m = np.hstack([model_A.m, model_B.m])
        C = np.concatenate([model_A.C * np.ones(model_A.d),
                            model_B.C * np.ones(model_B.d)])
        model_AB = LinearModel(M=M, m=m, C=C, mu=mu, Sigma=Sigma)
        if i == 0 and j == 0:
            # pull a real observation from the narrow prior
            # NOTE: drawn only once (first repeat, Sigma=Sigmas[0]) and
            # reused for every prior width and repeat thereafter.
            A_obs = model_A.likelihood(theta_true).rvs()
            B_obs = model_B.likelihood(theta_true).rvs()
        Robs = logR(A_obs, B_obs)
        Rs.append(Robs)
        # Training simulations drawn from the joint evidence.
        N_sim = 500000
        AB_sim = model_AB.evidence().rvs(N_sim)
        A_sim = AB_sim[:, :model_A.d]
        B_sim = AB_sim[:, model_A.d:]
        # build the nre (neural ratio estimator)
        nrei = nre(lr=1e-4)
        nrei.build_model(len(A_obs) + len(B_obs),
                         [25]*5, 'sigmoid')
        norm_data_train, norm_data_test, data_train, data_test, \
            labels_train, labels_test = \
            simulation_process(A_sim, B_sim)
        nrei.data_test = norm_data_test
        nrei.labels_test = labels_test
        nrei.data_train = norm_data_train
        nrei.labels_train = labels_train
        nrei.simulation_func_A = None
        nrei.simulation_func_B = None
        # generate some test data to build the distribution
        N_test_sim = 5000
        AB_sim = model_AB.evidence().rvs(N_test_sim)
        A_sim = AB_sim[:, :model_A.d]
        B_sim = AB_sim[:, model_A.d:]
        logr_true_dist = logR(A_sim, B_sim)
        # build the analytic distribution and calculate the tension (T)
        # and concordance (C) significances of the observed log R
        logr_true_dist = np.sort(logr_true_dist)
        td.append(logr_true_dist)
        true_cdf = ecdf(logr_true_dist)
        true_sigmaD = norm.isf(true_cdf.cdf.evaluate(Robs)/2)
        true_sigmaA = norm.isf((1- true_cdf.cdf.evaluate(Robs))/2)
        print(f'True sigmaD: {true_sigmaD}')
        print(f'True sigmaA: {true_sigmaA}')
        truet.append(true_sigmaD)
        truec.append(true_sigmaA)
        # train the model
        model, data_test, labels_test = nrei.training(epochs=1000,
                                                      batch_size=1000)
        # normalise the test data with the *training* statistics
        A_sim = (A_sim - data_train[:, :len(A_obs)].mean(axis=0)) / \
            data_train[:, :len(A_obs)].std(axis=0)
        B_sim = (B_sim - data_train[:, len(A_obs):].mean(axis=0)) / \
            data_train[:, len(A_obs):].std(axis=0)
        data_test = np.hstack([A_sim, B_sim])
        # evaluate the predicted distribution
        nrei.__call__(iters=data_test)
        predicted_r_dist = nrei.r_values
        mask = np.isfinite(predicted_r_dist)
        # calculate the predicted T and C
        predicted_r_dist = np.sort(predicted_r_dist[mask])
        pd.append(predicted_r_dist)
        c = ecdf(predicted_r_dist)
        sigmaD = norm.isf(c.cdf.evaluate(Robs)/2)
        sigmaA = norm.isf((1- c.cdf.evaluate(Robs))/2)
        print(f'Predicted sigmaD: {sigmaD}')
        print(f'Predicted sigmaA: {sigmaA}')
        predt.append(sigmaD)
        predc.append(sigmaA)
    # Store this repeat's results (one entry per Sigma).
    true_distributions.append(td)
    predicted_distributions.append(pd)
    Rsss.append(Rs)
    trueTs.append(truet)
    trueCs.append(truec)
    predictedTs.append(predt)
    predictedCs.append(predc)
# Stack the per-repeat statistics into arrays of shape (n_runs, len(Sigmas)).
trueTs = np.array(trueTs)
trueCs = np.array(trueCs)
predictedTs = np.array(predictedTs)
predictedCs = np.array(predictedCs)
# Mean and standard error on the mean over repeats. The number of repeats
# is derived from the data rather than hard-coded (previously sqrt(5),
# duplicating the range(5) loop count above).
n_runs = trueTs.shape[0]
meantrueTs = np.mean(trueTs, axis=0)
meantrueCs = np.mean(trueCs, axis=0)
meanpredictedTs = np.mean(predictedTs, axis=0)
meanpredictedCs = np.mean(predictedCs, axis=0)
errortrueTs = np.std(trueTs, axis=0) / np.sqrt(n_runs)
errortrueCs = np.std(trueCs, axis=0) / np.sqrt(n_runs)
errorpredictedTs = np.std(predictedTs, axis=0) / np.sqrt(n_runs)
errorpredictedCs = np.std(predictedCs, axis=0) / np.sqrt(n_runs)
# Figure: one row per prior width Sigma; columns are (histograms, CDFs,
# summary table of T/C statistics averaged over repeats).
fig, axes = plt.subplots(3, 3, figsize=(8, 6.3), sharex='col')
# Pick one of the repeats at random to show its distributions.
# NOTE(review): 5 here is the hard-coded number of repeats from the loop
# above -- keep in sync if that changes.
k = np.random.randint(0, 5)
for i in range(len(Sigmas)):
    # Column 0: histograms of predicted vs analytic log R distributions.
    axes[i, 0].hist(predicted_distributions[k][i], bins=50, density=True,
                    histtype='step', label='Prediction')
    axes[i, 0].hist(true_distributions[k][i], bins=50, density=True,
                    histtype='step', label='Truth')
    axes[i, 0].axvline(Rsss[k][i], color='r', ls='--')
    axes[i, 0].set_title(r'$\log R_\mathrm{obs}=$' + str(np.round(Rsss[k][i], 2)))
    # Column 1: empirical CDFs with the observed log R marked.
    c = ecdf(predicted_distributions[k][i])
    true_cdf = ecdf(true_distributions[k][i])
    axes[i, 1].plot(predicted_distributions[k][i], c.cdf.evaluate(predicted_distributions[k][i]), label='Prediction')
    axes[i, 1].plot(true_distributions[k][i], true_cdf.cdf.evaluate(true_distributions[k][i]), label='Truth')
    axes[i, 1].axhline(c.cdf.evaluate(Rsss[k][i]), ls='--',
                       color='C0')
    axes[i, 1].axhline(true_cdf.cdf.evaluate(Rsss[k][i]), ls='--',
                       color='C1')
    if i == 0:
        axes[i, 0].legend(fontsize=8)
        axes[i, 1].legend(fontsize=8)
    [axes[i, j].tick_params(labelbottom=True) for j in range(2)]
    axes[i, 0].set_ylabel(prior_label[i])
    axes[i, 1].set_ylabel(r'$P(\log R < \log R^\prime)$')
    # Column 2: table of mean +/- standard error for truth and network.
    axes[i, 2].axis('off')
    axes[i, 2].table(cellText=[[f'{meantrueTs[i]:.3f} $\pm$ {errortrueTs[i]:.3f}',
                                f'{meantrueCs[i]:.3f} $\pm$ {errortrueCs[i]:.3f}'],
                               [f'{meanpredictedTs[i]:.3f} $\pm$ {errorpredictedTs[i]:.3f}',
                                f'{meanpredictedCs[i]:.3f} $\pm$ {errorpredictedCs[i]:.3f}']],
                     colLabels=[r'$T$', r'$C$'],
                     rowLabels=['Truth', r'\textsc{tensionnet}'],
                     cellLoc='center',
                     loc='center',
                     fontsize=15)
axes[2, 0].set_xlabel(r'$\log R$')
axes[2, 1].set_xlabel(r'$\log R$')
plt.tight_layout()
plt.savefig('figures/figure4-lsbi-averages.pdf', bbox_inches='tight')
plt.close()
|
htjbREPO_NAMEtension-networksPATH_START.@tension-networks_extracted@tension-networks-main@figure4-lsbi-prior-dependence.py@.PATH_END.py
|
{
"filename": "stannet.py",
"repo_name": "tomasplsek/CADET",
"repo_path": "CADET_extracted/CADET-main/training_testing/stannet.py",
"type": "Python"
}
|
from tensorflow.keras.initializers import Constant, TruncatedNormal, HeNormal
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Activation, BatchNormalization, concatenate, Conv2D, Dropout, Input, PReLU
##################### STAN's MODEL #####################
stride = (1, 1)
shapes_layers = [[32, (1,1)],
[32, (3,3)],
[16, (5,5)],
[8, (7,7)],
[4, (9,9)],
[2, (11,11)],
[1, (13,13)]]
shapes_layers_final = [[8, (8,8)],
[4, (16,16)],
[2, (32,32)],
[1, (64,64)]]
# init_kernel = HeNormal(seed=420)
init_kernel = TruncatedNormal(mean=0.0, stddev=0.03, seed=420)
init_bias = Constant(value=0.01)
def block(data, filters, shapes, active="None", drop=False):
    """Inception-style block: parallel convolutions with several kernel
    sizes, concatenated, then fused by a 1x1 convolution.

    Parameters
    ----------
    data : tensor
        Input tensor.
    filters : int
        Filters of the fusing 1x1 convolution. If > 1 the block ends with
        the chosen activation, batch-norm and optional dropout; if 1 it is
        treated as the output layer and ends with a sigmoid.
    shapes : list of [filters, kernel_size]
        Branch specifications for the inception layer.
    active : str
        'relu', 'prelu', or anything else for a linear (no) activation.
    drop : float or False
        Dropout rate for non-output blocks; False disables dropout.
    """
    # INCEPTION LAYER: one conv per (filters, kernel) branch
    layers = []
    for f, s in shapes:
        out_layer = Conv2D(filters = f, kernel_size = s, strides = stride,
                    kernel_initializer = init_kernel, bias_initializer = init_bias,
                    padding = "same", activation = None)(data)
        # ACTIVATION FUNCTION
        if active == "relu": out_layer = Activation("relu")(out_layer)
        elif active == "prelu": out_layer = Activation(PReLU())(out_layer)
        out_layer = BatchNormalization(axis = -1)(out_layer)
        layers.append(out_layer)
    # CONCATENATE INCEPTION FILTERS along the channel axis
    layers = concatenate(layers, axis=-1)
    # CONSEQUENT CONV LAYER: 1x1 conv fuses the branches
    out_layer = Conv2D(filters = filters, kernel_size = (1,1), strides = stride,
                kernel_initializer = init_kernel, bias_initializer = init_bias,
                padding = "same", activation = None)(layers)
    # ACTIVATION FUNCTION (sigmoid only for the single-filter output block)
    if filters > 1:
        if active == "relu": out_layer = Activation("relu")(out_layer)
        elif active == "prelu": out_layer = Activation(PReLU())(out_layer)
        out_layer = BatchNormalization(axis = -1)(out_layer)
        if drop: out_layer = Dropout(drop)(out_layer)
    else: out_layer = Activation("sigmoid")(out_layer)
    return out_layer
def Stannet(shape_image, active="None", drop=False):
    """Assemble the full model: input batch-norm, four inception blocks,
    and a single-channel sigmoid output block.

    Parameters
    ----------
    shape_image : tuple
        Input image shape (H, W, channels).
    active : str
        Activation passed to every block ('relu', 'prelu', or linear).
    drop : float or False
        Dropout rate for the inner blocks.

    Returns
    -------
    keras Model mapping the input image to the sigmoid output map.
    """
    input_data = Input(shape=(shape_image))
    data = BatchNormalization(axis = -1)(input_data)
    data = block(data, 32, shapes_layers, active, drop)
    data = block(data, 64, shapes_layers, active, drop)
    data = block(data, 64, shapes_layers, active, drop)
    data = block(data, 32, shapes_layers, active, drop)
    # Output block (filters=1 -> sigmoid, larger kernels, no dropout).
    data = block(data, 1, shapes_layers_final, active)
    output = Model([input_data], data)
    return output
|
tomasplsekREPO_NAMECADETPATH_START.@CADET_extracted@CADET-main@training_testing@stannet.py@.PATH_END.py
|
{
"filename": "_showscale.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/streamtube/_showscale.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowscaleValidator(_plotly_utils.basevalidators.BooleanValidator):
    """Validator for the boolean 'showscale' property of 'streamtube' traces."""

    def __init__(self, plotly_name="showscale", parent_name="streamtube", **kwargs):
        # edit_type/role defaults are popped from kwargs so callers
        # (generated code) can override them.
        super(ShowscaleValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "calc"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@streamtube@_showscale.py@.PATH_END.py
|
{
"filename": "pyopengl2.py",
"repo_name": "macrocosme/shwirl",
"repo_path": "shwirl_extracted/shwirl-master/shwirl/extern/vispy/gloo/gl/pyopengl2.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
""" GL ES 2.0 API implemented via pyOpenGL library. Intended as a
fallback and for testing.
"""
from OpenGL import GL as _GL
import OpenGL.GL.framebufferobjects as _FBO
from ...util import logger
from . import _copy_gl_functions
from ._constants import * # noqa
def _patch():
    """ Monkey-patch pyopengl to fix a bug in glBufferSubData. """
    import sys
    from OpenGL import GL
    if sys.version_info > (3,):
        # On Python 3 the wrapper module still references the removed
        # builtin 'long'; alias it to int in that module's namespace.
        buffersubdatafunc = GL.glBufferSubData
        if hasattr(buffersubdatafunc, 'wrapperFunction'):
            buffersubdatafunc = buffersubdatafunc.wrapperFunction
        _m = sys.modules[buffersubdatafunc.__module__]
        _m.long = int
    # Fix missing enum: alias the GL 2.0 shader-source-length constant
    # under its legacy name if the import succeeds.
    try:
        from OpenGL.GL.VERSION import GL_2_0
        GL_2_0.GL_OBJECT_SHADER_SOURCE_LENGTH = GL_2_0.GL_SHADER_SOURCE_LENGTH
    except Exception:
        pass
# Patch OpenGL package (applied at import time of this module)
_patch()
## Inject
def _make_unavailable_func(funcname):
def cb(*args, **kwargs):
raise RuntimeError('OpenGL API call "%s" is not available.' % funcname)
return cb
def _get_function_from_pyopengl(funcname):
    """ Try getting the given function from PyOpenGL, return
    a dummy function (that shows a warning when called) if it
    could not be found.

    Lookup order: OpenGL.GL, then the framebuffer-objects module, then
    the name with a trailing 'f' stripped (some ES names differ only by
    that suffix); finally a stub that raises RuntimeError when called.
    """
    func = None
    # Get function from GL
    try:
        func = getattr(_GL, funcname)
    except AttributeError:
        # Get function from FBO
        try:
            func = getattr(_FBO, funcname)
        except AttributeError:
            func = None
    # Try using "alias"
    # (bool() also covers PyOpenGL function objects that are present but
    # falsy when the extension is unavailable, not just None)
    if not bool(func):
        # Some functions are known by a slightly different name
        # e.g. glDepthRangef, glClearDepthf
        if funcname.endswith('f'):
            try:
                func = getattr(_GL, funcname[:-1])
            except AttributeError:
                pass
    # Set dummy function if we could not find it
    if func is None:
        func = _make_unavailable_func(funcname)
        logger.warning('warning: %s not available' % funcname)
    return func
def _inject():
    """ Copy functions from OpenGL.GL into _pyopengl namespace.

    For every (GL name, local name) pair declared by _pyopengl2, resolve
    the GL function (or an 'unavailable' stub) and bind it into the
    _pyopengl2 module namespace.
    """
    NS = _pyopengl2.__dict__
    for glname, ourname in _pyopengl2._functions_to_import:
        func = _get_function_from_pyopengl(glname)
        NS[ourname] = func
from . import _pyopengl2  # noqa
# Inject remaining functions from OpenGL.GL
# copies name to _pyopengl2 namespace
_inject()
# Inject all function definitions in _pyopengl2
_copy_gl_functions(_pyopengl2, globals())
|
macrocosmeREPO_NAMEshwirlPATH_START.@shwirl_extracted@shwirl-master@shwirl@extern@vispy@gloo@gl@pyopengl2.py@.PATH_END.py
|
{
"filename": "bilby_example.py",
"repo_name": "mj-will/nessai",
"repo_path": "nessai_extracted/nessai-main/examples/bilby_example.py",
"type": "Python"
}
|
#!/usr/bin/env python
# Example of using nessai with Bilby (Requires separate installation)
# See 2d_gaussian.py for a more detailed explanation of using nessai
import bilby
import numpy as np
# The output from the sampler will be saved to:
# '$outdir/$label_nessai/'
# alongside the usual bilby outputs
# Output directory and run label used by both bilby and nessai.
outdir = "./outdir/"
label = "bilby_example"
# Setup the bilby logger this will also configure the nessai logger.
bilby.core.utils.setup_logger(outdir=outdir, label=label)
# Define a likelihood using Bilby
class SimpleGaussianLikelihood(bilby.Likelihood):
    def __init__(self):
        """A very simple Gaussian likelihood"""
        super().__init__(parameters={"x": None, "y": None})

    def log_likelihood(self):
        """Log-likelihood.

        Standard 2D unit Gaussian in (x, y); the -log(2*pi) term is the
        normalisation (2*pi)^(-d/2) with d = 2.
        """
        return -0.5 * (
            self.parameters["x"] ** 2.0 + self.parameters["y"] ** 2.0
        ) - np.log(2.0 * np.pi)
# Define priors (this provides the bounds that are then used in nessai)
priors = dict(
    x=bilby.core.prior.Uniform(-10, 10, "x"),
    y=bilby.core.prior.Uniform(-10, 10, "y"),
)
# Instantiate the likelihood
likelihood = SimpleGaussianLikelihood()
# Run using bilby.run_sampler, any kwargs are parsed to the sampler
# NOTE: when using Bilby if the priors can be sampled analytically the flag
# `analytic_priors` enables faster initial sampling. See 'further details' in
# the documentation for more details
result = bilby.run_sampler(
    outdir=outdir,
    label=label,
    resume=False,
    plot=True,
    likelihood=likelihood,
    priors=priors,
    sampler="nessai",
    injection_parameters={"x": 0.0, "y": 0.0},  # truth, marked on plots
    analytic_priors=True,
    seed=1234,  # fixed seed for reproducibility
)
|
mj-willREPO_NAMEnessaiPATH_START.@nessai_extracted@nessai-main@examples@bilby_example.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "tcollett/LensPop",
"repo_path": "LensPop_extracted/LensPop-master/stellarpop/bak25sep12/__init__.py",
"type": "Python"
}
|
import distances
|
tcollettREPO_NAMELensPopPATH_START.@LensPop_extracted@LensPop-master@stellarpop@bak25sep12@__init__.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnelarea/stream/__init__.py",
"type": "Python"
}
|
import sys

# Eager imports on Python < 3.7; otherwise lazy relative imports via
# PEP 562 module __getattr__ to keep plotly's import time down.
if sys.version_info < (3, 7):
    from ._token import TokenValidator
    from ._maxpoints import MaxpointsValidator
else:
    from _plotly_utils.importers import relative_import

    __all__, __getattr__, __dir__ = relative_import(
        __name__, [], ["._token.TokenValidator", "._maxpoints.MaxpointsValidator"]
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnelarea@stream@__init__.py@.PATH_END.py
|
{
"filename": "setup_package.py",
"repo_name": "radio-astro-tools/spectral-cube",
"repo_path": "spectral-cube_extracted/spectral-cube-master/spectral_cube/tests/setup_package.py",
"type": "Python"
}
|
def get_package_data():
    # Declare the non-Python data files bundled with the test subpackage.
    # NOTE(review): _ASTROPY_PACKAGE_NAME_ is not defined in this file --
    # presumably substituted by the astropy build helpers at build time;
    # verify against the package's setup machinery.
    return {
        _ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc', 'data/*.fits', 'data/*.hdr', 'data/*.lmv', 'data/*reg']
    }
|
radio-astro-toolsREPO_NAMEspectral-cubePATH_START.@spectral-cube_extracted@spectral-cube-master@spectral_cube@tests@setup_package.py@.PATH_END.py
|
{
"filename": "test_interaction.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/ipywidgets/py3/ipywidgets/widgets/tests/test_interaction.py",
"type": "Python"
}
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Test interact and interactive."""
from unittest.mock import patch
import os
from enum import Enum
from collections import OrderedDict
import pytest
import ipywidgets as widgets
from traitlets import TraitError, Float
from ipywidgets import (interact, interact_manual, interactive,
interaction, Output, Widget)
#-----------------------------------------------------------------------------
# Utility stuff
#-----------------------------------------------------------------------------
def f(**kwargs):
pass
class Color(Enum):
red = 0
green = 1
blue = 2
def g(a: str, b: bool, c: int, d: float, e: Color) -> None:
pass
displayed = []
@pytest.fixture()
def clear_display():
global displayed
displayed = []
def record_display(*args):
displayed.extend(args)
#-----------------------------------------------------------------------------
# Actual tests
#-----------------------------------------------------------------------------
def check_widget(w, **d):
    """Check a single widget against a dict of expected attribute values.

    The special key 'cls' asserts the widget's exact class; every other
    key is compared via getattr, and numeric values must also match type.
    """
    for name, want in d.items():
        if name == 'cls':
            assert w.__class__ is want
            continue
        got = getattr(w, name)
        assert got == want, "{}.{} = {!r} != {!r}".format(
            w.__class__.__name__, name, got, want)
        # For numeric values, the types should match too (int vs float).
        if isinstance(got, (int, float)):
            got_type = type(got)
            want_type = type(want)
            assert got_type is want_type, "type({}.{}) = {!r} != {!r}".format(
                w.__class__.__name__, name, got_type, want_type)
def check_widget_children(container, **to_check):
    """Check that widgets are created as expected"""
    # Index the container's non-Output children by description so the
    # expectations can be looked up by name.
    by_description = {}
    for child in container.children:
        if not isinstance(child, Output):
            by_description[child.description] = child
    for name, expected in to_check.items():
        assert name in by_description
        check_widget(by_description[name], **expected)
def test_single_value_string():
a = 'hello'
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.Text,
description='a',
value=a,
)
def test_single_value_bool():
for a in (True, False):
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.Checkbox,
description='a',
value=a,
)
def test_single_value_float():
for a in (2.25, 1.0, -3.5, 0.0):
if not a:
expected_min = 0.0
expected_max = 1.0
elif a > 0:
expected_min = -a
expected_max = 3*a
else:
expected_min = 3*a
expected_max = -a
c = interactive(f, a=a)
w = c.children[0]
check_widget(w,
cls=widgets.FloatSlider,
description='a',
value=a,
min=expected_min,
max=expected_max,
step=0.1,
readout=True,
)
def test_single_value_int():
for a in (1, 5, -3, 0):
if not a:
expected_min = 0
expected_max = 1
elif a > 0:
expected_min = -a
expected_max = 3*a
else:
expected_min = 3*a
expected_max = -a
c = interactive(f, a=a)
assert len(c.children) == 2
w = c.children[0]
check_widget(w,
cls=widgets.IntSlider,
description='a',
value=a,
min=expected_min,
max=expected_max,
step=1,
readout=True,
)
def test_list_str():
values = ['hello', 'there', 'guy']
first = values[0]
c = interactive(f, lis=values)
assert len(c.children) == 2
d = dict(
cls=widgets.Dropdown,
value=first,
options=tuple(values),
_options_labels=tuple(values),
_options_values=tuple(values),
)
check_widget_children(c, lis=d)
def test_list_int():
values = [3, 1, 2]
first = values[0]
c = interactive(f, lis=values)
assert len(c.children) == 2
d = dict(
cls=widgets.Dropdown,
value=first,
options=tuple(values),
_options_labels=tuple(str(v) for v in values),
_options_values=tuple(values),
)
check_widget_children(c, lis=d)
def test_list_tuple():
values = [(3, 300), (1, 100), (2, 200)]
first = values[0][1]
c = interactive(f, lis=values)
assert len(c.children) == 2
d = dict(
cls=widgets.Dropdown,
value=first,
options=tuple(values),
_options_labels=("3", "1", "2"),
_options_values=(300, 100, 200),
)
check_widget_children(c, lis=d)
def test_list_tuple_invalid():
for bad in [
(),
]:
with pytest.raises(ValueError):
print(bad) # because there is no custom message in assert_raises
c = interactive(f, tup=bad)
def test_dict():
for d in [
dict(a=5),
dict(a=5, b='b', c=dict),
]:
c = interactive(f, d=d)
w = c.children[0]
check = dict(
cls=widgets.Dropdown,
description='d',
value=next(iter(d.values())),
options=d,
_options_labels=tuple(d.keys()),
_options_values=tuple(d.values()),
)
check_widget(w, **check)
def test_ordereddict():
from collections import OrderedDict
items = [(3, 300), (1, 100), (2, 200)]
first = items[0][1]
values = OrderedDict(items)
c = interactive(f, lis=values)
assert len(c.children) == 2
d = dict(
cls=widgets.Dropdown,
value=first,
options=values,
_options_labels=("3", "1", "2"),
_options_values=(300, 100, 200),
)
check_widget_children(c, lis=d)
def test_iterable():
def yield_values():
yield 3
yield 1
yield 2
first = next(yield_values())
c = interactive(f, lis=yield_values())
assert len(c.children) == 2
d = dict(
cls=widgets.Dropdown,
value=first,
options=(3, 1, 2),
_options_labels=("3", "1", "2"),
_options_values=(3, 1, 2),
)
check_widget_children(c, lis=d)
def test_iterable_tuple():
values = [(3, 300), (1, 100), (2, 200)]
first = values[0][1]
c = interactive(f, lis=iter(values))
assert len(c.children) == 2
d = dict(
cls=widgets.Dropdown,
value=first,
options=tuple(values),
_options_labels=("3", "1", "2"),
_options_values=(300, 100, 200),
)
check_widget_children(c, lis=d)
def test_mapping():
from collections.abc import Mapping
from collections import OrderedDict
class TestMapping(Mapping):
def __init__(self, values):
self.values = values
def __getitem__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
def __iter__(self):
raise NotImplementedError
def items(self):
return self.values
items = [(3, 300), (1, 100), (2, 200)]
first = items[0][1]
values = TestMapping(items)
c = interactive(f, lis=values)
assert len(c.children) == 2
d = dict(
cls=widgets.Dropdown,
value=first,
options=tuple(items),
_options_labels=("3", "1", "2"),
_options_values=(300, 100, 200),
)
check_widget_children(c, lis=d)
def test_decorator_kwarg(clear_display):
with patch.object(interaction, 'display', record_display):
@interact(a=5)
def foo(a):
pass
assert len(displayed) == 1
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSlider,
value=5,
)
def test_interact_instancemethod(clear_display):
class Foo:
def show(self, x):
print(x)
f = Foo()
with patch.object(interaction, 'display', record_display):
g = interact(f.show, x=(1,10))
assert len(displayed) == 1
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSlider,
value=5,
)
def test_decorator_no_call(clear_display):
with patch.object(interaction, 'display', record_display):
@interact
def foo(a='default'):
pass
assert len(displayed) == 1
w = displayed[0].children[0]
check_widget(w,
cls=widgets.Text,
value='default',
)
def test_call_interact(clear_display):
def foo(a='default'):
pass
with patch.object(interaction, 'display', record_display):
ifoo = interact(foo)
assert len(displayed) == 1
w = displayed[0].children[0]
check_widget(w,
cls=widgets.Text,
value='default',
)
def test_call_interact_on_trait_changed_none_return(clear_display):
def foo(a='default'):
pass
with patch.object(interaction, 'display', record_display):
ifoo = interact(foo)
assert len(displayed) == 1
w = displayed[0].children[0]
check_widget(w,
cls=widgets.Text,
value='default',
)
with patch.object(interaction, 'display', record_display):
w.value = 'called'
assert len(displayed) == 1
def test_call_interact_kwargs(clear_display):
def foo(a='default'):
pass
with patch.object(interaction, 'display', record_display):
ifoo = interact(foo, a=10)
assert len(displayed) == 1
w = displayed[0].children[0]
check_widget(w,
cls=widgets.IntSlider,
value=10,
)
def test_call_decorated_on_trait_change(clear_display):
"""test calling @interact decorated functions"""
d = {}
with patch.object(interaction, 'display', record_display):
@interact
def foo(a='default'):
d['a'] = a
return a
assert len(displayed) == 2 # display the result and the interact
w = displayed[1].children[0]
check_widget(w,
cls=widgets.Text,
value='default',
)
# test calling the function directly
a = foo('hello')
assert a == 'hello'
assert d['a'] == 'hello'
# test that setting trait values calls the function
with patch.object(interaction, 'display', record_display):
w.value = 'called'
assert d['a'] == 'called'
assert len(displayed) == 3
assert w.value == displayed[-1]
def test_call_decorated_kwargs_on_trait_change(clear_display):
"""test calling @interact(foo=bar) decorated functions"""
d = {}
with patch.object(interaction, 'display', record_display):
@interact(a='kwarg')
def foo(a='default'):
d['a'] = a
return a
assert len(displayed) == 2 # display the result and the interact
w = displayed[1].children[0]
check_widget(w,
cls=widgets.Text,
value='kwarg',
)
# test calling the function directly
a = foo('hello')
assert a == 'hello'
assert d['a'] == 'hello'
# test that setting trait values calls the function
with patch.object(interaction, 'display', record_display):
w.value = 'called'
assert d['a'] == 'called'
assert len(displayed) == 3
assert w.value == displayed[-1]
def test_fixed():
c = interactive(f, a=widgets.fixed(5), b='text')
assert len(c.children) == 2
w = c.children[0]
check_widget(w,
cls=widgets.Text,
value='text',
description='b',
)
def test_default_description():
c = interactive(f, b='text')
w = c.children[0]
check_widget(w,
cls=widgets.Text,
value='text',
description='b',
)
def test_custom_description():
d = {}
def record_kwargs(**kwargs):
d.clear()
d.update(kwargs)
c = interactive(record_kwargs, b=widgets.Text(value='text', description='foo'))
w = c.children[0]
check_widget(w,
cls=widgets.Text,
value='text',
description='foo',
)
w.value = 'different text'
assert d == {'b': 'different text'}
def test_raises_on_non_value_widget():
""" Test that passing in a non-value widget raises an error """
class BadWidget(Widget):
""" A widget that contains a `value` traitlet """
value = Float()
with pytest.raises(TypeError, match=".* not a ValueWidget.*"):
interactive(f, b=BadWidget())
def test_interact_manual_button():
c = interact.options(manual=True).widget(f)
w = c.children[0]
check_widget(w, cls=widgets.Button)
def test_interact_manual_nocall():
    """With manual=True, changing a widget value must not invoke the callback."""
    callcount = 0

    def calltest(testarg):
        # 'nonlocal' is required here: without it, 'callcount += 1' would
        # raise UnboundLocalError if the callback ever fired, masking the
        # real failure (callback invoked) behind an unrelated error.
        nonlocal callcount
        callcount += 1

    c = interact.options(manual=True)(calltest, testarg=5).widget
    c.children[0].value = 10
    assert callcount == 0
def test_interact_call():
w = interact.widget(f)
w.update()
w = interact_manual.widget(f)
w.update()
def test_interact_options():
def f(x):
return x
w = interact.options(manual=False).options(manual=True)(f, x=21).widget
assert w.manual == True
w = interact_manual.options(manual=False).options()(x=21).widget(f)
assert w.manual == False
w = interact(x=21)().options(manual=True)(f).widget
assert w.manual == True
def test_interact_options_bad():
with pytest.raises(ValueError):
interact.options(bad="foo")
def test_int_range_logic():
irsw = widgets.IntRangeSlider
w = irsw(value=(2, 4), min=0, max=6)
check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
w.upper = 3
w.max = 3
check_widget(w, cls=irsw, value=(2, 3), min=0, max=3)
w.min = 0
w.max = 6
w.lower = 2
w.upper = 4
check_widget(w, cls=irsw, value=(2, 4), min=0, max=6)
w.value = (0, 1) #lower non-overlapping range
check_widget(w, cls=irsw, value=(0, 1), min=0, max=6)
w.value = (5, 6) #upper non-overlapping range
check_widget(w, cls=irsw, value=(5, 6), min=0, max=6)
w.lower = 2
check_widget(w, cls=irsw, value=(2, 6), min=0, max=6)
with pytest.raises(TraitError):
w.min = 7
with pytest.raises(TraitError):
w.max = -1
w = irsw(min=2, max=3, value=(2, 3))
check_widget(w, min=2, max=3, value=(2, 3))
w = irsw(min=100, max=200, value=(125, 175))
check_widget(w, value=(125, 175))
with pytest.raises(TraitError):
irsw(min=2, max=1)
def test_float_range_logic():
    # Same cross-validation checks as the int slider, for floats.
    frsw = widgets.FloatRangeSlider
    w = frsw(value=(.2, .4), min=0., max=.6)
    check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
    w.min = 0.
    w.max = .6
    w.lower = .2
    w.upper = .4
    check_widget(w, cls=frsw, value=(.2, .4), min=0., max=.6)
    w.value = (0., .1)  # lower non-overlapping range
    check_widget(w, cls=frsw, value=(0., .1), min=0., max=.6)
    w.value = (.5, .6)  # upper non-overlapping range
    check_widget(w, cls=frsw, value=(.5, .6), min=0., max=.6)
    # Moving only `lower` keeps the existing upper bound.
    w.lower = .2
    check_widget(w, cls=frsw, value=(.2, .6), min=0., max=.6)
    # Bounds may not cross the current value range.
    with pytest.raises(TraitError):
        w.min = .7
    with pytest.raises(TraitError):
        w.max = -.1
    # A value strictly inside the bounds is left untouched at construction.
    w = frsw(min=2, max=3, value=(2.2, 2.5))
    check_widget(w, min=2., max=3.)
    # Construction with inverted bounds must fail.
    with pytest.raises(TraitError):
        frsw(min=.2, max=.1)
def test_multiple_selection():
    smw = widgets.SelectMultiple

    # degenerate multiple select: no options, empty tuple value
    w = smw()
    check_widget(w, value=tuple())

    # don't accept random other value when no options
    with pytest.raises(TraitError):
        w.value = (2,)
    check_widget(w, value=tuple())

    # basic multiple select; (label, value) pairs are normalized to a tuple
    w = smw(options=[(1, 1)], value=[1])
    check_widget(w, cls=smw, value=(1,), options=((1, 1),))

    # don't accept random other value outside the declared options
    with pytest.raises(TraitError):
        w.value = w.value + (2,)
    check_widget(w, value=(1,))

    # change options, which resets value
    w.options = w.options + ((2, 2),)
    check_widget(w, options=((1, 1), (2, 2)), value=())

    # change value to any subset of the available options
    w.value = (1, 2)
    check_widget(w, value=(1, 2))

    # dict style options are accepted as-is
    w.options = {1: 1}
    check_widget(w, options={1: 1})

    # updating options again re-validates the value against the new set
    w.options = (1,)
    with pytest.raises(TraitError):
        w.value = (2,)
    check_widget(w, options=(1,))
def test_interact_noinspect():
    # `dict` is a builtin whose signature cannot be introspected; interactive()
    # must still build widgets from the explicitly supplied abbreviations.
    a = 'hello'
    c = interactive(dict, a=a)
    w = c.children[0]
    check_widget(w,
                 cls=widgets.Text,
                 description='a',
                 value=a,
                 )
def test_get_interact_value():
    from ipywidgets.widgets import ValueWidget
    from traitlets import Unicode

    class TheAnswer(ValueWidget):
        _model_name = Unicode('TheAnswer')
        description = Unicode()

        def get_interact_value(self):
            # Custom hook: interactive() should consume this value instead of
            # reading the widget's `.value` trait directly.
            return 42

    w = TheAnswer()
    c = interactive(lambda v: v, v=w)
    c.update()
    assert c.result == 42
def test_state_schema():
    from ipywidgets.widgets import IntSlider, Widget
    import json
    import jsonschema

    # Instantiate at least one widget so the manager state is non-trivial.
    s = IntSlider()
    state = Widget.get_manager_state(drop_defaults=True)
    # yatest is the Yandex build-system test helper used to locate the vendored
    # schema file in this repository layout.
    import yatest.common as yc
    with open(yc.source_path('contrib/python/ipywidgets/py3/ipywidgets/state.schema.json')) as f:
        schema = json.load(f)
    # The serialized manager state must validate against the published schema.
    jsonschema.validate(state, schema)
def test_type_hints():
    # Widgets are inferred from the annotations on g(): str -> Text,
    # bool -> Checkbox, int -> IntText, float -> FloatText, Enum -> Dropdown
    # (plus the output child, giving six children in total).
    c = interactive(g)
    assert len(c.children) == 6
    check_widget_children(
        c,
        a={'cls': widgets.Text},
        b={'cls': widgets.Checkbox},
        c={'cls': widgets.IntText},
        d={'cls': widgets.FloatText},
        e={
            'cls': widgets.Dropdown,
            # Enum members become dropdown options keyed by their names.
            'options': {
                'red': Color.red,
                'green': Color.green,
                'blue': Color.blue,
            },
            '_options_labels': ("red", "green", "blue"),
            '_options_values': (Color.red, Color.green, Color.blue),
        },
    )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@ipywidgets@py3@ipywidgets@widgets@tests@test_interaction.py@.PATH_END.py
|
{
"filename": "conf.py",
"repo_name": "spacetelescope/hstaxe",
"repo_path": "hstaxe_extracted/hstaxe-main/docs/conf.py",
"type": "Python"
}
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
import datetime
import importlib
import sys
import os
from distutils.version import LooseVersion
from configparser import ConfigParser
import sphinx
import stsci_rtd_theme
# -- Project information -----------------------------------------------------
def setup(app):
try:
app.add_css_file("stsci.css")
except AttributeError:
app.add_stylesheet("stsci.css")
conf = ConfigParser()
sys.path.insert(0, os.path.abspath('hstaxe/'))
sys.path.insert(0, os.path.abspath('exts/'))
# -- General configuration ---------------------------------------------------
# General information about the project
package = 'hstaxe'
project = 'hstaxe'
author = 'STScI'
copyright = '{0}, {1}'.format(datetime.datetime.now().year, author)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = release = '1.0.1'
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.3'
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
def check_sphinx_version(expected_version):
    """Raise RuntimeError if the installed Sphinx is older than *expected_version*."""
    installed = LooseVersion(sphinx.__version__)
    required = LooseVersion(expected_version)
    if installed < required:
        raise RuntimeError(
            "At least Sphinx version {0} is required to build this "
            "documentation. Found {1}.".format(required, installed))
# Configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'matplotlib': ('https://matplotlib.org/', None),
}
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'numfig',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
if on_rtd:
extensions.append('sphinx.ext.mathjax')
elif LooseVersion(sphinx.__version__) < LooseVersion('1.4'):
extensions.append('sphinx.ext.pngmath')
else:
extensions.append('sphinx.ext.imgmath')
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build']
# A list of warning types to suppress arbitrary warning messages. We mean to
# override directives in astropy_helpers.sphinx.ext.autodoc_enhancements,
# thus need to ignore those warning. This can be removed once the patch gets
# released in upstream Sphinx (https://github.com/sphinx-doc/sphinx/pull/1843).
# Suppress the warnings requires Sphinx v1.4.2
suppress_warnings = ['app.add_directive', ]
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
#rst_epilog = """.. _hstaxe: high-level_API.html"""
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'obj'
# Don't show summaries of the members in each class along with the
# class' docstring
#numpydoc_show_class_members = False
#autosummary_generate = True
# Class documentation should contain *both* the class docstring and
# the __init__ docstring
#autoclass_content = "both"
# Render inheritance diagrams in SVG
graphviz_output_format = "svg"
graphviz_dot_args = [
'-Nfontsize=10',
'-Nfontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Efontsize=10',
'-Efontname=Helvetica Neue, Helvetica, Arial, sans-serif',
'-Gfontsize=10',
'-Gfontname=Helvetica Neue, Helvetica, Arial, sans-serif'
]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'default'
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'stsci_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"collapse_navigation": True
}
# "nosidebar": "false",
# "sidebarbgcolor": "#4db8ff",
# "sidebartextcolor": "black",
# "sidebarlinkcolor": "black",
# "headbgcolor": "white",
# }
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [stsci_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['globaltoc.html', 'relations.html', 'searchbox.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'hstaxedoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
'pointsize': '14pt',
# Additional stuff for the LaTeX preamble.
'preamble': r'''\usepackage{enumitem} \setlistdepth{99}'''
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'hstaxe.tex', u'HSTAXE Documentation',
u'hstaxe', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = '_static/JWSTlogocrop.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
latex_show_urls = 'True'
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'hstaxe', u'HSTAXE Documentation',
[u'hstaxe'], 1)
]
# If true, show URL addresses after external links.
man_show_urls = True
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'hstaxe', u'HSTAXE Documentation',
u'hstaxe', 'hstaxe', 'HSTAXE Documentation',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
texinfo_show_urls = 'inline'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'hstaxe'
epub_author = u'STSCI'
epub_publisher = u'STSCI'
epub_copyright = u'2020, AURA'
# The basename for the epub file. It defaults to the project name.
# epub_basename = u'hstaxe'
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and
# epub output is usually not wise. This defaults to 'epub', a theme designed
# to save visual space.
epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
|
spacetelescopeREPO_NAMEhstaxePATH_START.@hstaxe_extracted@hstaxe-main@docs@conf.py@.PATH_END.py
|
{
"filename": "position_dependent_power_spectrum.py",
"repo_name": "sambit-giri/tools21cm",
"repo_path": "tools21cm_extracted/tools21cm-master/src/tools21cm/position_dependent_power_spectrum.py",
"type": "Python"
}
|
import numpy as np
from tqdm import tqdm
from .power_spectrum import *
def power_spectrum_response(cube, cube2=None, Ncuts=4, kbins=15, box_dims=244/.7, binning='log', verbose=True):
    """
    Calculate the response of a field to large-scale fluctuations
    (Giri et al. 2019, arXiv:1811.09633).

    Thin wrapper around :func:`integrated_bispectrum_cross` with
    ``normalize=True``; the auto-response is computed when ``cube2`` is
    not supplied.

    Parameters:
    - cube (numpy.ndarray): The first input 3D data cube.
    - cube2 (numpy.ndarray): The second input 3D data cube. Default: None, which is replaced with the first cube.
    - Ncuts (int): Number of cuts along each dimension for the box division. Default: 4.
    - kbins (int): Number of bins for the power spectrum calculation. Default: 15.
    - box_dims (float): Size of the box in comoving Mpc. Default: 244/h.
    - binning (str): Binning method for the power spectrum ('log' or 'linear'). Default: 'log'.
    - verbose (bool): If True, display progress. Default: True.

    Returns:
    - numpy.ndarray: Normalized integrated bispectrum (response function).
    - numpy.ndarray: Wavenumbers corresponding to the power spectrum.
    """
    second = cube if cube2 is None else cube2
    return integrated_bispectrum_cross(
        cube,
        second,
        Ncuts=Ncuts,
        kbins=kbins,
        box_dims=box_dims,
        binning=binning,
        normalize=True,
        verbose=verbose,
    )
def integrated_bispectrum(cube, Ncuts=4, kbins=15, box_dims=244/.7, binning='log', normalize=False, verbose=True):
    """
    Calculate the integrated auto-bispectrum of a field
    (Giri et al. 2019, arXiv:1811.09633).

    Equivalent to :func:`integrated_bispectrum_cross` with the same cube
    supplied for both inputs.

    Parameters:
    - cube (numpy.ndarray): The input 3D data cube.
    - Ncuts (int): Number of cuts along each dimension for the box division. Default: 4.
    - kbins (int): Number of bins for the power spectrum calculation. Default: 15.
    - box_dims (float): Size of the box in comoving Mpc. Default: 244/h.
    - binning (str): Binning method for the power spectrum ('log' or 'linear'). Default: 'log'.
    - normalize (bool): If True, normalize the result. Default: False.
    - verbose (bool): If True, display progress. Default: True.

    Returns:
    - numpy.ndarray: Integrated bispectrum power spectrum.
    - numpy.ndarray: Wavenumbers corresponding to the power spectrum.
    """
    return integrated_bispectrum_cross(
        cube,
        cube,
        Ncuts=Ncuts,
        kbins=kbins,
        box_dims=box_dims,
        binning=binning,
        normalize=normalize,
        verbose=verbose,
    )
def integrated_bispectrum_cross(cube1, cube2, Ncuts=4, kbins=15, box_dims=244/.7, binning='log', normalize=False, verbose=True):
    """
    Calculate the integrated bispectrum cross-power spectrum (Giri et al. 2019, arXiv:1811.09633).

    Parameters:
    - cube1 (numpy.ndarray): The first input 3D data cube.
    - cube2 (numpy.ndarray): The second input 3D data cube.
    - Ncuts (int): Number of cuts along each dimension for the box division.
    - kbins (int): Number of bins for the power spectrum calculation.
    - box_dims (float): Size of the box in comoving Mpc.
    - binning (str): Binning method for the power spectrum ('log' or 'linear').
    - normalize (bool): If True, normalize the result (returns the response B/(P*sigma^2)).
    - verbose (bool): If True, display progress.

    Returns:
    - numpy.ndarray: Integrated bispectrum cross-power spectrum.
    - numpy.ndarray: Wavenumbers corresponding to the power spectrum.
    """
    assert cube1.shape == cube2.shape
    # Each axis must divide evenly into Ncuts sub-boxes.
    assert cube1.shape[0] % Ncuts == 0 and cube1.shape[1] % Ncuts == 0 and cube1.shape[2] % Ncuts == 0
    # Sub-box side lengths (grid cells) and the centres of all Ncuts^3 sub-boxes.
    Lx, Ly, Lz = cube1.shape[0]/Ncuts, cube1.shape[1]/Ncuts, cube1.shape[2]/Ncuts
    rLs = [[Lx/2.+i*Lx, Ly/2.+j*Ly, Lz/2.+k*Lz] for i in range(Ncuts) for j in range(Ncuts) for k in range(Ncuts)]
    B_k = np.zeros(kbins, dtype=np.float64)  # accumulator: <P(k) * sub-box mean>
    P_k = np.zeros(kbins, dtype=np.float64)  # accumulator: plain power spectrum
    sig2 = 0                                 # accumulator: squared sub-box means
    n_box = Ncuts**3
    V_L = (Lx*Ly*Lz)                         # sub-box volume in grid cells
    for i in tqdm(range(n_box), disable=not verbose):
        # Top-hat windows selecting sub-box i in each cube.
        w1 = _W_L(cube1, rLs[i], [Lx, Ly, Lz])
        w2 = _W_L(cube2, rLs[i], [Lx, Ly, Lz])
        c1 = cube1 * w1
        c2 = cube2 * w2
        # NOTE(review): the spectrum is taken over the full (mostly zero)
        # windowed cube with the full-box box_dims — presumably intentional;
        # confirm against the paper's estimator definition.
        pk, ks = power_spectrum_1d(c1, kbins=kbins, box_dims=box_dims, binning=binning)
        d_mean = c2.sum(dtype=np.float64)/V_L  # mean of cube2 within the sub-box
        B_k += pk*d_mean
        P_k += pk
        sig2 += (d_mean)**2  # c2.var(dtype=np.float64)
        # if verbose: print("%.2f %%"%(100*(i+1)/n_box))
    # Average the accumulators over all sub-boxes.
    B_k = B_k/n_box
    P_k = P_k/n_box
    sig2 = sig2/n_box
    if verbose: print('The long wavelength mode is %.3f/cMpc'%(2*np.pi/(box_dims/Ncuts)))
    if normalize: return B_k/P_k/sig2, ks
    return B_k, ks
def _W_L(array, rL, L):
'''
Cubical heaviside filter.
'''
assert array.ndim == np.array(rL).size
out = np.zeros(array.shape)
if np.array(L).size==1: L = [L for i in range(array.ndim)]
xl = [int(rL[0]-L[0]/2),int(rL[0]+L[0]/2)]
yl = [int(rL[1]-L[1]/2),int(rL[1]+L[1]/2)]
zl = [int(rL[2]-L[2]/2),int(rL[2]+L[2]/2)]
out[xl[0]:xl[1], yl[0]:yl[1], zl[0]:zl[1]] = 1
return out
def integrated_bispectrum_cross_slide(cube1, cube2, L_subbox=400, kbins=15, box_dims=244/.7, binning='log', normalize=False, verbose=True, slide_overlap=0.5):
    """
    Calculate the integrated bispectrum cross-power spectrum using overlapping
    (sliding) sub-boxes (Giri et al. 2019, arXiv:1811.09633).

    Parameters:
    - cube1 (numpy.ndarray): The first input 3D data cube.
    - cube2 (numpy.ndarray): The second input 3D data cube.
    - L_subbox (float): Box length of the sub-boxes (in number of cells/grids).
    - kbins (int): Number of bins for the power spectrum calculation.
    - box_dims (float): Size of the box in comoving Mpc.
    - binning (str): Binning method for the power spectrum ('log' or 'linear').
    - normalize (bool): If True, normalize the result.
    - verbose (bool): If True, display progress.
    - slide_overlap (float): Fractional overlap between consecutive sub-boxes.

    Returns:
    - numpy.ndarray: Integrated bispectrum cross-power spectrum.
    - numpy.ndarray: Wavenumbers corresponding to the power spectrum.
    """
    # NOTE(review): the author flags this routine as unfinished below; treat
    # its output with caution until the implementation is completed.
    print('IMPLEMENTATION NOT OVER.')
    assert cube1.shape == cube2.shape
    L = cube1.shape[0]
    Lx, Ly, Lz = L_subbox, L_subbox, L_subbox
    # Number of slide positions per axis, then the re-rounded slide step so
    # that the sub-boxes exactly span the full box.
    L_slide = slide_overlap*L_subbox
    sn = int(np.round((L-L_subbox)/(L_subbox-L_slide)+1))
    L_slide = int(np.round(L_subbox-(L-L_subbox)/(sn-1)))
    rLs = [[L_subbox*0.5+(L_subbox-L_slide)*i, L_subbox*0.5+(L_subbox-L_slide)*j, L_subbox*0.5+(L_subbox-L_slide)*k] for i in range(sn) for j in range(sn) for k in range(sn)]
    B_k = np.zeros(kbins, dtype=np.float64)  # accumulator: <P(k) * sub-box mean>
    P_k = np.zeros(kbins, dtype=np.float64)  # accumulator: plain power spectrum
    sig2 = 0                                 # accumulator: squared sub-box means
    n_box = len(rLs)
    V_L = (Lx*Ly*Lz)                         # sub-box volume in grid cells
    for i in tqdm(range(n_box), disable=not verbose):
        # Top-hat windows selecting the current sub-box in each cube.
        w1 = _W_L(cube1, rLs[i], [Lx, Ly, Lz])
        w2 = _W_L(cube2, rLs[i], [Lx, Ly, Lz])
        c1 = cube1 * w1
        c2 = cube2 * w2
        pk, ks = power_spectrum_1d(c1, kbins=kbins, box_dims=box_dims, binning=binning)
        d_mean = c2.sum(dtype=np.float64)/V_L  # mean of cube2 within the sub-box
        B_k += pk*d_mean
        P_k += pk
        sig2 += (d_mean)**2  # c2.var(dtype=np.float64)
        # if verbose: print("%.2f %%"%(100*(i+1)/n_box))
    # Average the accumulators over all sub-box positions.
    B_k = B_k/n_box
    P_k = P_k/n_box
    sig2 = sig2/n_box
    if verbose: print('The long wavelength mode is %.3f/cMpc'%(2*np.pi/(box_dims*L_subbox/Lx)))
    if normalize: return B_k/P_k/sig2, ks
    return B_k, ks
|
sambit-giriREPO_NAMEtools21cmPATH_START.@tools21cm_extracted@tools21cm-master@src@tools21cm@position_dependent_power_spectrum.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "NuSpaceSim/nupyprop",
"repo_path": "nupyprop_extracted/nupyprop-main/tests/__init__.py",
"type": "Python"
}
|
NuSpaceSimREPO_NAMEnupypropPATH_START.@nupyprop_extracted@nupyprop-main@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "setup.py",
"repo_name": "timothydmorton/VESPA",
"repo_path": "VESPA_extracted/VESPA-master/setup.py",
"type": "Python"
}
|
from setuptools import setup, Extension, find_packages
on_rtd = False
try:
from Cython.Distutils import build_ext
from Cython.Build import cythonize
import numpy
except ImportError:
on_rtd = True
numpy = None
build_ext = None
import os
def readme():
    """Return the contents of README.rst as a single string (used as the
    package's long_description)."""
    with open('README.rst') as fh:
        return fh.read()
# Hackishly inject a constant into builtins to enable importing of the
# package before the library is built.
import sys
if sys.version_info[0] < 3:
import __builtin__ as builtins
else:
import builtins
builtins.__VESPA_SETUP__ = True
import vespa
version = vespa.__version__
# Publish the library to PyPI.
if "publish" in sys.argv[-1]:
os.system("python setup.py sdist upload")
sys.exit()
# Push a new tag to GitHub.
if "tag" in sys.argv:
os.system("git tag -a {0} -m 'version {0}'".format(version))
os.system("git push --tags")
sys.exit()
if not on_rtd:
transit_utils = [Extension('vespa._transitutils',['vespa/_transitutils.pyx'],
include_dirs=[numpy.get_include()])]
else:
transit_utils = None
setup(name = "VESPA",
version = version,
description = "Calculate astrophysical false positive probabilities for transiting exoplanet signals",
long_description = readme(),
author = "Timothy D. Morton",
author_email = "tim.morton@gmail.com",
url = "https://github.com/timothydmorton/VESPA",
#packages = ['vespa', 'vespa/stars',
# 'vespa/orbits'],
packages = find_packages(),
package_data = {'vespa': ['data/*', 'tests/kepler-22/*.ini',
'tests/kepler-22/*.h5', 'tests/kepler-22/*.pkl',
'tests/kepler-22/*.cc', 'tests/kepler-22/signal.txt'],
'vespa.stars': ['data/*'],
'vespa.orbits':['data/*']},
ext_modules = cythonize(transit_utils),
scripts = ['scripts/get_trilegal',
'scripts/koifpp',
'scripts/batch_koifpp_condor',
'scripts/calcfpp',
'scripts/koifpp-config',
'scripts/get_kepler_ttvs'],
cmdclass = {'build_ext': build_ext},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy'
],
install_requires=['pandas>=0.21','simpledist>=0.1.13', 'emcee',
'isochrones>=1.1.1', 'batman-package>=2.1',
'configobj'],
zip_safe=False
)
|
timothydmortonREPO_NAMEVESPAPATH_START.@VESPA_extracted@VESPA-master@setup.py@.PATH_END.py
|
{
"filename": "test_bayesian_model.py",
"repo_name": "renecotyfanboy/jaxspec",
"repo_path": "jaxspec_extracted/jaxspec-main/tests/test_bayesian_model.py",
"type": "Python"
}
|
import operator
import jax
import jax.numpy as jnp
from jaxspec._fit._build_model import build_numpyro_model_for_single_obs, build_prior
from jaxspec.fit import BayesianModel
from numpyro.infer.inspect import get_model_relations
def test_model_building(obs_model_prior):
    """
    Check that all parameters are built correctly within the numpyro model.
    """
    obs, spectral_model, prior_distributions = obs_model_prior

    def numpyro_model():
        # Sample the priors, then condition the single-observation likelihood
        # on the sampled parameters.
        params = build_prior(prior_distributions, expand_shape=())
        lower_model = build_numpyro_model_for_single_obs(obs[0], spectral_model, None)
        lower_model(params)

    relations = get_model_relations(numpyro_model)
    # Every non-observed sample site in the traced model must correspond 1:1
    # to a declared prior parameter.
    assert {
        key for key in relations["sample_param"].keys() if key not in relations["observed"]
    } == set(prior_distributions.keys())
def test_likelihood(obs_model_prior):
    # The total log-likelihood must equal the sum of the per-observation pieces.
    obsconf, model, prior = obs_model_prior
    bayesian_model = BayesianModel(model, prior, obsconf)
    # A single sample of the model's 4 parameters (leading batch dimension of 1).
    parameter_array = jnp.asarray([[0.7, 0.2, 2, 3e-4]])
    parameters = bayesian_model.array_to_dict(parameter_array)
    total_likelihood = bayesian_model.log_likelihood(parameters)
    splitted_likelihood = bayesian_model.log_likelihood_per_obs(parameters)
    # Sum every leaf of the per-observation pytree into a single scalar.
    total_likelihood_from_splitted = jax.tree.reduce(
        operator.add, jax.tree.map(jnp.sum, splitted_likelihood)
    )
    assert jnp.isclose(total_likelihood_from_splitted, total_likelihood)
|
renecotyfanboyREPO_NAMEjaxspecPATH_START.@jaxspec_extracted@jaxspec-main@tests@test_bayesian_model.py@.PATH_END.py
|
{
"filename": "_textpositionsrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/waterfall/_textpositionsrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TextpositionsrcValidator(_plotly_utils.basevalidators.SrcValidator):
    """Validator for the `textpositionsrc` attribute of `waterfall` traces.

    A "src" attribute names an external column source supplying per-point
    values for the corresponding attribute (here, `textposition`).
    """

    def __init__(
        self, plotly_name="textpositionsrc", parent_name="waterfall", **kwargs
    ):
        super(TextpositionsrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Defaults follow the generated plotly schema; callers may
            # override either via kwargs.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@waterfall@_textpositionsrc.py@.PATH_END.py
|
{
"filename": "ackbar.py",
"repo_name": "sebastian-zieba/PACMAN",
"repo_path": "PACMAN_extracted/PACMAN-master/src/pacman/lib/models/ackbar.py",
"type": "Python"
}
|
# ! /usr/bin/env python
"""ramp effect model
2 means two types of traps
original author: Daniel Apai
Version 0.3 fixing trapping parameters
Version 0.2.1 introduce two types of traps, slow traps and fast traps
Version 0.2: add extra keyword parameter to indicate scan or staring
mode observations for staring mode, the detector receive flux in the
same rate during overhead time as that during exposure
precise mathematics forms are included
Version 0.1: Adapted original IDL code to python by Yifan Zhou.
"""
import sys
sys.path.insert(0, '..')
import numpy as np
import itertools
def ackbar(t, data, params, visit = 0):
    """Hubble Space Telescope ramp-effect (charge-trapping) model.

    Computes the expected, normalised counts of a WFC3 exposure series
    affected by detector charge trapping, using separate populations of
    "slow" and "fast" traps (RECTE-style model; adapted from IDL by
    Yifan Zhou -- see module header).

    Parameters
    ----------
    t : array
        Exposure start times [days]; converted internally to seconds
        relative to the first exposure.
    data : object
        Must provide the exposure time in seconds as ``data.exp_time``.
    params : sequence
        ``(trap_pop_s, trap_pop_f, dTrap_s, dTrap_f)``, each indexable by
        ``visit``: initial numbers of occupied slow/fast traps and the
        numbers of extra traps added at each orbit switch.
    visit : int
        Index selecting the current visit from each entry of ``params``.

    Returns
    -------
    array
        Modelled counts of every exposure, normalised by their maximum.

    Notes (from the original implementation)
    ----------------------------------------
    cRates -- intrinsic count rate of each exposures, unit e/s
    tExp -- start time of every exposures
    expTime -- (default 180 seconds) exposure time of the time series
    trap_pop -- (default 0) number of occupied traps at the beginning of the observations
    dTrap -- (default [0]) number of extra trap added between two orbits
    dt0 -- (default 0) possible exposures before very beginning, e.g.,
    guiding adjustment
    lost -- (default 0, no lost) proportion of trapped electrons that are not eventually detected
    (mode) -- (default scanning, scanning or staring, or others), for scanning mode
    observation , the pixel no longer receive photons during the overhead
    time, in staring mode, the pixel keps receiving elctrons
    """
    trap_pop_s, trap_pop_f, dTrap_s, dTrap_f = params
    # Select the per-visit values.
    trap_pop_s = trap_pop_s[visit]
    trap_pop_f = trap_pop_f[visit]
    dTrap_s = dTrap_s[visit]
    dTrap_f = dTrap_f[visit]
    #print trap_pop_s, trap_pop_f
    #t = data.time[idx]
    # NOTE(review): `mean` and `lost` are assigned but never used below.
    mean = 12173979.5
    # Intrinsic count rate, hard-coded to 328.1 e/s for every exposure
    # (presumably instrument/target specific -- TODO confirm).
    cRates = 328.1*np.ones_like(t)#*np.mean(data.flux)/mean
    #cRates = 316.*np.ones_like(t)#*np.mean(data.flux)/mean
    # Exposure start times in seconds, relative to the first exposure.
    tExp = (t - t[0])*24.*60.*60.
    exptime = data.exp_time
    dt0=0
    lost=0
    mode='scanning'
    # Trap-population parameters: number of traps, trapping efficiency and
    # release timescale [s] for the slow (_s) and fast (_f) populations.
    #nTrap_s = 1525.38 # 1320.0
    #eta_trap_s = 0.013318 # 0.01311
    nTrap_s = 1525.38 # 1320.0
    eta_trap_s = 0.013318 # 0.01311
    tau_trap_s = 1.63e4
    nTrap_f = 162.38
    eta_trap_f = 0.008407
    tau_trap_f = 281.463
    # Turn dTrap_s/dTrap_f/dt0 into infinite iterators so a value can be
    # drawn at every orbit switch.  NOTE(review): if dTrap_f/dTrap_s are
    # iterable but dt0 is a scalar, the TypeError raised for dt0 re-wraps the
    # already-cycled dTrap iterators inside an extra list -- harmless only
    # because this function always passes scalars here; verify before
    # passing per-orbit sequences.
    try:
        dTrap_f = itertools.cycle(dTrap_f)
        dTrap_s = itertools.cycle(dTrap_s)
        dt0 = itertools.cycle(dt0)
    except TypeError:
        # if dTrap, dt0 provided in scala, convert them to list
        dTrap_f = itertools.cycle([dTrap_f])
        dTrap_s = itertools.cycle([dTrap_s])
        dt0 = itertools.cycle([dt0])
    obsCounts = np.zeros(len(tExp))
    # ensure initial values do not exceed the total trap numbers
    trap_pop_s = min(trap_pop_s, nTrap_s)
    trap_pop_f = min(trap_pop_f, nTrap_f)
    #print "trap_pop_f", trap_pop_f
    for i in range(len(tExp)):
        # Gap to the next exposure; the last exposure falls back to exptime.
        try:
            dt = tExp[i+1] - tExp[i]
        except IndexError:
            dt = exptime
        f_i = cRates[i]
        c1_s = eta_trap_s * f_i / nTrap_s + 1 / tau_trap_s # a key factor
        c1_f = eta_trap_f * f_i / nTrap_f + 1 / tau_trap_f
        # number of trapped electron during one exposure
        dE1_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * (1 - np.exp(-c1_s * exptime))
        dE1_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * (1 - np.exp(-c1_f * exptime))
        # Clip so the populations can never exceed the total trap numbers.
        dE1_s = min(trap_pop_s + dE1_s, nTrap_s) - trap_pop_s
        dE1_f = min(trap_pop_f + dE1_f, nTrap_f) - trap_pop_f
        trap_pop_s = min(trap_pop_s + dE1_s, nTrap_s)
        trap_pop_f = min(trap_pop_f + dE1_f, nTrap_f)
        # Observed counts = intrinsic counts minus newly trapped electrons.
        obsCounts[i] = f_i * exptime - dE1_s - dE1_f
        if dt < 5 * exptime: # whether next exposure is in next batch of exposures
            # same orbits
            if mode == 'scanning':
                # scanning mode, no incoming flux between exposures
                dE2_s = - trap_pop_s * (1 - np.exp(-(dt - exptime)/tau_trap_s))
                dE2_f = - trap_pop_f * (1 - np.exp(-(dt - exptime)/tau_trap_f))
            elif mode == 'staring':
                # for staring mode, there is flux between exposures
                dE2_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * (1 - np.exp(-c1_s * (dt - exptime)))
                dE2_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * (1 - np.exp(-c1_f * (dt - exptime)))
            else:
                # others, same as scanning
                dE2_s = - trap_pop_s * (1 - np.exp(-(dt - exptime)/tau_trap_s))
                dE2_f = - trap_pop_f * (1 - np.exp(-(dt - exptime)/tau_trap_f))
            trap_pop_s = min(trap_pop_s + dE2_s, nTrap_s)
            trap_pop_f = min(trap_pop_f + dE2_f, nTrap_f)
        elif dt < 1200:
            # considering in-orbit buffer download scenario
            if mode == 'staring':
                trap_pop_s = min(trap_pop_s * np.exp(-(dt-exptime)/tau_trap_s), nTrap_s)
                trap_pop_f = min(trap_pop_f * np.exp(-(dt-exptime)/tau_trap_f), nTrap_f)
        else:
            # switch orbit
            dt0_i = next(dt0)
            # Trapped charge decays over the gap, then the dTrap extra traps
            # are filled at the start of the new orbit.
            trap_pop_s = min(trap_pop_s * np.exp(-(dt-exptime-dt0_i)/tau_trap_s) + next(dTrap_s), nTrap_s)
            trap_pop_f = min(trap_pop_f * np.exp(-(dt-exptime-dt0_i)/tau_trap_f) + next(dTrap_f), nTrap_f)
            f_i = cRates[i + 1]
            c1_s = eta_trap_s * f_i / nTrap_s + 1 / tau_trap_s # a key factor
            c1_f = eta_trap_f * f_i / nTrap_f + 1 / tau_trap_f
            # Trapping during the dt0 "pre-exposures" of the new orbit.
            dE3_s = (eta_trap_s * f_i / c1_s - trap_pop_s) * (1 - np.exp(-c1_s * dt0_i))
            dE3_f = (eta_trap_f * f_i / c1_f - trap_pop_f) * (1 - np.exp(-c1_f * dt0_i))
            dE3_s = min(trap_pop_s + dE3_s, nTrap_s) - trap_pop_s
            dE3_f = min(trap_pop_f + dE3_f, nTrap_f) - trap_pop_f
            trap_pop_s = min(trap_pop_s + dE3_s, nTrap_s)
            trap_pop_f = min(trap_pop_f + dE3_f, nTrap_f)
        # Trap populations can never be negative.
        trap_pop_s = max(trap_pop_s, 0)
        trap_pop_f = max(trap_pop_f, 0)
    return (obsCounts/np.max(obsCounts))
|
sebastian-ziebaREPO_NAMEPACMANPATH_START.@PACMAN_extracted@PACMAN-master@src@pacman@lib@models@ackbar.py@.PATH_END.py
|
{
"filename": "test_MBB_errors.py",
"repo_name": "mjuvela/ISM",
"repo_path": "ISM_extracted/ISM-master/MBB/test_MBB_errors.py",
"type": "Python"
}
|
import os, sys
# Locate the ISM package: default to ~/GITHUB, overridable via the
# ISM_DIRECTORY environment variable.
# BUG FIX: the previous code called os.environ(['ISM_DIRECTORY']); os.environ
# is a mapping, not a callable, so this always raised TypeError, which a bare
# `except: pass` swallowed -- the environment override silently never worked.
ISM_DIRECTORY = os.environ.get('ISM_DIRECTORY', os.path.expanduser('~/GITHUB'))
sys.path.append(ISM_DIRECTORY)
from ISM.Defs import *
from ISM.FITS.FITS import CopyFits, MakeEmptyFitsDim
from ISM.MBB.MBB_MCMC import *
import matplotlib.ticker as ticker
import time
import numpy as np
# Generate some synthetic surface brightness observations
N, M = 54, 55
J, I = indices((N,M))
# True parameter maps: 250 um intensity, dust temperature [K] and
# emissivity spectral index.
I250 = 10.0+2.0*cos(0.05*I)+2.0*sin(0.05*J)
T = 15.0+2.0*sin(0.1*I)
B = 1.8 + 0.2*sin(0.1*J)
um = asarray([160.0, 250.0, 350.0, 500.0], float32) # wavelengths [um]
freq = um2f(um)
# Build one (map, error map) FITS pair per band, adding 3% Gaussian noise.
FF, dFF = [], []
for iband in range(len(um)):
    y = (I250* ModifiedBlackbody_250(freq[iband], T, B)).reshape(N,M)
    dy = 0.03*y
    y += dy*randn(N,M)
    F = MakeEmptyFitsDim(1.0, 0.0, 10.0*ARCSEC_TO_RADIAN, N, M)
    dF = MakeEmptyFitsDim(1.0, 0.0, 10.0*ARCSEC_TO_RADIAN, N, M)
    F[0].data = y.reshape(N, M)
    dF[0].data = dy.reshape(N, M)
    FF.append(F)
    dFF.append(dF)
# Fits the MBB spectra with python
PI, PT, PB = FitModifiedBlackbody_simple(um, FF, dFF, FF[1][0].data.copy(), 15.0, 1.8)
# Fit the MBB spectra with the OpenCL routine
CI, CT, CB = MBB_fit_CL_FITS(freq, FF, dFF, GPU=0, TMIN=7.0, TMAX=40.0, BMIN=0.5, BMAX=3.5)
# 3x3 comparison figure:
#   row 1 -- true input maps,
#   row 2 -- Python-fit residuals against the truth,
#   row 3 -- OpenCL-fit minus Python-fit differences.
figure(1, figsize=(8,7))
rc('font', size=9)
subplots_adjust(left=0.08, right=0.94, bottom=0.08, top=0.94, wspace=0.29, hspace=0.27)
subplot(331)
imshow(I250)
title(r'$I_0$')
colorbar()
subplot(332)
imshow(T)
title(r'$T_0$')
colorbar()
subplot(333)
imshow(B)
title(r'$\beta_0$')
colorbar()
# second row -- residuals of the Python fits
subplot(334)
X = PI[0].data - I250
imshow(X)
title(r'$I(Py)-I_0$')
colorbar()
subplot(335)
X = PT[0].data - T
imshow(X)
title(r'$T(Py)-T_0 \/ (K)$')
colorbar()
subplot(336)
X = PB[0].data - B
imshow(X)
title(r'$\beta(Py)-\beta_0$')
colorbar()
# third row -- residuals of the OpenCL fits
subplot(337)
X = CI[0].data - PI[0].data
imshow(X)
title(r'$I(CL)-I(Py)$')
colorbar()
subplot(338)
X = CT[0].data - PT[0].data
imshow(X)
title(r'$T(CL)-T(Py) \/ (K)$')
colorbar()
subplot(339)
X = CB[0].data - PB[0].data
imshow(X)
title(r'$\beta(CL)-\beta(Py)$')
colorbar()
savefig('test_MBB_errors.png')
show()
|
mjuvelaREPO_NAMEISMPATH_START.@ISM_extracted@ISM-master@MBB@test_MBB_errors.py@.PATH_END.py
|
{
"filename": "ui-notes.md",
"repo_name": "Caltech-IPAC/firefly",
"repo_path": "firefly_extracted/firefly-master/docs/ui-notes.md",
"type": "Markdown"
}
|
# UI Notes
### Miscellaneous
- toolbar background: `<Sheet variant='soft'>` or `<Sheet className='TapSearch__section' variant='outlined' sx={{flexDirection: 'column', flexGrow: 1}}>`
- Use `Divider` instead of border where possible
- `sx` is mostly not necessary for `Box` and `Stack` as these are considered `CSS Utility` components in Joy UI
- instead, if you have something like:
- `<Stack justifyContent='space-between' spacing={1} sx={{resize: 'both', overflow: 'auto'}}>`
- it could just become: `<Stack {...{justifyContent:'space-between', spacing:1, resize:'both', overflow: 'auto'}}>`
- Avoid using component with Typography like this unless there's a good reason to do so: `<Typography component='div'>`
### Buttons
- primary button: `<Button size:'md', variant: 'solid' color: 'primary'/>`
- secondary button: `<Button/>` (see above for default)
- significant action button: (eg file upload): `<Button color='success' variant='solid'/>`
- other buttons: `<Chip/>` and `<Link/>` (see above for defaults)
### Text
- information: `<Typography/>`
- labels: `<Typography/>`
- feedback: `<Typography color='warning'/>`
- Do not use bold or italic directly (in fact, avoid using fontStyle, fontWeight, fontSize)
- Instead of bold or fontSize, use level with `<Typography/>`
- Instead of italic, use a color (like warning) or `Link` / `Chip` if it's a link
### Colors
- Avoid setting color directly, instead use JoyUI colors like `color='success'`, `color='primary'`, `color='warning'`, `color='danger'`, etc.
- This is important especially for dark mode. If we use JoyUI colors, JoyUI handles the changes when switching to dark mode.
### Spacing and Layout:
- **Control spacing from the top:** Unless absolutely necessary, avoid applying padding/margin to individual elements for creating space from their sibling elements. Instead, set spacing at the parent element by using `<Stack spacing={number}>`, which also enforces visual consistency and maintainability.
- **Match spacing with visual relationships:**
- Decrease spacing gradually from parent to child elements in the code to visually reinforce hierarchy. E.g.:
```jsx
<Stack spacing={2}>
<Stack spacing={1}> {/* Visual group 1 */}
<Component1/>
<Component1HelperText/>
</Stack>
<Stack spacing={1}> {/* Visual group 2 */}
<Component2/>
<Component2HelperText/>
</Stack>
</Stack>
```
- Enforce a sibling relationship by setting equal spacing between elements, even if they have a parent-child relationship in the code. E.g.:
```jsx
<Stack spacing={1}>
<ComponentModeSelection/>
<ComponentWithHelperText/> {/* Internally uses <Stack spacing={1}> */}
</Stack>
```
Intentionally setting spacing=1 instead of 2 will make `ComponentModeSelection` appear on same level as constituents of `ComponentWithHelperText`.
|
Caltech-IPACREPO_NAMEfireflyPATH_START.@firefly_extracted@firefly-master@docs@ui-notes.md@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "waynebhayes/SpArcFiRe",
"repo_path": "SpArcFiRe_extracted/SpArcFiRe-master/scripts/SpArcFiRe-pyvenv/lib/python2.7/site-packages/pyfits/tests/__init__.py",
"type": "Python"
}
|
from __future__ import division # confidence high
import os
import shutil
import stat
import tempfile
import time
import warnings
import pyfits as fits
class PyfitsTestCase(object):
    """Base class for pyfits test cases.

    Provides a per-test temporary directory plus helpers for locating test
    data files and copying them into that directory.
    """

    def setup(self):
        self.data_dir = os.path.join(os.path.dirname(__file__), 'data')
        self.temp_dir = tempfile.mkdtemp(prefix='pyfits-test-')

        # Restore global settings to their defaults so tests stay isolated.
        for name, value in fits.core.GLOBALS:
            setattr(fits, name, value)

        # Deprecation warnings only matter on Python 2.5/2.6 -- they are
        # ignored by default on 2.7.  Silence everything but UserWarning.
        warnings.resetwarnings()
        warnings.simplefilter('ignore')
        warnings.simplefilter('always', UserWarning)

    def teardown(self):
        # A handle to a file in the temp dir may still be open (or not yet
        # garbage-collected), so retry the removal a few times.
        for _ in range(3):
            try:
                shutil.rmtree(self.temp_dir)
            except OSError:
                time.sleep(0.5)
            else:
                break

    def copy_file(self, filename):
        """Copy a test data file into the temp dir and make it writeable."""
        destination = self.temp(filename)
        shutil.copy(self.data(filename), destination)
        os.chmod(destination, stat.S_IREAD | stat.S_IWRITE)

    def data(self, filename):
        """Return the full path to *filename* in the test data directory."""
        return os.path.join(self.data_dir, filename)

    def temp(self, filename):
        """Return the full path to *filename* in the per-test temp dir."""
        return os.path.join(self.temp_dir, filename)
|
waynebhayesREPO_NAMESpArcFiRePATH_START.@SpArcFiRe_extracted@SpArcFiRe-master@scripts@SpArcFiRe-pyvenv@lib@python2.7@site-packages@pyfits@tests@__init__.py@.PATH_END.py
|
{
"filename": "_shape.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/scatter/fillpattern/_shape.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShapeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for the ``scatter.fillpattern.shape`` property."""

    def __init__(
        self, plotly_name="shape", parent_name="scatter.fillpattern", **kwargs
    ):
        # Fill in the standard settings for this property unless the caller
        # explicitly overrides them.
        kwargs.setdefault("array_ok", True)
        kwargs.setdefault("edit_type", "style")
        kwargs.setdefault("values", ["", "/", "\\", "x", "-", "|", "+", "."])
        super(ShapeValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@scatter@fillpattern@_shape.py@.PATH_END.py
|
{
"filename": "test_year.py",
"repo_name": "pandas-dev/pandas",
"repo_path": "pandas_extracted/pandas-main/pandas/tests/tseries/offsets/test_year.py",
"type": "Python"
}
|
"""
Tests for the following offsets:
- YearBegin
- YearEnd
"""
from __future__ import annotations
from datetime import datetime
import numpy as np
import pytest
from pandas import Timestamp
from pandas.tests.tseries.offsets.common import (
assert_is_on_offset,
assert_offset_equal,
)
from pandas.tseries.offsets import (
YearBegin,
YearEnd,
)
class TestYearBegin:
def test_misspecified(self):
with pytest.raises(ValueError, match="Month must go from 1 to 12"):
YearBegin(month=13)
offset_cases = []
offset_cases.append(
(
YearBegin(),
{
datetime(2008, 1, 1): datetime(2009, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(0),
{
datetime(2008, 1, 1): datetime(2008, 1, 1),
datetime(2008, 6, 30): datetime(2009, 1, 1),
datetime(2008, 12, 31): datetime(2009, 1, 1),
datetime(2005, 12, 30): datetime(2006, 1, 1),
datetime(2005, 12, 31): datetime(2006, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(3),
{
datetime(2008, 1, 1): datetime(2011, 1, 1),
datetime(2008, 6, 30): datetime(2011, 1, 1),
datetime(2008, 12, 31): datetime(2011, 1, 1),
datetime(2005, 12, 30): datetime(2008, 1, 1),
datetime(2005, 12, 31): datetime(2008, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(-1),
{
datetime(2007, 1, 1): datetime(2006, 1, 1),
datetime(2007, 1, 15): datetime(2007, 1, 1),
datetime(2008, 6, 30): datetime(2008, 1, 1),
datetime(2008, 12, 31): datetime(2008, 1, 1),
datetime(2006, 12, 29): datetime(2006, 1, 1),
datetime(2006, 12, 30): datetime(2006, 1, 1),
datetime(2007, 1, 1): datetime(2006, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(-2),
{
datetime(2007, 1, 1): datetime(2005, 1, 1),
datetime(2008, 6, 30): datetime(2007, 1, 1),
datetime(2008, 12, 31): datetime(2007, 1, 1),
},
)
)
offset_cases.append(
(
YearBegin(month=4),
{
datetime(2007, 4, 1): datetime(2008, 4, 1),
datetime(2007, 4, 15): datetime(2008, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(0, month=4),
{
datetime(2007, 4, 1): datetime(2007, 4, 1),
datetime(2007, 3, 1): datetime(2007, 4, 1),
datetime(2007, 12, 15): datetime(2008, 4, 1),
datetime(2012, 1, 31): datetime(2012, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(4, month=4),
{
datetime(2007, 4, 1): datetime(2011, 4, 1),
datetime(2007, 4, 15): datetime(2011, 4, 1),
datetime(2007, 3, 1): datetime(2010, 4, 1),
datetime(2007, 12, 15): datetime(2011, 4, 1),
datetime(2012, 1, 31): datetime(2015, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(-1, month=4),
{
datetime(2007, 4, 1): datetime(2006, 4, 1),
datetime(2007, 3, 1): datetime(2006, 4, 1),
datetime(2007, 12, 15): datetime(2007, 4, 1),
datetime(2012, 1, 31): datetime(2011, 4, 1),
},
)
)
offset_cases.append(
(
YearBegin(-3, month=4),
{
datetime(2007, 4, 1): datetime(2004, 4, 1),
datetime(2007, 3, 1): datetime(2004, 4, 1),
datetime(2007, 12, 15): datetime(2005, 4, 1),
datetime(2012, 1, 31): datetime(2009, 4, 1),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(YearBegin(), datetime(2007, 1, 3), False),
(YearBegin(), datetime(2008, 1, 1), True),
(YearBegin(), datetime(2006, 12, 31), False),
(YearBegin(), datetime(2006, 1, 2), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestYearEnd:
def test_misspecified(self):
with pytest.raises(ValueError, match="Month must go from 1 to 12"):
YearEnd(month=13)
offset_cases = []
offset_cases.append(
(
YearEnd(),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2009, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
datetime(2005, 12, 31): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
YearEnd(0),
{
datetime(2008, 1, 1): datetime(2008, 12, 31),
datetime(2008, 6, 30): datetime(2008, 12, 31),
datetime(2008, 12, 31): datetime(2008, 12, 31),
datetime(2005, 12, 30): datetime(2005, 12, 31),
},
)
)
offset_cases.append(
(
YearEnd(-1),
{
datetime(2007, 1, 1): datetime(2006, 12, 31),
datetime(2008, 6, 30): datetime(2007, 12, 31),
datetime(2008, 12, 31): datetime(2007, 12, 31),
datetime(2006, 12, 29): datetime(2005, 12, 31),
datetime(2006, 12, 30): datetime(2005, 12, 31),
datetime(2007, 1, 1): datetime(2006, 12, 31),
},
)
)
offset_cases.append(
(
YearEnd(-2),
{
datetime(2007, 1, 1): datetime(2005, 12, 31),
datetime(2008, 6, 30): datetime(2006, 12, 31),
datetime(2008, 12, 31): datetime(2006, 12, 31),
},
)
)
@pytest.mark.parametrize("case", offset_cases)
def test_offset(self, case):
offset, cases = case
for base, expected in cases.items():
assert_offset_equal(offset, base, expected)
on_offset_cases = [
(YearEnd(), datetime(2007, 12, 31), True),
(YearEnd(), datetime(2008, 1, 1), False),
(YearEnd(), datetime(2006, 12, 31), True),
(YearEnd(), datetime(2006, 12, 29), False),
]
@pytest.mark.parametrize("case", on_offset_cases)
def test_is_on_offset(self, case):
offset, dt, expected = case
assert_is_on_offset(offset, dt, expected)
class TestYearEndDiffMonth:
    """Tests for ``YearEnd`` anchored on a month other than December."""

    # Each entry is (offset, {input datetime: expected rolled datetime}).
    offset_cases = []
    offset_cases.append(
        (
            YearEnd(month=3),
            {
                datetime(2008, 1, 1): datetime(2008, 3, 31),
                datetime(2008, 2, 15): datetime(2008, 3, 31),
                datetime(2008, 3, 31): datetime(2009, 3, 31),
                datetime(2008, 3, 30): datetime(2008, 3, 31),
                datetime(2005, 3, 31): datetime(2006, 3, 31),
                datetime(2006, 7, 30): datetime(2007, 3, 31),
            },
        )
    )
    offset_cases.append(
        (
            YearEnd(0, month=3),
            {
                datetime(2008, 1, 1): datetime(2008, 3, 31),
                datetime(2008, 2, 28): datetime(2008, 3, 31),
                datetime(2008, 3, 31): datetime(2008, 3, 31),
                datetime(2005, 3, 30): datetime(2005, 3, 31),
            },
        )
    )
    offset_cases.append(
        (
            YearEnd(-1, month=3),
            {
                datetime(2007, 1, 1): datetime(2006, 3, 31),
                datetime(2008, 2, 28): datetime(2007, 3, 31),
                datetime(2008, 3, 31): datetime(2007, 3, 31),
                datetime(2006, 3, 29): datetime(2005, 3, 31),
                datetime(2006, 3, 30): datetime(2005, 3, 31),
                datetime(2007, 3, 1): datetime(2006, 3, 31),
            },
        )
    )
    offset_cases.append(
        (
            YearEnd(-2, month=3),
            {
                datetime(2007, 1, 1): datetime(2005, 3, 31),
                datetime(2008, 6, 30): datetime(2007, 3, 31),
                datetime(2008, 3, 31): datetime(2006, 3, 31),
            },
        )
    )

    @pytest.mark.parametrize("case", offset_cases)
    def test_offset(self, case):
        offset, cases = case
        for base, expected in cases.items():
            assert_offset_equal(offset, base, expected)

    on_offset_cases = [
        (YearEnd(month=3), datetime(2007, 3, 31), True),
        (YearEnd(month=3), datetime(2008, 1, 1), False),
        (YearEnd(month=3), datetime(2006, 3, 31), True),
        (YearEnd(month=3), datetime(2006, 3, 29), False),
    ]

    @pytest.mark.parametrize("case", on_offset_cases)
    def test_is_on_offset(self, case):
        offset, dt, expected = case
        assert_is_on_offset(offset, dt, expected)
def test_add_out_of_pydatetime_range():
    # GH#50348 don't raise in Timestamp.replace
    start = Timestamp(np.datetime64("-20000-12-31"))
    shifted = start + YearEnd()

    # TODO(cython3): "arg: datetime" annotation will impose
    # datetime limitations on Timestamp. The fused type below works in cy3
    #  ctypedef fused datetimelike:
    #      _Timestamp
    #      datetime
    # expected = Timestamp(np.datetime64("-19999-12-31"))
    # assert shifted == expected
    assert shifted.month == 12
    assert shifted.day == 31
    assert shifted.year in (-19999, 1973)
|
pandas-devREPO_NAMEpandasPATH_START.@pandas_extracted@pandas-main@pandas@tests@tseries@offsets@test_year.py@.PATH_END.py
|
{
"filename": "test_feature.py",
"repo_name": "light-curve/light-curve-python",
"repo_path": "light-curve-python_extracted/light-curve-python-master/light-curve/tests/light_curve_ext/test_feature.py",
"type": "Python"
}
|
import copy
import inspect
import pickle
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_array_equal
import light_curve.light_curve_ext as lc
def _feature_classes(module, *, exclude_parametric=True):
    """Yield all public feature-evaluator classes reachable from *module*.

    Recurses into submodules.  With ``exclude_parametric=True`` classes
    whose constructor requires arguments are skipped.
    """
    for name, member in inspect.getmembers(module):
        if name.startswith("_"):
            continue
        if inspect.ismodule(member):
            # BUG FIX: the recursion previously always used the default
            # exclude_parametric=True, so parametric classes defined in
            # submodules were dropped even when the caller asked for them.
            yield from _feature_classes(member, exclude_parametric=exclude_parametric)
        if not inspect.isclass(member):
            continue
        if not issubclass(member, lc._FeatureEvaluator):
            continue
        if member is lc.JSONDeserializedFeature:
            continue
        # Skip classes with non-trivial constructors: probing the
        # zero-argument constructor raises TypeError for parametric classes.
        if exclude_parametric:
            try:
                member()
            except TypeError:
                continue
        yield member
# Feature-class sets collected once at import time; the asserts guard
# against an upstream API change silently emptying the parametrized tests.
non_param_feature_classes = frozenset(_feature_classes(lc, exclude_parametric=True))
assert len(non_param_feature_classes) > 0

all_feature_classes = frozenset(_feature_classes(lc, exclude_parametric=False))
assert len(all_feature_classes) > 0
def get_new_args_kwargs(cls):
    """Return the ``(args, kwargs)`` constructor arguments advertised by
    *cls* through its pickle helpers, or ``((), {})`` if it has none."""
    getnewargs_ex = getattr(cls, "__getnewargs_ex__", None)
    if getnewargs_ex is not None:
        return getnewargs_ex()
    getnewargs = getattr(cls, "__getnewargs__", None)
    if getnewargs is not None:
        return getnewargs(), {}
    return (), {}
def new_default(cls, **kwargs):
    """Instantiate *cls* from its pickle constructor arguments; keyword
    arguments supplied by the caller override the defaults."""
    base_args, base_kwargs = get_new_args_kwargs(cls)
    merged = {**base_kwargs, **kwargs}
    return cls(*base_args, **merged)
def construct_example_objects(cls, *, parametric_variants=1, rng=None):
    """Build example instances of feature class *cls*.

    Returns one instance created with default constructor arguments, plus up
    to ``parametric_variants - 1`` copies whose float parameters have been
    randomly perturbed.
    """
    # Extractor is special
    if cls is lc.Extractor:
        return [cls(lc.BeyondNStd(1.5), lc.LinearFit())]
    # No mandatory arguments
    if not hasattr(cls, "__getnewargs__"):
        return [cls()]
    # default mandatory arguments
    args, kwargs = get_new_args_kwargs(cls)
    # Add Mean feature for metafeatures
    args = [[lc.Mean()] if arg == () else arg for arg in args]
    objects = [cls(*args, **kwargs)]
    # Nothing to mutate
    if not any(isinstance(arg, float) for arg in args + list(kwargs.values())):
        return objects
    # Mutate floats
    rng = np.random.default_rng(rng)

    def mutation(value):
        # Scale by roughly +-10% and add a tiny positive jitter;
        # non-float values pass through unchanged.
        if not isinstance(value, float):
            return value
        return value * rng.uniform(0.9, 1.1) + rng.uniform(0.0, 1e-3)

    for _ in range(1, parametric_variants):
        mutated_args = list(map(mutation, args))
        mutated_kwargs = {name: mutation(value) for name, value in kwargs.items()}
        objects.append(cls(*mutated_args, **mutated_kwargs))
    return objects
def gen_feature_evaluators(*, parametric_variants=0, rng=None):
    """Yield example feature evaluators.

    With ``parametric_variants=0`` only default instances of parameterless
    features are produced; otherwise every feature class contributes the
    examples built by ``construct_example_objects``.
    """
    if parametric_variants == 0:
        yield from (cls() for cls in non_param_feature_classes)
        return
    generator = np.random.default_rng(rng)
    for cls in all_feature_classes:
        yield from construct_example_objects(
            cls, parametric_variants=parametric_variants, rng=generator
        )
def gen_lc(n, rng=None):
    """Generate a synthetic light curve of *n* points: sorted normal times,
    magnitudes equal to the times, and constant 0.1 errors."""
    generator = np.random.default_rng(rng)
    times = np.sort(generator.normal(0, 1, n))
    magnitudes = times.copy()
    errors = np.full_like(times, 0.1)
    return times, magnitudes, errors
@pytest.mark.parametrize("cls", list(all_feature_classes))
def test_available_transforms(cls):
    """Transform handling must be consistent across transform=None/False/True."""
    # All available features should consume transform=None
    none = new_default(cls, transform=None)
    # If transform consumes False it
    # 1) should give the same feature as transform=None
    # 2) should be able to consume transform=True
    try:
        false = new_default(cls, transform=False)
    except NotImplementedError:
        return
    # It would be better to compare objects themselves, but __eq__ is not implemented yet
    # https://github.com/light-curve/light-curve-python/issues/148
    assert false.names == none.names
    true = new_default(cls, transform=True)
    # Check if transform=True is not the same as transform=False
    default_transform = getattr(cls, "default_transform", None)
    if default_transform != "identity":
        assert true.names != false.names
    # Both attributes should be present or absent
    assert hasattr(cls, "supported_transforms") == hasattr(cls, "default_transform")
    if not hasattr(cls, "supported_transforms"):
        return
    assert cls.default_transform in cls.supported_transforms
    for transform in cls.supported_transforms + ["default"]:
        new_default(cls, transform=transform)


@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=2))
def test_negative_strides(feature):
    # Arrays built with negative strides must be accepted.
    t = np.linspace(1, 0, 20)[::-2]
    m = np.exp(t)[:]
    err = np.random.uniform(0.1, 0.2, t.shape)
    feature(t, m, err)


# We don't want *Fit features here: not precise
@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=0))
def test_float32_vs_float64(feature):
    # float32 and float64 inputs should agree to ~1e-5.
    rng = np.random.default_rng(0)
    n = 128
    t, m, sigma = gen_lc(n, rng=rng)
    results = [
        feature(t.astype(dtype), m.astype(dtype), sigma.astype(dtype), sorted=True)
        for dtype in [np.float32, np.float64]
    ]
    assert_allclose(*results, rtol=1e-5, atol=1e-5)


# We don't want *Fit features here: too slow
@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=0))
def test_many_vs_call(feature):
    # Batched .many() must match per-light-curve __call__ exactly.
    rng = np.random.default_rng(0)
    n_obs = 128
    n_lc = 128
    lcs = [gen_lc(n_obs, rng=rng) for _ in range(n_lc)]
    call = np.stack([feature(*lc, sorted=True) for lc in lcs])
    many = feature.many(lcs, sorted=True, n_jobs=2)
    assert_array_equal(many, call)


def test_fill_value_not_enough_observations():
    # With a single observation a feature must either raise
    # (fill_value=None) or return the supplied fill_value.
    n = 1
    t = np.linspace(0.0, 1.0, n)
    m = t.copy()
    fill_value = -100.0
    sigma = np.ones_like(t)
    feature = lc.Kurtosis()
    with pytest.raises(ValueError):
        feature(t, m, sigma, fill_value=None)
    assert_array_equal(feature(t, m, sigma, fill_value=fill_value), fill_value)
@pytest.mark.parametrize("cls", all_feature_classes)
def test_nonempty_docstring(cls):
    # Every public feature class must carry a meaningful docstring.
    assert len(cls.__doc__) > 10


@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=2))
def test_check_t(feature):
    # NaN/inf in the time array must be rejected when check=True.
    n_obs = 128
    t, m, sigma = gen_lc(n_obs)
    t[0] = np.nan
    with pytest.raises(ValueError):
        feature(t, m, sigma, check=True)
    t[0] = np.inf
    with pytest.raises(ValueError):
        feature(t, m, sigma, check=True)


@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=2))
def test_check_m(feature):
    # NaN/inf in the magnitude array must be rejected when check=True.
    n_obs = 128
    t, m, sigma = gen_lc(n_obs)
    m[0] = np.nan
    with pytest.raises(ValueError):
        feature(t, m, sigma, check=True)
    m[0] = np.inf
    with pytest.raises(ValueError):
        feature(t, m, sigma, check=True)


# We need evaluators which use sigma
@pytest.mark.parametrize("cls", (lc.ExcessVariance, lc.LinearFit, lc.ReducedChi2, lc.StetsonK, lc.WeightedMean))
def test_check_sigma(cls):
    # NaN in sigma must be rejected; infinite sigma is explicitly allowed.
    n_obs = 128
    t, m, sigma = gen_lc(n_obs)
    sigma[0] = np.nan
    feature = cls()
    with pytest.raises(ValueError):
        feature(t, m, sigma, check=True)
    # infinite values are allowed for sigma
    sigma[0] = np.inf
    feature(t, m, sigma, check=True)


@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=5, rng=None))
@pytest.mark.parametrize("pickle_protocol", tuple(range(2, pickle.HIGHEST_PROTOCOL + 1)))
def test_pickling(feature, pickle_protocol):
    # A pickle round-trip must preserve evaluation results.
    n_obs = 128
    data = gen_lc(n_obs)
    values = feature(*data)
    b = pickle.dumps(feature, protocol=pickle_protocol)
    new_feature = pickle.loads(b)
    new_values = new_feature(*data)
    assert_array_equal(values, new_values)


@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=5, rng=None))
def test_copy_deepcopy(feature):
    # copy.copy and copy.deepcopy must both preserve evaluation results.
    n_obs = 128
    data = gen_lc(n_obs)
    values = feature(*data)
    copied = copy.copy(feature)
    values_copied = copied(*data)
    assert_array_equal(values, values_copied)
    deepcopied = copy.deepcopy(feature)
    values_deepcopied = deepcopied(*data)
    assert_array_equal(values, values_deepcopied)
# Representative features for the (de)serialization benchmarks below:
# a trivial one, a parametric one, and a large nested extractor.
PICKLE_BENCHMARK_FEATURES = [
    lc.Amplitude(),  # no parameters
    lc.BeyondNStd(1.5),  # parametric
    lc.Extractor(  # large
        lc.Amplitude(),
        lc.BeyondNStd(2.0),
        lc.Bins(
            [lc.Kurtosis(), lc.LinearTrend(), lc.WeightedMean()],
            window=2.0,
            offset=59500.5,
        ),
        lc.Periodogram(features=[lc.InterPercentileRange(0.01)], peaks=5, max_freq_factor=12.0),
    ),
]


@pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES)
def test_benchmark_pickle_loads(feature, benchmark):
    # Benchmark deserialization only; the dump is done up front.
    b = pickle.dumps(feature, protocol=pickle.HIGHEST_PROTOCOL)
    benchmark(pickle.loads, b)


@pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES)
def test_benchmark_pickle_dumps(feature, benchmark):
    benchmark(pickle.dumps, feature, protocol=pickle.HIGHEST_PROTOCOL)


@pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES)
def test_benchmark_copy(feature, benchmark):
    benchmark(copy.copy, feature)


@pytest.mark.parametrize("feature", PICKLE_BENCHMARK_FEATURES)
def test_benchmark_deepcopy(feature, benchmark):
    benchmark(copy.deepcopy, feature)


# We do not check pure MCMC because it requires a lot of iterations and would be too slow
@pytest.mark.parametrize("algo", ("ceres", "mcmc-ceres", "lmsder", "mcmc-lmsder"))
def test_bazin_fit_precise(algo):
    # Fitting noiseless model data must recover the true parameters.
    bazin = lc.BazinFit(algo)
    true_params = np.array([10.0, -2.0, 10.0, 10.0, 25.0])
    t = np.linspace(-50.0, 120.0, 1000)
    flux = bazin.model(t, true_params)
    fluxerr = np.ones_like(t)
    *params, reduced_chi2 = bazin(t, flux, fluxerr)
    assert_allclose(true_params, params, rtol=1e-4)  # tolerance set to underlying algorithms


@pytest.mark.parametrize("feature", gen_feature_evaluators(parametric_variants=5, rng=None))
def test_json_serialization(feature):
    # A JSON round-trip must preserve evaluation results.
    n_obs = 128
    data = gen_lc(n_obs)
    values = feature(*data)
    from_to_json = lc.feature_from_json(feature.to_json())
    values_from_to_json = from_to_json(*data)
    assert_array_equal(values, values_from_to_json)


def test_json_deserialization():
    # A known-good serialized extractor must deserialize and evaluate.
    json = """
    {"FeatureExtractor":{"features":[{"Transformed":{"feature":{"AndersonDarlingNormal":{}},"transformer":{"Ln1p":{}}}},
    {"Transformed":{"feature":{"BazinFit":{"algorithm":{"Ceres":{"loss_factor":null,"niterations":20}},"inits_bounds":
    {"OptionArrays":{"init":[null,null,null,null,null],"lower":[0.0036307805477010066,null,null,0.0001,0.0001],"upper":
    [3630780547.7010174,null,null,30000.0,30000.0]}},"ln_prior":{"Fixed":{"None":{}}}}},"transformer":{"BazinFit":
    {"mag_zp":23.899999618530273}}}},{"ExcessVariance":{}}]}}
    """
    from_json = lc.feature_from_json(json)
    assert isinstance(from_json, lc._FeatureEvaluator)
    from_json(*gen_lc(128))
|
light-curveREPO_NAMElight-curve-pythonPATH_START.@light-curve-python_extracted@light-curve-python-master@light-curve@tests@light_curve_ext@test_feature.py@.PATH_END.py
|
{
"filename": "array_descriptor.py",
"repo_name": "rapidsai/cuml",
"repo_path": "cuml_extracted/cuml-main/python/cuml/cuml/common/array_descriptor.py",
"type": "Python"
}
|
#
# Copyright (c) 2020-2023, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from dataclasses import dataclass, field
from cuml.internals.array import CumlArray
import cuml
from cuml.internals.input_utils import (
input_to_cuml_array,
determine_array_type,
)
@dataclass
class CumlArrayDescriptorMeta:
    """Per-attribute storage backing ``CumlArrayDescriptor``: the original
    input format plus cached conversions to other formats."""

    # The type for the input value. One of: _input_type_to_str
    input_type: str

    # Dict containing values in different formats. One entry per type. Both
    # the input type and any cached converted types will be stored. Erased
    # on set.
    values: dict = field(default_factory=dict)

    def get_input_value(self):
        """Return the stored value in its original input format."""
        message = "Missing value for input_type {}".format(self.input_type)
        assert self.input_type in self.values, message
        return self.values[self.input_type]

    def __getstate__(self):
        # Pickle only the original-format value; cached conversions can be
        # recomputed on demand after unpickling.
        return {
            "input_type": self.input_type,
            "input_value": self.get_input_value(),
        }

    def __setstate__(self, d):
        self.input_type = d["input_type"]
        self.values = {self.input_type: d["input_value"]}
class CumlArrayDescriptor:
    """
    Python descriptor object to control getting/setting `CumlArray` attributes
    on `Base` objects. See the Estimator Guide for an in depth guide.
    """

    def __init__(self, order="K"):
        # order corresponds to the order that the CumlArray attribute
        # should be in to work with the C++ algorithms.
        self.order = order

    def __set_name__(self, owner, name):
        # Remember the attribute name and publish "<name>_order" on the
        # owning class so the required memory order is discoverable.
        self.name = name
        setattr(owner, name + "_order", self.order)

    def _get_meta(
        self, instance, throw_on_missing=False
    ) -> CumlArrayDescriptorMeta:
        # Return the per-instance metadata object, lazily creating an empty
        # one unless throw_on_missing requests strict behavior.
        if throw_on_missing:
            if self.name not in instance.__dict__:
                raise AttributeError()

        return instance.__dict__.setdefault(
            self.name, CumlArrayDescriptorMeta(input_type=None, values={})
        )

    def _to_output(self, instance, to_output_type, to_output_dtype=None):
        # Convert the stored value to `to_output_type`, caching the result
        # in the metadata so repeated reads are cheap.
        existing = self._get_meta(instance, throw_on_missing=True)

        # Handle input_type==None which means we have a non-array object stored
        if existing.input_type is None:
            # Dont save in the cache. Just return the value
            return existing.values[existing.input_type]

        # Return a cached value if it exists
        if to_output_type in existing.values:
            return existing.values[to_output_type]

        # If the input type was anything but CumlArray, need to create one now
        if "cuml" not in existing.values:
            existing.values["cuml"] = input_to_cuml_array(
                existing.get_input_value(), order="K"
            ).array

        cuml_arr: CumlArray = existing.values["cuml"]

        # Do the conversion
        output = cuml_arr.to_output(
            output_type=to_output_type, output_dtype=to_output_dtype
        )

        # Cache the value
        existing.values[to_output_type] = output

        return output

    def __get__(self, instance, owner):
        # Class-level access returns the descriptor object itself.
        if instance is None:
            return self

        existing = self._get_meta(instance, throw_on_missing=True)

        assert len(existing.values) > 0

        # Get the global output type
        output_type = cuml.global_settings.output_type

        # First, determine if we need to call to_output at all
        if output_type == "mirror":
            # We must be internal, just return the input type
            return existing.get_input_value()
        else:
            # We are external, determine the target output type
            if output_type is None:
                # Default to the owning base object output_type
                output_type = instance.output_type

            if output_type == "input":
                # Default to the owning base object, _input_type
                output_type = instance._input_type

            return self._to_output(instance, output_type)

    def __set__(self, instance, value):
        existing = self._get_meta(instance)

        # Determine the type
        existing.input_type = determine_array_type(value)

        # Clear any existing values (cached conversions are now stale)
        existing.values.clear()

        # Set the existing value
        existing.values[existing.input_type] = value

    def __delete__(self, instance):
        if instance is not None:
            del instance.__dict__[self.name]
|
rapidsaiREPO_NAMEcumlPATH_START.@cuml_extracted@cuml-main@python@cuml@cuml@common@array_descriptor.py@.PATH_END.py
|
{
"filename": "_ticklen.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/colorbar/_ticklen.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class TicklenValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``mesh3d.colorbar.ticklen`` number attribute."""

    def __init__(self, plotly_name="ticklen", parent_name="mesh3d.colorbar", **kwargs):
        # Fall back to the schema defaults unless the caller overrides them.
        edit_type = kwargs.pop("edit_type", "colorbars")
        minimum = kwargs.pop("min", 0)
        super(TicklenValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            min=minimum,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@colorbar@_ticklen.py@.PATH_END.py
|
{
"filename": "validator.md",
"repo_name": "ultralytics/ultralytics",
"repo_path": "ultralytics_extracted/ultralytics-main/docs/en/reference/engine/validator.md",
"type": "Markdown"
}
|
---
description: Explore Ultralytics BaseValidator for model validation in PyTorch, TensorFlow, ONNX, and more. Learn to check model accuracy and performance metrics.
keywords: Ultralytics, BaseValidator, model validation, PyTorch, TensorFlow, ONNX, model accuracy, performance metrics
---
# Reference for `ultralytics/engine/validator.py`
!!! note
This file is available at [https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/validator.py](https://github.com/ultralytics/ultralytics/blob/main/ultralytics/engine/validator.py). If you spot a problem please help fix it by [contributing](https://docs.ultralytics.com/help/contributing/) a [Pull Request](https://github.com/ultralytics/ultralytics/edit/main/ultralytics/engine/validator.py) 🛠️. Thank you 🙏!
<br>
## ::: ultralytics.engine.validator.BaseValidator
<br><br>
|
ultralyticsREPO_NAMEultralyticsPATH_START.@ultralytics_extracted@ultralytics-main@docs@en@reference@engine@validator.md@.PATH_END.py
|
{
"filename": "obs_extinction.py",
"repo_name": "msiebert1/UCSC_spectral_pipeline",
"repo_path": "UCSC_spectral_pipeline_extracted/UCSC_spectral_pipeline-master/spectral_reduction/tmath/pydux/obs_extinction.py",
"type": "Python"
}
|
def obs_extinction(observat, scale_height=8300.0):
    """Return the atmospheric extinction scale factor for an observatory.

    The factor is exp(-height / scale_height): the fraction of a sea-level
    atmosphere remaining above the site, assuming an exponential atmosphere.

    Parameters
    ----------
    observat : str
        Observatory identifier (e.g. 'keck', 'lick', 'ctio').
    scale_height : float, optional
        Atmospheric scale height in meters. Defaults to 8300 m, the
        approximate scale height of the troposphere (generalized from the
        previously hard-coded constant; default preserves old behavior).

    Returns
    -------
    float
        Dimensionless site factor in (0, 1]. Unknown observatories print a
        loud warning and fall back to sea level (factor 1.0), matching the
        original behavior.
    """
    import numpy as np
    # Observatory altitudes in meters above sea level.
    observatory_heights = {'keck': 4160,
                           'gemini-north': 4213.4,
                           'gemini-south': 2737.0,
                           'gemini-n': 4213.4,
                           'gemini-s': 2737.0,
                           'soar': 2737.0,
                           'kpno': 2064.0,
                           'lick': 1285.0,
                           'palomar': 1706.0,
                           'mcdonald': 2075.0,
                           'flwo': 2320.0,
                           'mmto': 2600.0,
                           'sso': 1149.0,
                           'vlt': 2635.0,
                           'lco': 2282.0,
                           'lco-imacs': 2282.0,
                           'ctio': 2399.0}
    if observat in observatory_heights:
        height = observatory_heights[observat]
    else:
        # Keep the original loud-but-non-fatal behavior for unknown sites so
        # existing pipelines continue running with a sea-level fallback.
        print('OBSERVATORY UNKNOWN!!!!!!!!')
        height = 0.0
    sitefactor = np.exp(-1.0 * height / scale_height)
    return sitefactor
|
msiebert1REPO_NAMEUCSC_spectral_pipelinePATH_START.@UCSC_spectral_pipeline_extracted@UCSC_spectral_pipeline-master@spectral_reduction@tmath@pydux@obs_extinction.py@.PATH_END.py
|
{
"filename": "README.md",
"repo_name": "pynbody/pynbody",
"repo_path": "pynbody_extracted/pynbody-master/README.md",
"type": "Markdown"
}
|
pynbody
=======
[](https://github.com/pynbody/pynbody/actions)
[Pynbody](https://github.com/pynbody/pynbody) is an analysis framework for
N-body and hydrodynamic astrophysical simulations supporting PKDGRAV/Gasoline,
Gadget, Gadget4/Arepo, N-Chilada and RAMSES AMR outputs.
Python version support adheres roughly to [SPEC0](https://scientific-python.org/specs/spec-0000/).
### Documentation
The documentation is at [readthedocs](https://pynbody.readthedocs.io). To get a first sense of what pynbody
can do, try the [quick-start tutorial](https://pynbody.readthedocs.io/latest/tutorials/quickstart.html) or
[images tutorial](https://pynbody.readthedocs.io/latest/tutorials/images.html).
### Getting started
If python and the standard pip package manager is installed and properly configured, you can simply do:
```
$ pip install --pre pynbody
```
At the moment, we encourage new users to use pynbody version 2 release candidate, which is why the `--pre`
flag is applied. If you need strict backwards compatibility, omit `--pre`. Version 2 will soon become
the default (likely by the end of 2024).
If this fails, you may need some more detailed [installation
instructions](https://pynbody.readthedocs.io/latest/installation.html).
### Contributing
Help us make *pynbody* better! As you develop analysis for your science with pynbody, consider making your code available
for everyone else to use. You can do this by creating a [tutorial](https://pynbody.readthedocs.io/latest/tutorials/tutorials.html)
or by adding your code to the relevant sub-module and submitting a pull request (make a fork first — see
https://help.github.com/articles/using-pull-requests).
### Acknowledging the code
When using pynbody, please acknowledge it by citing the [Astrophysics Source Code Library entry](http://adsabs.harvard.edu/abs/2013ascl.soft05002P). Optionally you can also cite the Zenodo DOI for the specific version of pynbody that you are using, which may be found [here](https://doi.org/10.5281/zenodo.1297087).
### Support and Contact
If you have trouble with Pynbody or you have feature requests/suggestions you can [submit an issue](https://github.com/pynbody/pynbody/issues),
and/or send us an email on the [Usergroup mailing list](https://groups.google.com/forum/?fromgroups#!forum/pynbody-users).
|
pynbodyREPO_NAMEpynbodyPATH_START.@pynbody_extracted@pynbody-master@README.md@.PATH_END.py
|
{
"filename": "Wfunctions_test.ipynb",
"repo_name": "bradkav/WIMpy_NREFT",
"repo_path": "WIMpy_NREFT_extracted/WIMpy_NREFT-main/old_WIMpy/Wfunctions_test.ipynb",
"type": "Jupyter Notebook"
}
|
## Testing the Python implementation of the structure functions
```python
import numpy as np
from matplotlib import pylab as pl
```
Load in the fortran structure functions
```python
import WD, WM, WMP2, WP1, WP2, WS1, WS2, WS1D
print WD.__doc__
```
This module 'WD' is auto-generated with f2py (version:2).
Functions:
wd = calcwd(i,j,y,target)
.
Load in the python structure functions
```python
from Wfunctions_python import WD as pyWD
from Wfunctions_python import WM as pyWM
from Wfunctions_python import WMP2 as pyWMP2
from Wfunctions_python import WP1 as pyWP1
from Wfunctions_python import WP2 as pyWP2
from Wfunctions_python import WS1 as pyWS1
from Wfunctions_python import WS2 as pyWS2
from Wfunctions_python import WS1D as pyWS1D
```
```python
#Define a function which tests the structure functions
target_list = np.loadtxt("../Nuclei.txt", usecols=(0,), dtype='string')
A_list = np.loadtxt("../Nuclei.txt", usecols=(1,))
Avals = dict(zip(target_list, A_list))
```
Function to plot some of the structure functions for comparison:
```python
def Plot_WM(target):
#Range of recoil energies to test
E_list = np.logspace(-3, 2, 100)
#Calculate sensible 'y' values for the given target
A = Avals[target]
amu = 931.5e3 # keV
q1 = np.sqrt(2*A*amu*E_list)
#Recoil momentum over nucleon mass
qr = q1/amu
# Required for form factors
q2 = q1*(1e-12/1.97e-7)
b = np.sqrt(41.467/(45*A**(-1.0/3.0) - 25*A**(-2.0/3.0)))
y = (q2*b/2)**2
W_test_f = np.zeros((2,2,len(E_list)))
W_test_py = np.zeros((2,2,len(E_list)))
pl.figure(figsize=(7,5))
cols = [['r','g'], ['b','c']]
for i in range(2):
for j in range(2):
W_test_f[i,j,:] = np.vectorize(WM.calcwm)(i,j,y,target)
W_test_py[i,j,:] = pyWM.calcWM(i,j,y,target)
ls = '-'
pl.semilogx(E_list,W_test_py[i,j,:], linestyle='-', label=str(i)+str(j), color =cols[i][j], alpha=0.5)
pl.semilogx(E_list,W_test_f[i,j,:], linestyle='--',color =cols[i][j])
pl.semilogx(0,0, 'k-', label='Python')
pl.semilogx(0,0, 'k--', label='Fortran')
pl.legend(loc="best")
pl.xlabel(r'$E_R$ [keV]')
pl.ylabel(r'$W_M(E_R)$')
pl.title(target)
pl.show()
```
```python
Plot_WM("Ar40")
```

Function to carefully test all the structure functions (for a given target)
```python
def Compare_W(target, err_tol=1e-4):
print " Testing python and fortran W functions for target", target,"..."
found_discrepancy = False
#Range of recoil energies to test
E_list = np.logspace(-3, 2, 1000)
#Calculate sensible 'y' values for the given target
A = Avals[target]
amu = 931.5e3 # keV
q1 = np.sqrt(2*A*amu*E_list)
#Recoil momentum over nucleon mass
qr = q1/amu
# Required for form factors
q2 = q1*(1e-12/1.97e-7)
b = np.sqrt(41.467/(45*A**(-1.0/3.0) - 25*A**(-2.0/3.0)))
y = (q2*b/2)**2
fortran_funcs = [WM.calcwm, WD.calcwd, WMP2.calcwmp2, WP1.calcwp1, WP2.calcwp2, WS1.calcws1, WS2.calcws2, WS1D.calcws1d]
python_funcs = [pyWM.calcWM, pyWD.calcWD, pyWMP2.calcWMP2, pyWP1.calcWP1, pyWP2.calcWP2, pyWS1.calcWS1, pyWS2.calcWS2, pyWS1D.calcWS1D]
for fort_fun, py_fun in zip(fortran_funcs, python_funcs):
W_test_f = np.zeros(len(E_list))
W_test_py = np.zeros(len(E_list))
#print py_fun.__name__
for i in range(2):
for j in range(2):
W_test_f = np.vectorize(fort_fun)(i,j,y,target)
W_test_py = py_fun(i,j,y,target)
norm = np.trapz(W_test_f, E_list)/(np.max(E_list) - np.min(E_list))
#Test the values you calculate using the 2 codes
error = np.sqrt((W_test_f - W_test_py)**2/norm**2)
if np.any(error > err_tol):
found_discrepancy = True
print " Discrepancy found in ", py_fun.__name__, " for i = ", i, ", j = ", j
#print W_test_py
#print W_test_f
pl.figure(figsize=(7,5))
pl.loglog(E_list,np.abs(W_test_py), linestyle='-', alpha=0.5, label='Python')
pl.loglog(E_list,np.abs(W_test_f), linestyle='--',label='Fortran')
#pl.semilogx(E_list,error, linestyle='--',label='Fortran')
pl.legend(loc="best")
pl.xlabel(r'$E_R$ [keV]')
pl.ylabel(r'$W(E_R)$')
pl.title(target)
pl.show()
if (found_discrepancy == False):
print " No discrepancies found!"
print " "
return 1
else:
print " Some discrepancies found!"
print " "
return 0
```
```python
[Compare_W(target, err_tol = 1e-4) for target in target_list]
```
Testing python and fortran W functions for target Xe128 ...
No discrepancies found!
Testing python and fortran W functions for target Xe129 ...
No discrepancies found!
Testing python and fortran W functions for target Xe130 ...
No discrepancies found!
Testing python and fortran W functions for target Xe131 ...
No discrepancies found!
Testing python and fortran W functions for target Xe132 ...
No discrepancies found!
Testing python and fortran W functions for target Xe134 ...
/usr/local/lib/python2.7/site-packages/IPython/kernel/__main__.py:41: RuntimeWarning: invalid value encountered in divide
/usr/local/lib/python2.7/site-packages/IPython/kernel/__main__.py:42: RuntimeWarning: invalid value encountered in greater
No discrepancies found!
Testing python and fortran W functions for target Xe136 ...
No discrepancies found!
Testing python and fortran W functions for target Ar40 ...
No discrepancies found!
Testing python and fortran W functions for target C12 ...
No discrepancies found!
Testing python and fortran W functions for target Ge70 ...
No discrepancies found!
Testing python and fortran W functions for target Ge72 ...
No discrepancies found!
Testing python and fortran W functions for target Ge73 ...
No discrepancies found!
Testing python and fortran W functions for target Ge74 ...
No discrepancies found!
Testing python and fortran W functions for target Ge76 ...
No discrepancies found!
Testing python and fortran W functions for target Fluorine ...
No discrepancies found!
Testing python and fortran W functions for target Iodine ...
No discrepancies found!
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
The previous comparison used recoil energies, to make sure the two codes agree in the physically sensible region of parameter space. Below, we also compare on a grid of y-values. Admittedly, it is not obvious what a value of y = 7.5349272 corresponds to physically, but since the goal is only to check that both codes give the same results, that is fine:
```python
def Compare_W_y(target, err_tol=1e-4):
print " Testing python and fortran W functions for target", target,"..."
found_discrepancy = False
#Range of recoil energies to test
y = np.logspace(-3, 1, 1000)
fortran_funcs = [WM.calcwm, WD.calcwd, WMP2.calcwmp2, WP1.calcwp1, WP2.calcwp2, WS1.calcws1, WS2.calcws2, WS1D.calcws1d]
python_funcs = [pyWM.calcWM, pyWD.calcWD, pyWMP2.calcWMP2, pyWP1.calcWP1, pyWP2.calcWP2, pyWS1.calcWS1, pyWS2.calcWS2, pyWS1D.calcWS1D]
for fort_fun, py_fun in zip(fortran_funcs, python_funcs):
W_test_f = np.zeros(len(y))
W_test_py = np.zeros(len(y))
#print py_fun.__name__
for i in range(2):
for j in range(2):
W_test_f = np.vectorize(fort_fun)(i,j,y,target)
W_test_py = py_fun(i,j,y,target)
norm = np.trapz(W_test_f, y)/(np.max(y) - np.min(y))
#print norm
#Test the values you calculate using the 2 codes
error = np.sqrt((W_test_f - W_test_py)**2/norm**2)
if np.any(error > err_tol):
found_discrepancy = True
print " Discrepancy found in ", py_fun.__name__, " for i = ", i, ", j = ", j
#print W_test_py
#print W_test_f
pl.figure(figsize=(7,5))
pl.loglog(y,np.abs(W_test_py), linestyle='-', alpha=0.5, label='Python')
pl.loglog(y,np.abs(W_test_f), linestyle='--',label='Fortran')
#pl.semilogx(E_list,error, linestyle='--',label='Fortran')
pl.legend(loc="best")
pl.xlabel(r'$y$')
pl.ylabel(r'$W(y)$')
pl.title(target)
pl.show()
if (found_discrepancy == False):
print " No discrepancies found!"
print " "
return 1
else:
print " Some discrepancies found!"
print " "
return 0
```
```python
full_target_list = ['Ni59','Ni58','Fe56','Ca40','Ar40','S32','Si28',
'Al27','Mg24','Ne20','O16','N14','C12','He4',
'He3','H','Xe128','Xe129','Xe130','Xe131','Xe132',
'Xe134','Xe136','Ge70','Ge72','Ge73','Ge74','Ge76',
'Na23','Iodine','Fluorine']
[Compare_W_y(target, err_tol=1e-4) for target in full_target_list]
```
Testing python and fortran W functions for target Ni59 ...
No discrepancies found!
Testing python and fortran W functions for target Ni58 ...
No discrepancies found!
Testing python and fortran W functions for target Fe56 ...
No discrepancies found!
Testing python and fortran W functions for target Ca40 ...
No discrepancies found!
Testing python and fortran W functions for target Ar40 ...
No discrepancies found!
Testing python and fortran W functions for target S32 ...
No discrepancies found!
Testing python and fortran W functions for target Si28 ...
/usr/local/lib/python2.7/site-packages/IPython/kernel/__main__.py:27: RuntimeWarning: invalid value encountered in divide
/usr/local/lib/python2.7/site-packages/IPython/kernel/__main__.py:28: RuntimeWarning: invalid value encountered in greater
No discrepancies found!
Testing python and fortran W functions for target Al27 ...
No discrepancies found!
Testing python and fortran W functions for target Mg24 ...
No discrepancies found!
Testing python and fortran W functions for target Ne20 ...
No discrepancies found!
Testing python and fortran W functions for target O16 ...
No discrepancies found!
Testing python and fortran W functions for target N14 ...
No discrepancies found!
Testing python and fortran W functions for target C12 ...
No discrepancies found!
Testing python and fortran W functions for target He4 ...
No discrepancies found!
Testing python and fortran W functions for target He3 ...
No discrepancies found!
Testing python and fortran W functions for target H ...
No discrepancies found!
Testing python and fortran W functions for target Xe128 ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Xe129 ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Xe130 ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Xe131 ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Discrepancy found in calcWS1 for i = 0 , j = 0

Discrepancy found in calcWS1 for i = 0 , j = 1

Discrepancy found in calcWS1 for i = 1 , j = 0

Discrepancy found in calcWS1 for i = 1 , j = 1

Discrepancy found in calcWS1D for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Xe132 ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Xe134 ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Xe136 ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Ge70 ...
No discrepancies found!
Testing python and fortran W functions for target Ge72 ...
No discrepancies found!
Testing python and fortran W functions for target Ge73 ...
No discrepancies found!
Testing python and fortran W functions for target Ge74 ...
No discrepancies found!
Testing python and fortran W functions for target Ge76 ...
No discrepancies found!
Testing python and fortran W functions for target Na23 ...
No discrepancies found!
Testing python and fortran W functions for target Iodine ...
Discrepancy found in calcWMP2 for i = 1 , j = 0

Discrepancy found in calcWS1D for i = 1 , j = 0

Some discrepancies found!
Testing python and fortran W functions for target Fluorine ...
No discrepancies found!
[1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
1,
1,
0,
1]
```python
```
|
bradkavREPO_NAMEWIMpy_NREFTPATH_START.@WIMpy_NREFT_extracted@WIMpy_NREFT-main@old_WIMpy@Wfunctions_test.ipynb@.PATH_END.py
|
{
"filename": "gpwatch_7DT.py",
"repo_name": "SilverRon/gppy",
"repo_path": "gppy_extracted/gppy-main/gpwatch_7DT.py",
"type": "Python"
}
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#%%
#============================================================
# Module
#------------------------------------------------------------
import os
import sys
# import glob
import time
from util import tool
# IMSNGpy modules
# sys.path.append('/home/paek/imsngpy')
# from misc import *
# Astropy
from astropy.io import ascii
# from astropy.table import Table, vstack
#============================================================
# Function
#------------------------------------------------------------
def get_size(start_path='.'):
    """Recursively sum the sizes, in bytes, of regular files under *start_path*.

    Symbolic links are skipped so the total reflects actual on-disk uploads.
    """
    total = 0
    for root, _dirs, names in os.walk(start_path):
        for name in names:
            full = os.path.join(root, name)
            # skip if it is symbolic link
            if os.path.islink(full):
                continue
            total += os.path.getsize(full)
    return total
#------------------------------------------------------------
#%%
# Path
#------------------------------------------------------------
# Static filesystem layout for the watcher.
path_obsdata = '/data6/obsdata'  # root directory of incoming observation data
path_table = '/home/paek/table'  # key/config tables
path_log = '/home/paek/log'      # per-observatory processing logs
# path_gppy = '/home/paek/imsngpy/imsngpy'
# path_check_rasa36 = '/home/paek/qsopy/monitor/classify_rasa36.py'
# path_check_lsgt = '/home/paek/qsopy/monitor/classify_lsgt.py'
# path_imsng_routine = '/home/paek/qsopy/reduction/gregorycalib_2021.py'
path_imsng_routine = './7DT_Routine.py'    # routine run for regular (IMSNG) data
path_gecko_routine = './GECKO_Routine.py'  # routine run for 'S2' (GECKO) data
# Slack
# OAuth token is read from a local key table (kept out of version control).
keytbl = ascii.read(f'{path_table}/keys.dat')
OAuth_Token = keytbl['key'][keytbl['name']=='slack'].item()
#------------------------------------------------------------
#%%
# Data information
#------------------------------------------------------------
# Per-observatory bookkeeping (base path, log file, size counter, core count).
# NOTE(review): this dict appears unused by the main loop below, which builds
# paths directly from `obs` — confirm before removing.
obsdict = dict(
	# LOAO
	loao=dict(
		path_base='/data6/obsdata/LOAO',
		path_new='',
		log=f'{path_log}/loao.log',
		size=0,	# [bytes]
		core=1, # 4
		),
	# DOAO
	doao=dict(
		path_base='/data6/obsdata/DOAO',
		path_new='',
		log=f'{path_log}/doao.log',
		size=0,	# [bytes]
		core=1, # 4
		),
	# SOAO
	soao=dict(
		path_base='/data6/obsdata/SOAO',
		path_new='',
		log=f'{path_log}/soao.log',
		size=0,	# [bytes]
		core=1, # 4
		),
	# CBNUO
	cbnuo=dict(
		path_base='/data6/obsdata/CBNUO', # ./2021_0101
		path_new='',
		log=f'{path_log}/cbnuo.log',
		size=0,	# [bytes]
		core=1, # 4
		),
	# KHAO
	khao=dict(
		path_base='/data6/obsdata/KHAO', # ./2021_0101
		path_new='',
		log=f'{path_log}/khao.log',
		size=0,	# [bytes]
		core=2, # 4
		),
	# MDFTS
	mdfts=dict(
		path_base='/data6/obsdata/MDFTS', # ./2021_0101
		path_new='',
		log=f'{path_log}/mdfts.log',
		size=0,	# [bytes]
		core=2, # 4
		),
	# KCT_STX16803
	kct_stx16803=dict(
		path_base='/data6/obsdata/KCT_STX16803',
		path_new='',
		log=f'{path_log}/kct_stx16803.log',
		size=0,	# [bytes]
		core=1, # 4
		),
	# RASA36
	rasa36=dict(
		path_base='/data6/obsdata/RASA36',
		path_new='',
		log=f'{path_log}/rasa36.log',
		size=0,	# [bytes]
		core=1, # 4
		),
	)
#------------------------------------------------------------
# CLI / interactive configuration: observatory name and core count.
obslist = ['LOAO', 'DOAO', 'SOAO', 'CBNUO', 'KHAO', 'KCT_STX16803', 'RASA36', 'LSGT']
print('OBSERVATOR LIST :', end='')
print(obslist)
try:
	# Observatory name from the first CLI argument, uppercased.
	obs = (sys.argv[1]).upper()
except:
	# NOTE(review): bare except also swallows KeyboardInterrupt; IndexError
	# is the expected failure here. Falls back to interactive prompt.
	obs = input('obs:').upper()
# obs = 'LOAO'
delay = 10  # base polling delay in seconds (multiplied in the upload-wait loop)
try:
	# Number of worker cores from the second CLI argument.
	ncore = int(sys.argv[2])
except:
	ncore = 1
# Dead triple-quoted block kept from an earlier multi-observatory version:
'''
print(f"Wrong input in variable 'sphere' (sphere={sphere})")
print('Process all obs. data')
obslist = ['loao', 'doao', 'soao', 'cbnuo',]+['kct_stx16803', 'rasa36']
'''
#============================================================
#%%
# Main body
#------------------------------------------------------------
# Watch the obs-data directory forever; whenever an unlogged subdirectory
# appears, announce it on Slack, wait for its upload to finish (size stops
# changing), then launch the matching reduction routine.
print(f"{'='*60}\n\n[`gpwatch`/o_o] Watching new data for {obs} with {ncore} cores \n\n{'='*60}")
error_count = 0
st = time.time()
while True:
	try:
		# Elapsed-time ticker shown while idle-watching.
		et = time.time()
		delt = int(et - st)
		h = delt // (60*60)
		# BUGFIX: was `delt // 60`, which counted the hours into the
		# minutes field of the HH:MM:SS display.
		m = (delt % (60*60)) // 60
		s = delt % 60
		timer = '{:02d}:{:02d}:{:02d}'.format(h, m, s)
		print(timer, end="\r")

		log = f"{path_log}/{obs.lower()}.log"
		path_base = f"{path_obsdata}/{obs}"
		# Compare the current directory listing against the processing log.
		logtbl = ascii.read(log)
		dirlist = os.listdir(path_base)
		for f in dirlist:
			path_new = f"{path_base}/{f}"
			# New = present on disk, absent from the log (with or without a
			# trailing slash), and actually a directory.
			if (path_new not in logtbl['date']) & (f"{path_new}/" not in logtbl['date']) & (os.path.isdir(path_new)):
				print()
				#------------------------------------------------------------
				# Slack message: announce newly detected data
				#------------------------------------------------------------
				channel = '#pipeline'
				text = f'[`gpwatch`/{obs}] Detected New {os.path.basename(path_new)} Data'
				param_slack = dict(
					token = OAuth_Token,
					channel = channel,
					text = text,
					)
				tool.slack_bot(**param_slack)
				print(text)
				# Poll until the directory size stops growing (upload done).
				init_size = get_size(path_new)
				while True:
					# LOAO uploads arrive slowly; give it a longer interval.
					if obs.upper() == 'LOAO':
						time.sleep(int(delay*6))
					else:
						time.sleep(int(delay*2))
					now_size = get_size(path_new)
					if init_size != now_size:
						print(f'Still uploading {os.path.basename(path_new)} : {init_size} --> {now_size}')
						init_size = now_size
					else:
						# Upload finished: run the appropriate routine.
						# ('S2' directories belong to the GECKO program.)
						if 'S2' not in path_new:
							com = f"python {path_imsng_routine} {obs} {ncore}"
						else:
							com = f"python {path_gecko_routine} {obs} {ncore}"
						print(com)
						os.system(com)
						print(f"{'='*60}\n\n[gpwatch/o_o] Watching new data for {obs} with {ncore} cores \n\n{'='*60}")
						break
	except Exception as e:
		print(e)
		# BUGFIX: the counter was never incremented, so the abort branch
		# below (`error_count > 10`) was unreachable.
		error_count += 1
		#------------------------------------------------------------
		# Slack message: report the error
		#------------------------------------------------------------
		channel = '#pipeline'
		text = f'[`gpwatch`] Error for {obs} data (ERROR COUNT={error_count}/10)\n'+f"```{e}```"
		param_slack = dict(
			token = OAuth_Token,
			channel = channel,
			text = text,
			)
		tool.slack_bot(**param_slack)
		print(text)
		if error_count > 10:
			print("ABORT THE gpwatch!")
			#------------------------------------------------------------
			# Slack message: announce the abort
			#------------------------------------------------------------
			channel = '#pipeline'
			text = f'[`gpwatch`] ABORT the code for {obs} (ERROR COUNT={error_count}>10)'
			param_slack = dict(
				token = OAuth_Token,
				channel = channel,
				text = text,
				)
			tool.slack_bot(**param_slack)
			break
	time.sleep(1)
|
SilverRonREPO_NAMEgppyPATH_START.@gppy_extracted@gppy-main@gpwatch_7DT.py@.PATH_END.py
|
{
"filename": "rfi_inspect_2458155.ipynb",
"repo_name": "HERA-Team/H1C_IDR3_Notebooks",
"repo_path": "H1C_IDR3_Notebooks-main/rfi_inspect/rfi_inspect_2458155.ipynb",
"type": "Jupyter Notebook"
}
|
# RFI Inspection Daily RTP Notebook
```python
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import glob
import os
from astropy import units
from copy import deepcopy
from pyuvdata import UVFlag
import matplotlib.colors as colors
from matplotlib import cm
%matplotlib inline
%config InlineBackend.figure_format = 'retina'
```
```python
# If you want to run this notebook locally, copy the output of the next cell into the first few lines of this cell.
# JD = '2459122'
# data_path = '/lustre/aoc/projects/hera/H4C/2459122'
# os.environ["JULIANDATE"] = JD
# os.environ["DATA_PATH"] = data_path
```
```python
# Use environment variables to figure out path to data
JD = os.environ['JULIANDATE']
data_path = os.environ['DATA_PATH']
print(f'JD = "{JD}"')
print(f'data_path = "{data_path}"')
JD = int(JD)
```
JD = "2458155"
data_path = "/lustre/aoc/projects/hera/H1C_IDR3/IDR3_2/2458155"
```python
uvf = UVFlag(f'{data_path}/zen.{JD}.total_threshold_and_a_priori_flags.h5')
# Load in the metadata for easier plotting.
freqs = np.unique(uvf.freq_array)
times = np.unique(uvf.time_array)
lsts = np.unique(uvf.lst_array)
chans = np.arange(freqs.size)
plot_times = times - np.floor(times[0])
lsts_hr = lsts * units.rad.to("cycle") * units.day.to("hr")
freqs_MHz = freqs * units.Hz.to("MHz")
extent = (freqs_MHz[0], freqs_MHz[-1], plot_times[-1], plot_times[0])
```
```python
plt.figure(figsize=(16,12))
cax = plt.imshow(uvf.flag_array[:,:,0], aspect='auto', interpolation='nearest',
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
Text(0.5, 0, 'Channel')

# Figure 1(a): Full day of XRFI flags
Yellow is flagged. Blue is unflagged.
```python
xrfi_dirs = sorted(glob.glob(f'{data_path}/zen.{JD}.?????.xrfi'))
print(f'Found {len(xrfi_dirs)} directories containing XRFI intermediate data products.')
files1 = [glob.glob(f'{d}/*combined_metrics1.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files1)} combined round 1 XRFI metrics files.')
files2 = [glob.glob(f'{d}/*combined_metrics2.h5')[0] for d in xrfi_dirs]
print(f'Found {len(files2)} combined round 2 XRFI metrics files.')
uvf1 = UVFlag(files1)
uvf2 = UVFlag(files2)
uvf2.metric_array = np.where(np.isinf(uvf2.metric_array), uvf1.metric_array,
uvf2.metric_array)
```
Found 73 directories containing XRFI intermediate data products.
Found 73 combined round 1 XRFI metrics files.
Found 73 combined round 2 XRFI metrics files.
```python
plt.figure(figsize=(16,12))
max_abs = 100
if np.max(uvf2.metric_array) > max_abs:
extend = 'max'
if np.min(uvf2.metric_array) < -max_abs:
extend = 'both'
elif np.min(uvf2.metric_array) < -max_abs:
extend = 'min'
else:
extend = 'neither'
plt.imshow(uvf2.metric_array[:,:,0], aspect='auto', cmap='RdBu_r',
norm=colors.SymLogNorm(linthresh=1,vmin=-max_abs, vmax=max_abs),
extent=[uvf.freq_array[0] / 1e6, uvf.freq_array[-1] / 1e6,
uvf.time_array[-1] - JD, uvf.time_array[0] - JD])
plt.colorbar(pad=.07, extend=extend,
label='RFI Detection Significance ($\sigma$s)')
plt.title('Combined XRFI Metrics')
plt.xlabel('Frequency (MHz)')
plt.ylabel(f'JD - {JD}')
ax2 = plt.gca().twinx()
ax2.set_ylim([uvf.lst_array[0] * 12 / np.pi, uvf.lst_array[-1] * 12 / np.pi])
ax2.invert_yaxis()
ax2.set_ylabel('LST (hours)')
ax3 = plt.gca().twiny()
ax3.set_xlim([0, uvf.Nfreqs - 1])
ax3.set_xlabel('Channel');
```
default base will change from np.e to 10 in 3.4. To suppress this warning specify the base keyword argument.
Text(0.5, 0, 'Channel')

## Figure 2(a): Combined XRFI Detection Significance
This figure shows round 2 XRFI metrics (mean filter outliers) combined in quadrature. When flagged in round 1 of XRFI, round 1's combined median filter metrics are used instead.
```python
# Load in the flags from each round of XRFI flagging.
low_level_flag_labels = (
"abscal_chi_sq_flags1",
"abscal_chi_sq_flags2",
"ag_flags1",
"ag_flags2",
"apriori_flags",
"auto_flags1",
"auto_flags2",
"ax_flags1",
"ax_flags2",
"combined_flags1",
"combined_flags2",
"cross_flags1",
"cross_flags2",
"flags1",
"flags2",
"og_flags1",
"og_flags2",
"omnical_chi_sq_flags1",
"omnical_chi_sq_flags2",
"ox_flags1",
"ox_flags2",
"v_flags1",
"v_flags2",
)
# Keep the thresholded flags separate for easier analysis.
thresholded_flag_labels = (
"abscal_chi_sq_renormed_threshold_flags",
"ag_threshold_flags",
"auto_threshold_flags",
"ax_threshold_flags",
"combined_threshold_flags",
"cross_threshold_flags",
"og_threshold_flags",
"omnical_chi_sq_renormed_threshold_flags",
"ox_threshold_flags",
"v_threshold_flags",
"total_threshold_and_a_priori_flags",
)
low_level_flags = {}
for file_id in low_level_flag_labels:
flag_files = []
for xrfi_dir in xrfi_dirs:
matching_files = glob.glob(os.path.join(xrfi_dir, f"*.{file_id}.h5"))
if len(matching_files) > 0:
flag_files.append(matching_files[0])
if len(flag_files) > 0:
uvf = UVFlag(flag_files)
low_level_flags[file_id] = np.squeeze(uvf.flag_array)
thresholded_flags = {}
for file_id in thresholded_flag_labels:
flag_file = f"{data_path}/zen.{JD}.{file_id}.h5"
if os.path.exists(flag_file):
uvf = UVFlag(flag_file)
thresholded_flags[file_id] = np.squeeze(uvf.flag_array)
all_flags = dict(**low_level_flags, **thresholded_flags)
```
```python
label_mapping = {
f"Round {i}": {
"Priors": ("apriori_flags", "flags1")[i-1],
"Autocorrs": f"auto_flags{i}",
"Crosscorrs": f"cross_flags{i}",
"Omnical\nVisibilities": f"v_flags{i}",
"Omnical\nGains": f"og_flags{i}",
r"Omnical $\chi^2$": f"ox_flags{i}",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_flags{i}",
"Abscal\nGains": f"ag_flags{i}",
r"Abscal $\chi^2$": f"ax_flags{i}",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_flags{i}",
"Combined\nMetrics": f"combined_flags{i}",
} for i in (1,2)
}
label_mapping["Round 3"] = {
"Priors": "flags2",
"Autocorrs": "auto_threshold_flags",
"Crosscorrs": "cross_threshold_flags",
"Omnical\nGains": "og_threshold_flags",
r"Omnical $\chi^2$": "ox_threshold_flags",
"Omnical\nGlobal $\chi^2$": f"omnical_chi_sq_renormed_threshold_flags",
"Omnical\nVisibilities": "v_threshold_flags",
"Abscal\nGains": "ag_threshold_flags",
r"Abscal $\chi^2$": "ax_threshold_flags",
r"Abscal\nGlobal $\chi^2$": f"abscal_chi_sq_renormed_threshold_flags",
"Combined\nMetrics": "combined_threshold_flags",
'Final\nFlags': "total_threshold_and_a_priori_flags",
}
# remove labels for metrics not used
label_mapping = {rnd: {label: flags for label, flags in labels.items() if flags in all_flags}
for rnd, labels in label_mapping.items()}
```
```python
# Pick easily distinguishable colors
color_palette = (
'#000000', #black
'#ffffff', #white
'#800000', #maroon
'#808000', #olive
'#008b8b', #darkcyan
'#000080', #navy
'#ff8c00', #darkorange
'#ffff00', #yellow
'#00ff00', #lime
'#0000ff', #blue
'#ff00ff', #fuchsia
'#1e90ff', #dodgerblue
'#98fb98', #palegreen
'#ff1493', #deeppink
)
# assign a unique color to a label
label_to_color_map = {"Unflagged": color_palette[0]}
color_index = 1
for mapping in label_mapping.values():
for label in tuple(mapping.keys()) + ("2+ Separate\nMetrics",):
if label not in label_to_color_map:
label_to_color_map[label] = color_palette[color_index]
color_index += 1
```
```python
# Figure out which flags are unique to each step and source
unique_flags_by_stage = {}
for round_label, mapping in label_mapping.items():
unique_flags_by_stage[round_label] = {}
# handle prior flags
prior_flags = low_level_flags[mapping["Priors"]]
unique_flags_by_stage[round_label]["Priors"] = prior_flags
# handle all other flag types
overlap_flags = np.zeros_like(np.squeeze(uvf.flag_array))
for label, file_id in mapping.items():
if label in ["Priors", "Final\nFlags", "Combined\nMetrics"]: # skip these, they are special
continue
flags = all_flags[file_id]
unique_flags = flags.copy()
for other_label, other_file_id in mapping.items():
if other_label in [label, "Priors", "Final\nFlags", "Combined\nMetrics"]:
continue
other_flags = all_flags[other_file_id]
unique_flags &= ~other_flags
overlap_region = flags & other_flags & ~prior_flags
overlap_flags[overlap_region] = True
unique_flags_by_stage[round_label][label] = unique_flags
unique_flags_by_stage[round_label]["2+ Separate\nMetrics"] = overlap_flags
# handle combined metrics separately so that it doesn't affect "2+ Separate\nMetrics"
all_flags_so_far = np.sum(list(unique_flags_by_stage[round_label].values()), axis=0).astype(bool)
combined_metrics_flags = all_flags[mapping["Combined\nMetrics"]]
unique_flags_by_stage[round_label]["Combined\nMetrics"] = combined_metrics_flags & ~all_flags_so_far
# Figure out which flags got applied at the very end when the a priori YAML was used
all_other_round_3_flags = np.sum([flags for flags in unique_flags_by_stage['Round 3'].values()], axis=0).astype(bool)
unique_flags_by_stage['Round 3']["Final\nFlags"] = all_flags[label_mapping['Round 3']["Final\nFlags"]] & (~all_other_round_3_flags)
```
```python
cmap = plt.cm.colors.ListedColormap(list(label_to_color_map.values()))
norm = plt.cm.colors.Normalize(vmin=0, vmax=1)
smap = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
colored_flags = {}
for round_label, flag_dict in unique_flags_by_stage.items():
colored_flags[round_label] = np.zeros(np.squeeze(uvf.flag_array).shape)
for label, flags in flag_dict.items():
colored_flags[round_label][flags] = list(label_to_color_map.keys()).index(label) / (len(label_to_color_map) - 1)
```
```python
def plot_flag_evolution(freq_slice):
    """Plot one flag-evolution waterfall per flagging round for the given channels.

    freq_slice : integer array of channel indices to display.

    NOTE(review): relies on notebook globals defined in earlier cells
    (colored_flags, plot_times, lsts_hr, freqs_MHz, JD, cmap, smap,
    label_to_color_map) — presumably all set before this cell runs; verify.
    """
    fig, axes = plt.subplots(len(colored_flags), figsize=(15, 11 * len(colored_flags)), dpi=300)
    # Figure out the details for which part of the flag arrays to plot.
    tmin, tmax = plot_times[0], plot_times[-1]
    lstmin, lstmax = lsts_hr[0], lsts_hr[-1]
    # First and last frequency of the slice define the x-extent.
    fmin, fmax = freqs_MHz[freq_slice][::freq_slice.size - 1]
    extent = (fmin, fmax, tmax, tmin)
    # Actually plot the things: one panel per flagging round.
    for ax, (label, flags) in zip(axes, colored_flags.items()):
        ax.set_title(label, fontsize=16)
        ax.imshow(flags[:,freq_slice], aspect="auto", extent=extent, cmap=cmap, vmin=0, vmax=1)
        # Twin axes give LST (right) and channel number (top) scales.
        twinx = ax.twinx()
        twiny = ax.twiny()
        twinx.set_ylim(lstmax, lstmin)
        twiny.set_xlim(freq_slice[0], freq_slice[-1])
        ax.set_xlabel("Frequency (MHz)", fontsize=12)
        ax.set_ylabel(f"JD - {JD}", fontsize=12)
        twinx.set_ylabel("LST (hour)", fontsize=12)
        twiny.set_xlabel("Channel", fontsize=12)
    fig.tight_layout()
    # One discrete colorbar per panel, labeled with the flag-source names.
    for ax in axes.ravel():
        cbar = fig.colorbar(smap, ax=ax, orientation="horizontal", pad=0.1)
        cbar.set_ticks(np.linspace(0, 1, 2 * len(cmap.colors) + 1)[1::2])
        cbar.set_ticklabels(list(label_to_color_map.keys()))
```
```python
# Plot flags in the low-band.
if np.any(freqs_MHz < 100):
freq_slice = np.argwhere(freqs_MHz < 100).flatten() # Low-band, pre-FM
plot_flag_evolution(freq_slice)
```
## Figure 3: Flag Evolution in the Low Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies below the FM band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files. *Note: for H1C data, this plot will be skipped.*
```python
# Plot flags in the mid-band.
freq_slice = np.argwhere(np.logical_and(freqs_MHz >= 100, freqs_MHz < 200)).flatten()
plot_flag_evolution(freq_slice)
```

## Figure 4: Flag Evolution in the Mid-Band
This figure delineates which steps different flags are introduced in, but does not make a distinction between sources when multiple flagging routines flag the same region of the waterfall. The plot shows flags for frequencies between the FM band and the analog TV band, for the entire night. The top plot shows the flags for the first round of flagging (median filter), where the prior flags are the apriori flags; the middle plot shows the flags for the second round of flagging (mean filter), where the prior flags are the combined flags from the first round of flagging (plus extra flags based on the metrics added in quadrature); the bottom plot shows the flags for the final round of flagging (thresholding), where the prior flags are the combined flags from round 2 (plus extra flags based on the metrics added in quadrature). After threshold flagging, the "final flags" also include any apriori flags from the YAML files.
```python
# Calculate occupancies for different important sets of flags.
label_mapping = {
"A Priori": "apriori_flags",
"Median Filter": "flags1",
"Mean Filter": "flags2",
"Thresholding": "total_threshold_and_a_priori_flags",
}
occupancies = {}
for axis, axis_label in enumerate(("Frequency", "Time")):
occupancies[axis_label] = {}
for flag_label, flag_id in label_mapping.items():
flags = all_flags[flag_id]
occupancies[axis_label][flag_label] = flags.mean(axis=(1-axis))
```
```python
fig, axes = plt.subplots(2, figsize=(15,14), dpi=200)
for i, items in enumerate(zip(axes.ravel(), occupancies.items())):
ax, (occupancy_axis, flag_dict) = items
xvalues = (plot_times, freqs_MHz)[i]
alt_xvalues = (lsts_hr, chans)[i]
xlabel = (f"JD - {JD}", "Frequency (MHz)")[i]
ylabel = (
"Fraction of Channels Flagged",
"Fraction of Integrations Flagged"
)[i]
alt_xlabel = ("LST (hours)", "Channel")[i]
ax.set_xlabel(xlabel, fontsize=12)
ax.set_ylabel(ylabel, fontsize=12)
for flag_label, occupancy in flag_dict.items():
ax.plot(xvalues, occupancy, label=flag_label)
twin_ax = ax.twiny()
twin_ax.set_xlim(alt_xvalues[0], alt_xvalues[-1])
twin_ax.set_xlabel(alt_xlabel, fontsize=12)
ax.legend()
```

## Figure 5: Flagging Occupancies
These plots show the flagging occupancies for the Round 0 Flags (A Priori), Round 1 Flags (Median Filter), Round 2 Flags (Mean Filter), and Round 3 Flags (Thresholding). The top plot shows the fraction of channels flagged at each integration for each set of flags, and the bottom plot shows the fraction of integrations flagged as a function of frequency.
# Metadata
```python
from hera_qm import version
print(version.construct_version_info())
```
{'version': '1.0', 'git_origin': 'git@github.com:HERA-Team/hera_qm.git', 'git_hash': 'a15c511f7e0fc30602257c9eb5ff761bc83ef6a5', 'git_description': 'v1.1-313-ga15c511', 'git_branch': 'master'}
```python
```
|
HERA-TeamREPO_NAMEH1C_IDR3_NotebooksPATH_START.@H1C_IDR3_Notebooks-main@rfi_inspect@rfi_inspect_2458155.ipynb@.PATH_END.py
|
{
"filename": "_contour.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/layout/template/data/_contour.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ContourValidator(_plotly_utils.basevalidators.CompoundArrayValidator):
    """Auto-generated validator for ``contour`` traces in ``layout.template.data``."""

    def __init__(
        self, plotly_name="contour", parent_name="layout.template.data", **kwargs
    ):
        super(ContourValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # Name of the trace class this validator instantiates.
            data_class_str=kwargs.pop("data_class_str", "Contour"),
            # Empty docs by default; callers may override via kwargs.
            data_docs=kwargs.pop(
                "data_docs",
                """
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@layout@template@data@_contour.py@.PATH_END.py
|
{
"filename": "plot_opacity.py",
"repo_name": "dullemond/radmc3d-2.0",
"repo_path": "radmc3d-2.0_extracted/radmc3d-2.0-master/opac/dust_continuum/astrosil_laordraine/plot_opacity.py",
"type": "Python"
}
|
import numpy as np
import matplotlib.pyplot as plt
from radmc3dPy.analyze import *

# Read the astronomical-silicate (Laor & Draine 1993) opacity, including the
# full scattering matrix (scatmat=True).
o = readOpac(ext=['astrosilicate_laordraine93'],scatmat=[True])

# Figure 1: absorption and scattering opacity vs wavelength (log-log).
plt.figure()
plt.plot(o.wav[0],o.kabs[0],label=r'$\kappa_{a}$ (absorption)')
plt.plot(o.wav[0],o.ksca[0],label=r'$\kappa_{s}$ (scattering)')
plt.xscale('log')
plt.yscale('log')
plt.ylim(ymin=1e-2)
plt.xlabel(r'$\lambda [\mu\mathrm{m}]$')
plt.ylabel(r'$\kappa [\mathrm{cm}^2/\mathrm{g}]$')
plt.legend()

# Figure 2: normalized scattering phase function 4*pi*Z11/kappa_s at three
# wavelength indices (labels assume a specific wavelength grid — TODO confirm
# the grid indices match the labels below).
ilam = [50,74,100]
lamn = [r'$\lambda=1\,\mu\mathrm{m}$',r'$\lambda=3\,\mu\mathrm{m}$',r'$\lambda=10\,\mu\mathrm{m}$']
plt.figure()
for i in range(len(ilam)):
    plt.plot(o.scatang[0],4*np.pi*o.z11[0][ilam[i]]/o.ksca[0][ilam[i]],label=lamn[i])
plt.yscale('log')
plt.xlabel(r'$\theta [\mathrm{deg}]$')
plt.ylabel(r'$4\pi\,Z_{11}/\kappa_{s}$')
plt.legend()
plt.show()
|
dullemondREPO_NAMEradmc3d-2.0PATH_START.@radmc3d-2.0_extracted@radmc3d-2.0-master@opac@dust_continuum@astrosil_laordraine@plot_opacity.py@.PATH_END.py
|
{
"filename": "prop_begin.py",
"repo_name": "ajeldorado/falco-python",
"repo_path": "falco-python_extracted/falco-python-master/falco/proper/prop_begin.py",
"type": "Python"
}
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
#
# Modified by J. Krist - 19 April 2019 - moved search for FFTW wisdom file
# from prop_fftw to here to avoid redundant searches
import falco.proper as proper
import numpy as np
def prop_begin(beam_diameter, lamda, grid_n, beam_diam_fraction = 0.5):
    """Initialize variables for PROPER routines.

    This routine must be called before any other PROPER routines in order to
    initialize required variables.

    Parameters
    ----------
    beam_diameter : float
        Initial diameter of beam in meters

    lamda : float
        Wavelength in meters

    grid_n : int
        Wavefront gridsize in pixels (n by n)

    beam_diam_fraction : float
        Fraction of the grid width corresponding to the beam diameter. If not
        specified, it is assumed to be 0.5.

    Returns
    -------
    wf : numpy ndarray
        Initialized wavefront array structure created by this routine
    """
    grid_n = int(grid_n)
    # Publish grid size and beam diameter (in pixels) as module-level state;
    # later PROPER routines read these globals.
    proper.n = grid_n
    ndiam = grid_n * beam_diam_fraction
    proper.ndiam = ndiam
    diam = float(beam_diameter)
    w0 = diam / 2.
    # Rayleigh range of a Gaussian beam with waist w0: z_R = pi*w0^2/lambda.
    z_ray = np.pi * w0**2 / lamda
    proper.rayleigh_factor = 1.
    proper.old_opd = 0.
    # Fixed capacity of the optional surface-table arrays below.
    nlist = 1500

    # Pre-load FFTW wisdom once here (see module header note) so prop_fftw
    # does not search for it repeatedly.
    if proper.use_fftw == True and proper.use_ffti == False:
        proper.prop_load_fftw_wisdom( grid_n, proper.fft_nthreads )

    # Create WaveFront object
    wf = proper.WaveFront(diam, ndiam, lamda, grid_n, w0, z_ray)

    # Optional bookkeeping arrays used when a prescription table is requested.
    if proper.do_table:
        proper.lens_fl_list = np.zeros(nlist, dtype = np.float64)  # list of lens focal lengths
        proper.lens_eff_fratio_list = np.zeros(nlist, dtype = np.float64)  # list of effective fratios after each lens
        proper.beam_diam_list = np.zeros(nlist, dtype = np.float64)  # list of beam diameters at each lens
        proper.distance_list = np.zeros(nlist, dtype = np.float64)  # list of propagation distances
        proper.surface_name_list = np.zeros(nlist, dtype = "S25")  # list of surface names
        proper.sampling_list = np.zeros(nlist, dtype = np.float64)  # list of sampling at each surface
        proper.action_num = 0

    return wf
|
ajeldoradoREPO_NAMEfalco-pythonPATH_START.@falco-python_extracted@falco-python-master@falco@proper@prop_begin.py@.PATH_END.py
|
{
"filename": "photometry_AB_to_Jy.py",
"repo_name": "astrom-tom/SPARTAN",
"repo_path": "SPARTAN_extracted/SPARTAN-master/spartan/photometry_AB_to_Jy.py",
"type": "Python"
}
|
'''
The SPARTAN Project
-------------------
conversion from AB to Jy and
other way around
From
http://www.star.bristol.ac.uk/~mbt/stilts/sun256/uk.ac.starlink.ttools.func.Fluxes.html
@Author R. THOMAS
@year 2017
@place UV/LAM/UCBJ/ESO
@License: GPL v3.0 licence - see LICENCE.txt
'''
##python libraries
import numpy
def AB_to_jy(AB, err, string):
    '''
    Convert a magnitude in the AB system to a flux in Jansky.

    Parameters
    ----------
    AB      float, AB magnitude measurement
    err     float, error on the AB magnitude
    string  str, 'yes' to return the results as numpy string scalars

    Return
    ------
    Jy      float, flux in Jansky
    Jerr    float, error on the flux in Jansky (mean of the two one-sided
            errors)
    '''
    # F[Jy] = 1e23 * 10**(-(AB + 48.6)/2.5)  (AB zero point = -48.6).
    Jy = 1e23 * 10**(-(AB+48.6)/2.5)
    # Propagate the magnitude error asymmetrically: a brighter magnitude
    # (AB - err) gives a higher flux, a fainter one (AB + err) a lower flux.
    # (Bug fix: the original used AB + err on both sides, making Jyp and Jym
    # exact negatives so the returned mean error was always zero.)
    Jyp = 1e23 * 10**(-(AB-err+48.6)/2.5) - Jy
    Jym = Jy - 1e23 * 10**(-(AB+err+48.6)/2.5)
    if string == 'yes':
        # Bug fix: numpy.string does not exist; the attribute is
        # numpy.string_ (as used in Jy_to_AB).
        return numpy.string_(Jy), numpy.string_(numpy.mean((Jyp, Jym)))
    else:
        return Jy, numpy.mean((Jyp, Jym))
def Jy_to_AB(Jy, Jerr, string):
    '''
    Convert a flux in Jansky to a magnitude in the AB system.

    Parameters
    ----------
    Jy      float, flux measurement in Jansky
    Jerr    float, error on the flux in Jansky
    string  str, 'yes' to return the results as numpy string scalars

    Return
    ------
    AB      float, magnitude in AB
    ABerr   float, error on the AB magnitude (mean of the two one-sided
            errors)
    '''
    # AB = -2.5*log10(F[Jy]) + 2.5*23 - 48.6.
    AB = 2.5 * (23 - numpy.log10(Jy)) - 48.6
    # Magnitudes at the +/- 1 sigma fluxes: higher flux -> brighter (smaller)
    # magnitude, lower flux -> fainter (larger) magnitude.
    ABp = 2.5 * (23 - numpy.log10(Jy+Jerr)) - 48.6
    ABm = 2.5 * (23 - numpy.log10(Jy-Jerr)) - 48.6
    # Bug fix: the error is the mean of the two one-sided magnitude offsets.
    # (The original averaged ABp and ABm themselves, which returns a value
    # close to AB rather than the uncertainty on AB.)
    ABerr = numpy.mean((AB - ABp, ABm - AB))
    if string == 'yes':
        return numpy.string_(AB), numpy.string_(ABerr)
    else:
        return AB, ABerr
|
astrom-tomREPO_NAMESPARTANPATH_START.@SPARTAN_extracted@SPARTAN-master@spartan@photometry_AB_to_Jy.py@.PATH_END.py
|
{
"filename": "fit_MM_MCPM_EMCEE_yaml_v1.py",
"repo_name": "CPM-project/MCPM",
"repo_path": "MCPM_extracted/MCPM-master/examples/fit_MM_MCPM_EMCEE_yaml_v1.py",
"type": "Python"
}
|
"""
A simple script for running fit_MM_MCPM_EMCEE_v1.py using input from
a YAML file.
"""
import yaml
import sys
import os
from astropy.coordinates import SkyCoord
from astropy import units as u
from fit_MM_MCPM_EMCEE_v1 import fit_MM_MCPM_EMCEE
if __name__ == '__main__':
    # Exactly one command-line argument: the YAML settings file.
    if len(sys.argv) != 2:
        raise ValueError('Exactly one argument needed - yaml file')
    file_in = sys.argv[1]
    with open(file_in) as in_file:
        settings = yaml.safe_load(in_file)
    # The parameters to fit are the keys of the starting_settings mapping.
    settings['parameters_to_fit'] = [*settings['starting_settings']]
    # Replace separate RA/Dec entries (assumed decimal degrees — TODO confirm
    # against the YAML format) with a single SkyCoord.
    ra = settings.pop("RA")
    dec = settings.pop("Dec")
    settings['skycoord'] = SkyCoord(ra, dec, unit=(u.deg, u.deg))
    # Output files are named after the config file (extension stripped).
    settings['config_file_root'] = os.path.splitext(file_in)[0]
    # Force plain EMCEE sampling (single temperature, no PTSampler).
    settings['emcee_settings']['n_temps'] = 1
    settings['emcee_settings']['PTSampler'] = False
    fit_MM_MCPM_EMCEE(**settings)
|
CPM-projectREPO_NAMEMCPMPATH_START.@MCPM_extracted@MCPM-master@examples@fit_MM_MCPM_EMCEE_yaml_v1.py@.PATH_END.py
|
{
"filename": "optim.py",
"repo_name": "pyro-ppl/pyro",
"repo_path": "pyro_extracted/pyro-master/pyro/optim/optim.py",
"type": "Python"
}
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import inspect
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Optional,
Type,
Union,
ValuesView,
)
import torch
from torch import Tensor
from torch.nn.utils import clip_grad_norm_, clip_grad_value_
from torch.optim import Optimizer
import pyro
from pyro.optim.adagrad_rmsprop import AdagradRMSProp as pt_AdagradRMSProp
from pyro.optim.clipped_adam import ClippedAdam as pt_ClippedAdam
from pyro.optim.dct_adam import DCTAdam as pt_DCTAdam
from pyro.params.param_store import (
module_from_param_with_module_name,
normalize_param_name,
user_param_name,
)
def is_scheduler(optimizer) -> bool:
    """
    Return ``True`` when ``optimizer`` is an LR scheduler wrapping a PyTorch
    optimizer (e.g. ``ReduceLROnPlateau`` or an ``_LRScheduler`` subclass),
    and ``False`` when it is a plain optimizer.
    """
    # Duck typing instead of isinstance(): PyTorch provides no comprehensive
    # scheduler class hierarchy, and the common base class _LRScheduler is
    # private. Schedulers are recognized by their ``optimizer`` attribute.
    try:
        optimizer.optimizer
    except AttributeError:
        return False
    return True
def _get_state_dict(optimizer) -> dict:
    """
    Return the state dict for ``optimizer``. For a scheduler-wrapped
    optimizer, bundle the scheduler's and the inner optimizer's state under
    separate keys so both layers can be restored later.
    """
    if not hasattr(optimizer, "optimizer"):
        # Plain optimizer: its own state dict is the whole state.
        return optimizer.state_dict()
    return {
        "scheduler": optimizer.state_dict(),
        "optimizer": optimizer.optimizer.state_dict(),
    }
def _load_state_dict(optimizer, state: dict) -> None:
    """
    Restore ``state`` (as produced by ``_get_state_dict``) into
    ``optimizer``, handling both plain and scheduler-wrapped optimizers.
    """
    if not hasattr(optimizer, "optimizer"):
        optimizer.load_state_dict(state)
        return
    # Scheduler: restore the scheduler layer, then the wrapped optimizer.
    optimizer.load_state_dict(state["scheduler"])
    optimizer.optimizer.load_state_dict(state["optimizer"])
class PyroOptim:
    """
    A wrapper for torch.optim.Optimizer objects that helps with managing dynamically generated parameters.

    :param optim_constructor: a torch.optim.Optimizer
    :param optim_args: a dictionary of learning arguments for the optimizer or a callable that returns
        such dictionaries
    :param clip_args: a dictionary of clip_norm and/or clip_value args or a callable that returns
        such dictionaries
    """

    def __init__(
        self,
        optim_constructor: Union[Callable, Optimizer, Type[Optimizer]],
        optim_args: Union[Dict, Callable[..., Dict]],
        clip_args: Optional[Union[Dict, Callable[..., Dict]]] = None,
    ):
        self.pt_optim_constructor = optim_constructor

        # must be callable or dict
        assert callable(optim_args) or isinstance(
            optim_args, dict
        ), "optim_args must be function that returns defaults or a defaults dictionary"

        if clip_args is None:
            clip_args = {}

        # must be callable or dict
        assert callable(clip_args) or isinstance(
            clip_args, dict
        ), "clip_args must be function that returns defaults or a defaults dictionary"

        # hold our args to be called/used
        self.pt_optim_args = optim_args
        if callable(optim_args):
            # Arity decides the (possibly deprecated) callable convention in
            # _get_optim_args: 1 arg = full param name, 2 args = (module, param).
            self.pt_optim_args_argc = len(inspect.signature(optim_args).parameters)
        self.pt_clip_args = clip_args

        # holds the torch optimizer objects, keyed by parameter tensor
        self.optim_objs: Dict = {}
        # per-parameter gradient clipping callables (or None)
        self.grad_clip: Dict = {}

        # any optimizer state that's waiting to be consumed (because that parameter hasn't been seen before)
        self._state_waiting_to_be_consumed: Dict = {}

    def __call__(self, params: Union[List, ValuesView], *args, **kwargs) -> None:
        """
        :param params: a list of parameters
        :type params: an iterable of strings

        Do an optimization step for each param in params. If a given param has never been seen before,
        initialize an optimizer for it.
        """
        for p in params:
            # if we have not seen this param before, we instantiate an optim object to deal with it
            if p not in self.optim_objs:
                # create a single optim object for that param
                optimizer = self.optim_objs[p] = self._get_optim(p)
                # create a gradient clipping function if specified
                self.grad_clip[p] = self._get_grad_clip(p)
                # set state from _state_waiting_to_be_consumed if present
                param_name = pyro.get_param_store().param_name(p)
                state = self._state_waiting_to_be_consumed.pop(param_name, None)
                if state is not None:
                    _load_state_dict(optimizer, state)

            if self.grad_clip[p] is not None:
                self.grad_clip[p](p)

            # Handle both old (_LRScheduler) and new (LRScheduler) attribute
            # names across torch versions, plus ReduceLROnPlateau which has
            # its own base class.
            if (
                hasattr(torch.optim.lr_scheduler, "_LRScheduler")
                and isinstance(
                    self.optim_objs[p], torch.optim.lr_scheduler._LRScheduler
                )
                or hasattr(torch.optim.lr_scheduler, "LRScheduler")
                and isinstance(self.optim_objs[p], torch.optim.lr_scheduler.LRScheduler)
                or isinstance(
                    self.optim_objs[p], torch.optim.lr_scheduler.ReduceLROnPlateau
                )
            ):
                # if optim object was a scheduler, perform an optimizer step
                self.optim_objs[p].optimizer.step(*args, **kwargs)
            else:
                self.optim_objs[p].step(*args, **kwargs)

    def get_state(self) -> Dict:
        """
        Get state associated with all the optimizers in the form of a dictionary with
        key-value pairs (parameter name, optim state dicts)
        """
        state_dict = {}
        for param in self.optim_objs:
            param_name = pyro.get_param_store().param_name(param)
            state_dict[param_name] = _get_state_dict(self.optim_objs[param])
        return state_dict

    def set_state(self, state_dict: Dict) -> None:
        """
        Set the state associated with all the optimizers using the state obtained
        from a previous call to get_state()
        """
        # State is applied lazily: entries are consumed in __call__ when the
        # corresponding parameter is first seen.
        self._state_waiting_to_be_consumed.update(state_dict)

    def save(self, filename: str) -> None:
        """
        :param filename: file name to save to
        :type filename: str

        Save optimizer state to disk
        """
        with open(filename, "wb") as output_file:
            torch.save(self.get_state(), output_file)

    def load(self, filename: str, map_location=None) -> None:
        """
        :param filename: file name to load from
        :type filename: str
        :param map_location: torch.load() map_location parameter
        :type map_location: function, torch.device, string or a dict

        Load optimizer state from disk
        """
        with open(filename, "rb") as input_file:
            state = torch.load(input_file, map_location=map_location)
        self.set_state(state)

    def _get_optim(self, param: Union[Iterable[Tensor], Iterable[Dict[Any, Any]]]):
        # Construct a fresh optimizer owning just this one parameter.
        return self.pt_optim_constructor([param], **self._get_optim_args(param))  # type: ignore

    # helper to fetch the optim args if callable (only used internally)
    def _get_optim_args(self, param: Union[Iterable[Tensor], Iterable[Dict]]):
        # If we were passed a function, we call function with a
        # fully qualified name e.g. 'mymodule.mysubmodule.bias'.
        if callable(self.pt_optim_args):
            param_name = pyro.get_param_store().param_name(param)
            if self.pt_optim_args_argc == 1:
                # Normalize to the format of nn.Module.named_parameters().
                normal_name = normalize_param_name(param_name)
                opt_dict = self.pt_optim_args(normal_name)
            else:
                # DEPRECATED Split param name in to pieces.
                module_name = module_from_param_with_module_name(param_name)
                stripped_param_name = user_param_name(param_name)
                opt_dict = self.pt_optim_args(module_name, stripped_param_name)

            # must be dictionary
            assert isinstance(
                opt_dict, dict
            ), "per-param optim arg must return defaults dictionary"
            return opt_dict
        else:
            return self.pt_optim_args

    def _get_grad_clip(self, param: str):
        # Returns a closure clipping this parameter's gradient, or None when
        # no clip args apply.
        grad_clip_args = self._get_grad_clip_args(param)

        if not grad_clip_args:
            return None

        def _clip_grad(params: Union[Tensor, Iterable[Tensor]]):
            # Delegates to the static _clip_grad helper below.
            self._clip_grad(params, **grad_clip_args)

        return _clip_grad

    def _get_grad_clip_args(self, param: str) -> Dict:
        # if we were passed a fct, we call fct with param info
        # arguments are (module name, param name) e.g. ('mymodule', 'bias')
        if callable(self.pt_clip_args):
            # get param name
            param_name = pyro.get_param_store().param_name(param)
            module_name = module_from_param_with_module_name(param_name)
            stripped_param_name = user_param_name(param_name)

            # invoke the user-provided callable
            clip_dict = self.pt_clip_args(module_name, stripped_param_name)

            # must be dictionary
            assert isinstance(
                clip_dict, dict
            ), "per-param clip arg must return defaults dictionary"
            return clip_dict
        else:
            return self.pt_clip_args

    @staticmethod
    def _clip_grad(
        params: Union[Tensor, Iterable[Tensor]],
        clip_norm: Optional[Union[int, float]] = None,
        clip_value: Optional[Union[int, float]] = None,
    ) -> None:
        # Norm clipping and value clipping may both be applied, in that order.
        if clip_norm is not None:
            clip_grad_norm_(params, clip_norm)
        if clip_value is not None:
            clip_grad_value_(params, clip_value)
def AdagradRMSProp(optim_args: Dict) -> PyroOptim:
    """
    Factory returning :class:`pyro.optim.adagrad_rmsprop.AdagradRMSProp`
    wrapped in a :class:`~pyro.optim.optim.PyroOptim`.
    """
    wrapped = PyroOptim(pt_AdagradRMSProp, optim_args)
    return wrapped
def ClippedAdam(optim_args: Dict) -> PyroOptim:
    """
    Factory returning :class:`pyro.optim.clipped_adam.ClippedAdam`
    wrapped in a :class:`~pyro.optim.optim.PyroOptim`.
    """
    wrapped = PyroOptim(pt_ClippedAdam, optim_args)
    return wrapped
def DCTAdam(optim_args: Dict) -> PyroOptim:
    """
    Factory returning :class:`pyro.optim.dct_adam.DCTAdam`
    wrapped in a :class:`~pyro.optim.optim.PyroOptim`.
    """
    wrapped = PyroOptim(pt_DCTAdam, optim_args)
    return wrapped
|
pyro-pplREPO_NAMEpyroPATH_START.@pyro_extracted@pyro-master@pyro@optim@optim.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "ultralytics/yolov5",
"repo_path": "yolov5_extracted/yolov5-master/models/__init__.py",
"type": "Python"
}
|
ultralyticsREPO_NAMEyolov5PATH_START.@yolov5_extracted@yolov5-master@models@__init__.py@.PATH_END.py
|
|
{
"filename": "nn.py",
"repo_name": "keras-team/keras",
"repo_path": "keras_extracted/keras-master/keras/src/backend/jax/nn.py",
"type": "Python"
}
|
import builtins
import math
import jax
import jax.experimental.sparse as jax_sparse
import jax.numpy as jnp
from jax import lax
from jax import nn as jnn
from jax.experimental.pallas.ops.tpu import (
flash_attention as flash_attention_tpu,
)
from keras.src import backend
from keras.src.backend.common.backend_utils import (
compute_conv_transpose_padding_args_for_jax,
)
from keras.src.backend.jax.core import cast
from keras.src.backend.jax.core import convert_to_tensor
def relu(x):
    """Elementwise ReLU (delegates to jax.nn.relu)."""
    return jnn.relu(convert_to_tensor(x))
def relu6(x):
    """Elementwise ReLU capped at 6 (delegates to jax.nn.relu6)."""
    return jnn.relu6(convert_to_tensor(x))
def sigmoid(x):
    """Elementwise logistic sigmoid (delegates to jax.nn.sigmoid)."""
    return jnn.sigmoid(convert_to_tensor(x))
def tanh(x):
    """Elementwise hyperbolic tangent (delegates to jax.nn.tanh)."""
    return jnn.tanh(convert_to_tensor(x))
def tanh_shrink(x):
    """Elementwise tanh-shrink: x - tanh(x)."""
    t = convert_to_tensor(x)
    return t - jnp.tanh(t)
def softplus(x):
    """Elementwise softplus (delegates to jax.nn.softplus)."""
    return jnn.softplus(convert_to_tensor(x))
def softsign(x):
    """Elementwise softsign (delegates to jax.nn.soft_sign)."""
    return jnn.soft_sign(convert_to_tensor(x))
def soft_shrink(x, threshold=0.5):
    """Elementwise soft-shrink: shift values toward zero by `threshold`,
    zeroing anything with |x| <= threshold."""
    t = convert_to_tensor(x)
    # Handle the negative branch first, then overlay the positive branch.
    below = jnp.where(t < -threshold, t + threshold, 0.0)
    return jnp.where(t > threshold, t - threshold, below)
def sparse_plus(x):
    """Elementwise sparse-plus (delegates to jax.nn.sparse_plus)."""
    return jnn.sparse_plus(convert_to_tensor(x))
def silu(x):
    """Elementwise SiLU / swish (delegates to jax.nn.silu)."""
    return jnn.silu(convert_to_tensor(x))
def squareplus(x, b=4):
    """Elementwise squareplus with smoothness parameter `b`
    (delegates to jax.nn.squareplus)."""
    return jnn.squareplus(convert_to_tensor(x), b=b)
def log_sigmoid(x):
    """Elementwise log-sigmoid (delegates to jax.nn.log_sigmoid)."""
    return jnn.log_sigmoid(convert_to_tensor(x))
def leaky_relu(x, negative_slope=0.2):
    """Elementwise leaky ReLU (delegates to jax.nn.leaky_relu)."""
    return jnn.leaky_relu(convert_to_tensor(x), negative_slope=negative_slope)
def hard_sigmoid(x):
    """Elementwise hard sigmoid (delegates to jax.nn.hard_sigmoid)."""
    return jnn.hard_sigmoid(convert_to_tensor(x))
def hard_silu(x):
    """Elementwise hard SiLU (delegates to jax.nn.hard_silu)."""
    return jnn.hard_silu(convert_to_tensor(x))
def elu(x, alpha=1.0):
    """Elementwise exponential linear unit (delegates to jax.nn.elu)."""
    return jnn.elu(convert_to_tensor(x), alpha=alpha)
def selu(x):
    """Elementwise scaled ELU (delegates to jax.nn.selu)."""
    return jnn.selu(convert_to_tensor(x))
def gelu(x, approximate=True):
    """Elementwise GELU; `approximate` is forwarded to jax.nn.gelu."""
    return jnn.gelu(convert_to_tensor(x), approximate)
def celu(x, alpha=1.0):
    """Elementwise CELU (delegates to jax.nn.celu)."""
    return jnn.celu(convert_to_tensor(x), alpha=alpha)
def glu(x, axis=-1):
    """Gated linear unit along `axis` (delegates to jax.nn.glu)."""
    return jnn.glu(convert_to_tensor(x), axis=axis)
def hard_tanh(x):
    """Elementwise hard tanh (delegates to jax.nn.hard_tanh)."""
    return jnn.hard_tanh(convert_to_tensor(x))
def hard_shrink(x, threshold=0.5):
    """Zero out entries with |x| <= threshold; keep the rest unchanged."""
    t = convert_to_tensor(x)
    return jnp.where(jnp.abs(t) > threshold, t, 0.0)
def threshold(x, threshold, default_value):
    """Pass through values above `threshold`; replace the rest with
    `default_value`."""
    t = convert_to_tensor(x)
    keep = t > threshold
    return jnp.where(keep, t, default_value)
def softmax(x, axis=-1):
    """Softmax along `axis` (delegates to jax.nn.softmax)."""
    return jnn.softmax(convert_to_tensor(x), axis=axis)
def log_softmax(x, axis=-1):
    """Log-softmax along `axis` (delegates to jax.nn.log_softmax)."""
    return jnn.log_softmax(convert_to_tensor(x), axis=axis)
def sparsemax(logits, axis=-1):
    """Sparsemax: Euclidean projection of `logits` onto the probability
    simplex along `axis`, yielding probabilities that can be exactly zero."""
    # Sort logits along the specified axis in descending order
    logits = convert_to_tensor(logits)
    logits_sorted = -1.0 * jnp.sort(logits * -1.0, axis=axis)
    logits_cumsum = jnp.cumsum(logits_sorted, axis=axis)  # find cumulative sum
    r = jnp.arange(1, logits.shape[axis] + 1)  # Determine the sparsity
    r_shape = [1] * logits.ndim
    r_shape[axis] = -1  # Broadcast to match the target axis
    r = r.reshape(r_shape)
    # Support set: positions whose sorted logit exceeds the running threshold.
    support = logits_sorted - (logits_cumsum - 1) / r > 0
    # Find the threshold tau from the size k of the support set.
    k = jnp.sum(support, axis=axis, keepdims=True)
    logits_cumsum_safe = jnp.where(support, logits_cumsum, 0.0)
    tau = (jnp.sum(logits_cumsum_safe, axis=axis, keepdims=True) - 1) / k
    # Shift by tau and clip at zero; entries outside the support become 0.
    output = jnp.maximum(logits - tau, 0.0)
    return output
def _convert_to_spatial_operand(
x,
num_spatial_dims,
data_format="channels_last",
include_batch_and_channels=True,
):
# Helper function that converts an operand to a spatial operand.
x = (x,) * num_spatial_dims if isinstance(x, int) else x
if not include_batch_and_channels:
return x
if data_format == "channels_last":
x = (1,) + x + (1,)
else:
x = (1,) + (1,) + x
return x
def _pool(
inputs,
initial_value,
reduce_fn,
pool_size,
strides=None,
padding="valid",
):
"""Helper function to define pooling functions.
Args:
inputs: input data of shape `N+2`.
initial_value: the initial value for the reduction.
reduce_fn: a reduce function of the form `(T, T) -> T`.
pool_size: a sequence of `N` integers, representing the window size to
reduce over.
strides: a sequence of `N` integers, representing the inter-window
strides (default: `(1, ..., 1)`).
padding: either the string `same` or `valid`.
Returns:
The output of the reduction for each window slice.
"""
if padding not in ("same", "valid"):
raise ValueError(
f"Invalid padding '{padding}', must be 'same' or 'valid'."
)
padding = padding.upper()
return lax.reduce_window(
inputs,
initial_value,
reduce_fn,
pool_size,
strides,
padding,
)
def max_pool(
    inputs,
    pool_size,
    strides=None,
    padding="valid",
    data_format=None,
):
    """N-D max pooling over the spatial dimensions of `inputs`."""
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    pool_size = _convert_to_spatial_operand(
        pool_size, num_spatial_dims, data_format
    )
    # Default stride equals the window size (non-overlapping windows).
    strides = pool_size if strides is None else strides
    strides = _convert_to_spatial_operand(
        strides, num_spatial_dims, data_format
    )
    # -inf is the identity element for the max reduction.
    return _pool(inputs, -jnp.inf, lax.max, pool_size, strides, padding)
def average_pool(
    inputs,
    pool_size,
    strides,
    padding,
    data_format=None,
):
    """N-D average pooling; with "same" padding, averages only over the
    valid (unpadded) entries of each window."""
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    pool_size = _convert_to_spatial_operand(
        pool_size, num_spatial_dims, data_format
    )
    # Default stride equals the window size (non-overlapping windows).
    strides = pool_size if strides is None else strides
    strides = _convert_to_spatial_operand(
        strides, num_spatial_dims, data_format
    )

    pooled = _pool(inputs, 0.0, lax.add, pool_size, strides, padding)
    if padding == "valid":
        # Avoid the extra reduce_window.
        return pooled / math.prod(pool_size)
    else:
        # Count the number of valid entries at each input point, then use that
        # for computing average. Assumes that any two arrays of same shape will
        # be padded the same. Avoid broadcasting on axis where pooling is
        # skipped.
        shape = [
            (a if b != 1 else 1) for (a, b) in zip(inputs.shape, pool_size)
        ]
        window_counts = _pool(
            jnp.ones(shape, inputs.dtype),
            0.0,
            lax.add,
            pool_size,
            strides,
            padding,
        )
        return pooled / window_counts
def _convert_to_lax_conv_dimension_numbers(
num_spatial_dims,
data_format="channels_last",
transpose=False,
):
"""Create a `lax.ConvDimensionNumbers` for the given inputs."""
num_dims = num_spatial_dims + 2
if data_format == "channels_last":
spatial_dims = tuple(range(1, num_dims - 1))
inputs_dn = (0, num_dims - 1) + spatial_dims
else:
spatial_dims = tuple(range(2, num_dims))
inputs_dn = (0, 1) + spatial_dims
if transpose:
kernel_dn = (num_dims - 2, num_dims - 1) + tuple(range(num_dims - 2))
else:
kernel_dn = (num_dims - 1, num_dims - 2) + tuple(range(num_dims - 2))
return lax.ConvDimensionNumbers(
lhs_spec=inputs_dn, rhs_spec=kernel_dn, out_spec=inputs_dn
)
def conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """General N-D convolution, including grouped convolution when the input
    channel count is a multiple of the kernel's in_channels."""
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    if data_format == "channels_last":
        channels = inputs.shape[-1]
    else:
        channels = inputs.shape[1]
    kernel_in_channels = kernel.shape[-2]
    if channels % kernel_in_channels > 0:
        raise ValueError(
            "The number of input channels must be evenly divisible by "
            f"kernel's in_channels. Received input channels {channels} and "
            f"kernel in_channels {kernel_in_channels}. "
        )
    # >1 means a grouped convolution (each group sees a channel subset).
    feature_group_count = channels // kernel_in_channels
    # Cast inputs to the kernel's dtype so lax does not have to promote.
    kernel = convert_to_tensor(kernel)
    inputs = convert_to_tensor(inputs, dtype=kernel.dtype)
    return jax.lax.conv_general_dilated(
        inputs,
        kernel,
        strides,
        padding,
        rhs_dilation=dilation_rate,
        dimension_numbers=dimension_numbers,
        feature_group_count=feature_group_count,
    )
def depthwise_conv(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """N-D depthwise convolution: each input channel is convolved with its
    own kernel slice(s), implemented as a fully grouped convolution."""
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    # One group per input channel makes the convolution depthwise.
    feature_group_count = (
        inputs.shape[-1] if data_format == "channels_last" else inputs.shape[1]
    )
    # Fold the (in_channels, channel_multiplier) trailing dims into a single
    # output-channel dim of size in_channels * multiplier with in_channels=1,
    # the layout lax expects for a grouped conv.
    kernel = jnp.reshape(
        kernel,
        kernel.shape[:-2] + (1, feature_group_count * kernel.shape[-1]),
    )
    return jax.lax.conv_general_dilated(
        inputs,
        kernel,
        strides,
        padding,
        rhs_dilation=dilation_rate,
        dimension_numbers=dimension_numbers,
        feature_group_count=feature_group_count,
    )
def separable_conv(
    inputs,
    depthwise_kernel,
    pointwise_kernel,
    strides=1,
    padding="valid",
    data_format=None,
    dilation_rate=1,
):
    """Separable convolution: a depthwise convolution followed by a
    pointwise (1x1) convolution mixing the channels."""
    data_format = backend.standardize_data_format(data_format)
    # Spatial filtering step; strides/padding/dilation are applied here.
    spatial_out = depthwise_conv(
        inputs,
        depthwise_kernel,
        strides,
        padding,
        data_format,
        dilation_rate,
    )
    # Channel-mixing step: stride 1 and "valid" padding, since the spatial
    # geometry was already handled by the depthwise stage.
    return conv(
        spatial_out,
        pointwise_kernel,
        strides=1,
        padding="valid",
        data_format=data_format,
        dilation_rate=dilation_rate,
    )
def conv_transpose(
    inputs,
    kernel,
    strides=1,
    padding="valid",
    output_padding=None,
    data_format=None,
    dilation_rate=1,
):
    """N-D transposed (fractionally strided) convolution."""
    data_format = backend.standardize_data_format(data_format)
    num_spatial_dims = inputs.ndim - 2
    # Explicit per-dimension padding that reproduces the requested
    # padding/output_padding semantics under jax's conv_transpose.
    padding_values = compute_conv_transpose_padding_args_for_jax(
        input_shape=inputs.shape,
        kernel_shape=kernel.shape,
        strides=strides,
        padding=padding,
        output_padding=output_padding,
        dilation_rate=dilation_rate,
    )
    # Note: non-transposed dimension numbers are used here; the kernel
    # reinterpretation is done by transpose_kernel=True below.
    dimension_numbers = _convert_to_lax_conv_dimension_numbers(
        num_spatial_dims,
        data_format,
        transpose=False,
    )
    strides = _convert_to_spatial_operand(
        strides,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )
    dilation_rate = _convert_to_spatial_operand(
        dilation_rate,
        num_spatial_dims,
        data_format,
        include_batch_and_channels=False,
    )

    return jax.lax.conv_transpose(
        inputs,
        kernel,
        strides,
        padding=padding_values,
        rhs_dilation=dilation_rate,
        dimension_numbers=dimension_numbers,
        transpose_kernel=True,
    )
def one_hot(x, num_classes, axis=-1, dtype="float32", sparse=False):
    """One-hot encode the integer tensor `x` along `axis`.

    With `sparse=True`, returns a `jax.experimental.sparse.BCOO` tensor.
    Negative entries in `x` still produce a stored (zero-valued) element at
    class 0 so the output's number of stored values stays static.
    """
    x = convert_to_tensor(x)
    if sparse:
        # Normalize a negative axis to its positive position in the OUTPUT
        # shape (which has one extra dimension).
        if axis < 0:
            axis = axis + len(x.shape) + 1
        if dtype is None:
            dtype = "float32"
        # We deal with negative inputs by having zeros in the output although
        # it's useless. It makes shapes static.
        values = jnp.greater_equal(jnp.ravel(x), 0).astype(dtype)
        values_count = values.shape[0]
        # Build the coordinate list: one index array per input dimension,
        # plus the class dimension inserted at `axis`.
        indices = [jnp.arange(dim) for dim in x.shape]
        indices = jnp.meshgrid(*indices, indexing="ij")
        indices.insert(axis, jnp.maximum(x, 0))  # Deal with negative indices
        indices = [a.reshape(values_count, 1).astype("int32") for a in indices]
        indices = jnp.concatenate(indices, axis=1)
        shape = list(x.shape)
        shape.insert(axis, num_classes)
        shape = tuple(shape)
        return jax_sparse.BCOO(
            (values, indices),
            shape=shape,
            indices_sorted=True,
            unique_indices=True,
        )
    return jnn.one_hot(x, num_classes, axis=axis, dtype=dtype)
def multi_hot(x, num_classes, axis=-1, dtype="float32", sparse=False):
    """Multi-hot encode `x`: one-hot each entry, then reduce over the
    sample axis so each class is marked at most once per sample.
    """
    x = convert_to_tensor(x)
    # Reduce over axis 1 for batched input, axis 0 for a single vector.
    reduction_axis = 1 if len(x.shape) > 1 else 0
    if sparse:
        result = one_hot(
            x, num_classes, axis=axis, dtype="int32", sparse=sparse
        )
        # JAX's BCOO does not support max reduction, use sum and compare with 0.
        result = jax_sparse.bcoo_reduce_sum(result, axes=(reduction_axis,))
        result = jax_sparse.bcoo_sum_duplicates(result)
        values = jnp.greater_equal(result.data, 0).astype(dtype)
        return jax_sparse.BCOO(
            (values, result.indices),
            shape=result.shape,
            indices_sorted=True,
            unique_indices=True,
        )
    return jnp.max(
        one_hot(cast(x, "int32"), num_classes, axis=axis, dtype=dtype),
        axis=reduction_axis,
    )
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Crossentropy between a one-hot/probability `target` and `output`.

    `target` and `output` must share the same shape and be at least rank 1.
    With `from_logits=True`, `output` is passed through log-softmax;
    otherwise it is renormalized and clipped before taking the log.
    """
    target = jnp.array(target)
    output = jnp.array(output)
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if len(target.shape) < 1:
        raise ValueError(
            "Arguments `target` and `output` must be at least rank 1. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_probs = jax.nn.log_softmax(output, axis=axis)
    else:
        # Renormalize, then clip away exact 0/1 to keep log() finite.
        probs = output / jnp.sum(output, axis, keepdims=True)
        probs = jnp.clip(probs, backend.epsilon(), 1.0 - backend.epsilon())
        log_probs = jnp.log(probs)
    return -jnp.sum(target * log_probs, axis=axis)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
    """Crossentropy with integer class-index targets.

    `target` must match `output`'s shape without its class axis; a
    trailing singleton dimension on `target` is squeezed away first.
    """
    target = jnp.array(target, dtype="int32")
    output = jnp.array(output)
    # Accept targets shaped (..., 1) by dropping the trailing axis.
    if len(target.shape) == len(output.shape) and target.shape[-1] == 1:
        target = jnp.squeeze(target, axis=-1)
    if len(output.shape) < 1:
        raise ValueError(
            "Argument `output` must be at least rank 1. "
            "Received: "
            f"output.shape={output.shape}"
        )
    if target.shape != output.shape[:-1]:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape "
            "up until the last dimension: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        log_probs = jax.nn.log_softmax(output, axis=axis)
    else:
        # Renormalize, then clip away exact 0/1 to keep log() finite.
        probs = output / jnp.sum(output, axis, keepdims=True)
        probs = jnp.clip(probs, backend.epsilon(), 1.0 - backend.epsilon())
        log_probs = jnp.log(probs)
    one_hot_target = jnn.one_hot(target, output.shape[axis], axis=axis)
    return -jnp.sum(one_hot_target * log_probs, axis=axis)
def binary_crossentropy(target, output, from_logits=False):
    """Elementwise binary crossentropy between `target` and `output`.

    With `from_logits=True`, uses the numerically stable log-sigmoid
    identity; otherwise clips probabilities away from 0/1 before log().
    """
    target = jnp.array(target)
    output = jnp.array(output)
    if target.shape != output.shape:
        raise ValueError(
            "Arguments `target` and `output` must have the same shape. "
            "Received: "
            f"target.shape={target.shape}, output.shape={output.shape}"
        )
    if from_logits:
        # log(sigmoid(x)) and log(1 - sigmoid(x)) computed stably.
        return (
            -1.0 * target * jax.nn.log_sigmoid(output)
            - (1.0 - target) * jax.nn.log_sigmoid(-output)
        )
    probs = jnp.clip(output, backend.epsilon(), 1.0 - backend.epsilon())
    return -(target * jnp.log(probs) + (1.0 - target) * jnp.log(1.0 - probs))
def moments(x, axes, keepdims=False, synchronized=False):
    """Return (mean, variance) of `x` over `axes`.

    Half-precision inputs are promoted to float32 for the reduction (the
    dynamic range of float16 is too small for stable statistics) and the
    results are clipped back into float16 range before the final cast.
    """
    if synchronized:
        raise NotImplementedError(
            "Argument synchronized=True is not supported with JAX."
        )
    ori_dtype = backend.standardize_dtype(x.dtype)
    # Compute in float32 for half-precision inputs, cast back at the end.
    need_cast = ori_dtype in ("float16", "bfloat16")
    if need_cast:
        x = cast(x, "float32")
    mean = jnp.mean(x, axes, keepdims=True)
    variance = jnp.var(x, axis=axes, keepdims=True)
    if not keepdims:
        mean = jnp.squeeze(mean, axes)
        variance = jnp.squeeze(variance, axes)
    if need_cast:
        # avoid overflow and underflow when casting from float16 to float32
        f16 = jnp.finfo(jnp.float16)
        mean = cast(jnp.clip(mean, f16.min, f16.max), ori_dtype)
        variance = cast(jnp.clip(variance, f16.min, f16.max), ori_dtype)
    return mean, variance
def batch_normalization(
    x, mean, variance, axis, offset=None, scale=None, epsilon=1e-3
):
    """Normalize `x` with per-`axis` statistics: scale*(x-mean)/std + offset.

    `mean`, `variance`, `scale` and `offset` are 1-D with length equal to
    `x.shape[axis]`; they are reshaped for broadcasting along that axis.
    """
    # Broadcast shape: singleton everywhere except `axis`.
    bshape = [1] * len(x.shape)
    bshape[axis] = mean.shape[0]
    mean = jnp.reshape(mean, bshape)
    variance = jnp.reshape(variance, bshape)
    # Fold scale into the reciprocal std so the hot path is one multiply-add.
    inv = jax.lax.rsqrt(variance + epsilon)
    if scale is not None:
        inv = inv * jnp.reshape(scale, bshape)
    shift = -mean * inv
    if offset is not None:
        shift = shift + jnp.reshape(offset, bshape)
    return x * inv + shift
def ctc_loss(target, output, target_length, output_length, mask_index=0):
    """CTC loss via the forward algorithm, in log space.

    Mirrors `optax.ctc_loss_with_forward_probs`: a `lax.scan` over time
    steps maintains two alpha tables — one for blank ("phi") states and
    one for label ("emit") states.

    Args:
        target: [B, N] int labels.
        output: [B, T, K] logits.
        target_length: [B] valid label lengths.
        output_length: [B] valid frame lengths.
        mask_index: class index of the CTC blank symbol.

    Returns:
        [B] per-sequence negative log-likelihood.
    """
    # Ref: https://github.com/google-deepmind/optax
    # optax.ctc_loss_with_forward_probs
    target = convert_to_tensor(target, dtype="int32")
    output = convert_to_tensor(output)
    target_length = convert_to_tensor(target_length, "int32")
    output_length = convert_to_tensor(output_length, "int32")
    batch_size, max_input_length, num_classes = output.shape
    batch_size, max_label_length = target.shape
    # Stand-in for -inf that stays finite under logaddexp arithmetic.
    log_epsilon = -1e5
    # Ensure that the dtype promotion behavior matches that of `tf.nn.ctc_loss`
    dtype = backend.result_type(output.dtype, "float32")
    output = cast(output, dtype)
    def _lengths_to_paddings(lengths, max_length):
        # True where the position is PAST the valid length (i.e. padding).
        indices = jnp.arange(max_length).reshape(
            (1,) * lengths.ndim + (max_length,)
        )
        lengths = jnp.expand_dims(lengths, axis=-1)
        elem_valid = indices < lengths
        return jnp.logical_not(elem_valid)
    target_paddings = _lengths_to_paddings(target_length, max_label_length)
    output_paddings = _lengths_to_paddings(output_length, max_input_length)
    target_paddings = target_paddings.astype(output.dtype)
    output_paddings = output_paddings.astype(output.dtype)
    logprobs = jnn.log_softmax(output)
    label_lengths = max_label_length - jnp.sum(target_paddings, axis=1).astype(
        jnp.int32
    )
    # repeat[b, n] == 1.0 when label[b, n] == label[b, n+1].
    repeat = (target[:, :-1] == target[:, 1:]).astype(jnp.float32)
    repeat = jnp.pad(repeat, ((0, 0), (0, 1)))
    logprobs_phi = logprobs[:, :, mask_index : mask_index + 1]  # [B, T, 1]
    logprobs_phi = jnp.transpose(logprobs_phi, (1, 0, 2))  # [T, B, 1]
    _one_hot = jax.nn.one_hot(target, num_classes=num_classes)  # [B, N, K]
    # Gather, per frame, the log-prob of each label in the target sequence.
    logprobs_emit = jnp.einsum("btk,bnk->btn", logprobs, _one_hot)
    logprobs_emit = jnp.transpose(logprobs_emit, (1, 0, 2))  # [T, B, N]
    # [B, N]
    # Initial alphas: only the "no labels consumed yet" phi state is live.
    logalpha_phi_init = (
        jnp.ones((batch_size, max_label_length + 1), dtype=output.dtype)
        * log_epsilon
    )
    logalpha_phi_init = logalpha_phi_init.at[:, 0].set(0.0)
    logalpha_emit_init = (
        jnp.ones((batch_size, max_label_length), dtype=output.dtype)
        * log_epsilon
    )
    def update_phi_score(phi, added_score):
        # Update `phi[:, 1:]`` with adding `added_score` in log space.
        return jnp.concatenate(
            [phi[:, :1], jnp.logaddexp(phi[:, 1:], added_score)], axis=-1
        )
    def loop_body(prev, x):
        # One time step of the CTC forward recursion.
        prev_phi, prev_emit = prev
        # emit-to-phi epsilon transition, except if the next label is repetition
        prev_phi_orig = prev_phi
        prev_phi = update_phi_score(prev_phi, prev_emit + log_epsilon * repeat)
        logprob_emit, logprob_phi, pad = x
        # phi-to-emit transition
        next_emit = jnp.logaddexp(
            prev_phi[:, :-1] + logprob_emit, prev_emit + logprob_emit
        )
        # self-loop transition
        next_phi = prev_phi + logprob_phi
        # emit-to-phi blank transition only when the next label is repetition
        next_phi = update_phi_score(
            next_phi, prev_emit + logprob_phi + log_epsilon * (1.0 - repeat)
        )
        # Padded frames carry the previous state through unchanged.
        pad = pad.reshape((batch_size, 1))
        next_emit = pad * prev_emit + (1.0 - pad) * next_emit
        next_phi = pad * prev_phi_orig + (1.0 - pad) * next_phi
        return (next_phi, next_emit), (next_phi, next_emit)
    xs = (logprobs_emit, logprobs_phi, output_paddings.transpose((1, 0)))
    _, (logalpha_phi, logalpha_emit) = jax.lax.scan(
        loop_body, (logalpha_phi_init, logalpha_emit_init), xs
    )
    # last row needs to be updated with the last epsilon transition
    logalpha_phi_last = update_phi_score(logalpha_phi[-1], logalpha_emit[-1])
    logalpha_phi = logalpha_phi.at[-1].set(logalpha_phi_last)
    # extract per_seq_loss
    # [B, N+1]
    # Select the alpha entry matching each sequence's true label length.
    _one_hot = jax.nn.one_hot(label_lengths, num_classes=max_label_length + 1)
    per_seq_loss = -jnp.einsum("bn,bn->b", logalpha_phi_last, _one_hot)
    return per_seq_loss
def _ctc_greedy_decode(
    inputs,
    sequence_lengths,
    merge_repeated=True,
    mask_index=None,
):
    """Greedy (best-path) CTC decoding.

    Picks the argmax class per frame, optionally collapses consecutive
    repeats, strips blanks (`mask_index`, defaulting to the last class),
    and left-compacts the surviving labels.

    Returns:
        indices: [1, B, T] decoded labels, padded with -1.
        scores: [B, 1] negated sum of the framewise maxima.
    """
    inputs = convert_to_tensor(inputs)
    sequence_lengths = convert_to_tensor(sequence_lengths, dtype="int32")
    batch_size, max_length, num_classes = inputs.shape
    if mask_index is None:
        mask_index = num_classes - 1
    indices = jnp.argmax(inputs, axis=-1)
    scores = jnp.max(inputs, axis=-1)
    # Frames beyond each sequence's valid length are treated as blanks
    # and contribute zero score.
    seqlen_mask = jnp.arange(max_length)[None, :]
    seqlen_mask = seqlen_mask >= sequence_lengths[:, None]
    indices = jnp.where(seqlen_mask, mask_index, indices)
    scores = jnp.where(seqlen_mask, 0.0, scores)
    if merge_repeated:
        # Keep only the first frame of each run of identical labels.
        repeat_mask = indices[:, 1:] == indices[:, :-1]
        repeat_mask = jnp.pad(repeat_mask, ((0, 0), (1, 0)))
        indices = jnp.where(repeat_mask, mask_index, indices)
    # We set to -1 for blank labels
    invalid_mask = indices == mask_index
    indices = jnp.where(invalid_mask, -1, indices)
    # We rearrange the indices by moving `mask_index` to the end of the array
    order = jnp.expand_dims(jnp.arange(max_length), axis=0)  # [1, N]
    order = jnp.tile(order, (batch_size, 1))  # [B, N]
    order = jnp.where(invalid_mask, max_length, order)
    order = jnp.argsort(order, axis=-1)
    indices = jnp.take_along_axis(indices, order, axis=-1)
    scores = -jnp.sum(scores, axis=1)[:, None]
    indices = jnp.expand_dims(indices, axis=0)
    return indices, scores
def _ctc_beam_search_decode(
    inputs,
    sequence_lengths,
    beam_width=100,
    top_paths=1,
    mask_index=None,
):
    """Beam-search CTC decoding.

    Maintains 2*beam_width candidate paths per batch element (each beam is
    tracked twice: once as "just emitted" and once as "just blanked"),
    extending and pruning them frame by frame with `lax.scan`, then merges
    duplicate paths and returns the `top_paths` best.

    Returns:
        paths: [top_paths, B, T] decoded labels, padded with -1.
        scores: [B, top_paths] log-scores of the returned paths.
    """
    inputs = convert_to_tensor(inputs)
    sequence_lengths = convert_to_tensor(sequence_lengths)
    batch_size, max_seq_len, num_classes = inputs.shape
    inputs = jnn.log_softmax(inputs)
    seqlen_mask = jnp.arange(max_seq_len)[None, :] >= sequence_lengths[:, None]
    if mask_index is None:
        mask_index = num_classes - 1
    # This is a workaround for the fact that jnp.argsort does not support
    # the order parameter which is used to break ties when scores are equal.
    # For compatibility with the tensorflow implementation, we flip the inputs
    # and the mask_index, and then flip the classes back to the correct indices
    inputs = jnp.flip(inputs, axis=2)
    mask_index = num_classes - mask_index - 1
    # Sentinel used both for "no label yet" path padding and for blanks.
    _pad = -1
    init_paths = jnp.full(
        (batch_size, 2 * beam_width, max_seq_len), _pad, dtype=jnp.int32
    )
    # Seed the beams from the best classes of the first frame.
    num_init_paths = builtins.min(num_classes, beam_width)
    max_classes = jnp.argsort(inputs[:, 0], axis=1)[:, -num_init_paths:]
    init_classes = jnp.where(max_classes == mask_index, _pad, max_classes)
    init_paths = init_paths.at[:, :num_init_paths, 0].set(init_classes)
    init_scores = (
        jnp.full((batch_size, 2 * beam_width), -jnp.inf, dtype=inputs.dtype)
        .at[:, :num_init_paths]
        .set(jnp.take_along_axis(inputs[:, 0], max_classes, axis=1))
    )
    init_masked = init_paths[:, :, 0] == _pad
    def _extend_paths(paths, scores, masked, x):
        # Branch every path on every class of the current frame.
        paths = jnp.repeat(paths, num_classes, axis=0)
        scores = jnp.repeat(scores, num_classes)
        masked = jnp.repeat(masked, num_classes)
        path_tail_index = jnp.argmax(paths == _pad, axis=1)
        paths_arange = jnp.arange(2 * beam_width * num_classes)
        path_tails = paths[paths_arange, path_tail_index - 1]
        path_tails = jnp.where(path_tail_index == 0, _pad, path_tails)
        classes = jnp.arange(num_classes).at[mask_index].set(_pad)
        classes = jnp.tile(classes, 2 * beam_width)
        prev_masked = masked
        masked = classes == _pad
        # A label repeated without an intervening blank does not extend
        # the path (standard CTC collapse rule).
        masked_repeat = ~prev_masked & (path_tails == classes)
        classes = jnp.where(masked_repeat, _pad, classes)
        paths = paths.at[paths_arange, path_tail_index].set(classes)
        x = jnp.tile(x, 2 * beam_width)
        scores = scores + x
        return paths, scores, masked
    def _merge_scores(unique_inverse, scores):
        # Log-sum-exp of the scores of identical paths, stabilized by the
        # running maximum.
        scores_max = jnp.max(scores)
        scores_exp = jnp.exp(scores - scores_max)
        scores = jnp.zeros_like(scores).at[unique_inverse].add(scores_exp)
        scores = jnp.log(scores) + scores_max
        return scores
    def _prune_paths(paths, scores, masked):
        # Deduplicate, merge scores, and keep only the best beam_width.
        paths, unique_inverse = jnp.unique(
            paths,
            return_inverse=True,
            size=2 * num_classes * beam_width,
            axis=0,
            fill_value=_pad,
        )
        if len(unique_inverse.shape) >= 2:
            unique_inverse = jnp.squeeze(unique_inverse, axis=1)
        emit_scores = jnp.where(masked, -jnp.inf, scores)
        mask_scores = jnp.where(masked, scores, -jnp.inf)
        emit_scores = _merge_scores(unique_inverse, emit_scores)
        mask_scores = _merge_scores(unique_inverse, mask_scores)
        total_scores = jnp.logaddexp(emit_scores, mask_scores)
        top_indices = jnp.argsort(total_scores)[-beam_width:]
        paths = paths[top_indices]
        emit_scores = emit_scores[top_indices]
        mask_scores = mask_scores[top_indices]
        # Re-split each surviving path into its emit and blank variants.
        paths = jnp.tile(paths, (2, 1))
        scores = jnp.concatenate([emit_scores, mask_scores])
        masked = jnp.concatenate(
            [jnp.zeros(beam_width, bool), jnp.ones(beam_width, bool)]
        )
        return paths, scores, masked
    def _decode_step(paths, scores, masked, x):
        paths, scores, masked = _extend_paths(paths, scores, masked, x)
        paths, scores, masked = _prune_paths(paths, scores, masked)
        return paths, scores, masked
    def _step(prev, x):
        # Skip padded frames; otherwise run one extend+prune step.
        paths, scores, masked = prev
        x, seqlen_mask = x
        paths, scores, masked = lax.cond(
            seqlen_mask,
            lambda paths, scores, masked, x: (paths, scores, masked),
            _decode_step,
            paths,
            scores,
            masked,
            x,
        )
        return (paths, scores, masked), None
    def _decode_batch(
        init_paths, init_scores, init_masked, inputs, seqlen_mask
    ):
        # Scan over the remaining frames, then final dedup/merge/top-k.
        (paths, scores, masked), _ = lax.scan(
            _step,
            (init_paths, init_scores, init_masked),
            (inputs[1:], seqlen_mask[1:]),
        )
        paths, unique_inverse = jnp.unique(
            paths,
            return_inverse=True,
            size=2 * num_classes * beam_width,
            axis=0,
            fill_value=_pad,
        )
        if len(unique_inverse.shape) >= 2:
            unique_inverse = jnp.squeeze(unique_inverse, axis=1)
        scores = _merge_scores(unique_inverse, scores)
        top_indices = jnp.argsort(scores)[-top_paths:][::-1]
        paths = paths[top_indices]
        scores = scores[top_indices]
        return paths, scores
    paths, scores = jax.vmap(_decode_batch)(
        init_paths, init_scores, init_masked, inputs, seqlen_mask
    )
    # convert classes back to the correct indices
    paths = jnp.where(paths == _pad, _pad, num_classes - paths - 1)
    paths = jnp.transpose(paths, [1, 0, 2])
    return paths, scores
def ctc_decode(
    inputs,
    sequence_lengths,
    strategy="greedy",
    beam_width=100,
    top_paths=1,
    merge_repeated=True,
    mask_index=0,
):
    """Dispatch CTC decoding to the greedy or beam-search implementation.

    `inputs` is promoted to at least float32 before decoding. `beam_width`
    and `top_paths` only apply to the "beam_search" strategy;
    `merge_repeated` only to "greedy".
    """
    inputs = convert_to_tensor(inputs)
    inputs = cast(inputs, backend.result_type(inputs.dtype, "float32"))
    if strategy == "greedy":
        return _ctc_greedy_decode(
            inputs,
            sequence_lengths,
            merge_repeated=merge_repeated,
            mask_index=mask_index,
        )
    if strategy == "beam_search":
        return _ctc_beam_search_decode(
            inputs,
            sequence_lengths,
            beam_width=beam_width,
            top_paths=top_paths,
            mask_index=mask_index,
        )
    raise ValueError(
        f"Invalid strategy {strategy}. Supported values are "
        "'greedy' and 'beam_search'."
    )
def psnr(x1, x2, max_val):
    """Peak signal-to-noise ratio, in dB, between two equal-shape arrays.

    Computed as 10*log10(max_val^2 / MSE), written as two log terms.
    """
    if x1.shape != x2.shape:
        raise ValueError(
            f"Input shapes {x1.shape} and {x2.shape} must "
            "match for PSNR calculation. "
        )
    max_val = convert_to_tensor(max_val, dtype=x2.dtype)
    mse = jnp.mean(jnp.square(x1 - x2))
    return 20 * jnp.log10(max_val) - 10 * jnp.log10(mse)
def _can_use_flash_attention(query, key, value, bias, raise_error=False):
    """Verify the availability of flash attention.

    Returns True only when the installed JAX exposes the cuDNN fused
    attention helpers AND the hardware/inputs pass all of its checks.
    With `raise_error=True`, the first failing check's exception is
    propagated instead of returning False.
    """
    try:
        # Private JAX helpers; importing them successfully implies a JAX
        # version with cuDNN fused-attention support.
        from jax._src.cudnn.fused_attention_stablehlo import _normalize_layout
        from jax._src.cudnn.fused_attention_stablehlo import (
            check_compute_capability,
        )
        from jax._src.cudnn.fused_attention_stablehlo import check_cudnn_version
        from jax._src.cudnn.fused_attention_stablehlo import (
            check_is_flash_attention,
        )
        from jax._src.cudnn.fused_attention_stablehlo import check_layout
        from jax.nn import dot_product_attention as dot_product_attention
    except ImportError:
        if raise_error:
            raise ImportError(
                "Flash attention is not supported in your current JAX version. "
                "Please update it by following the official guide: "
                "https://jax.readthedocs.io/en/latest/installation.html"
            )
        return False
    try:
        # Check if cuDNN is installed and raise RuntimeError if cuDNN is not
        # detected
        cudnn_version = check_cudnn_version()
        # Only support at least Ampere
        if not check_compute_capability("8.0"):
            raise RuntimeError("Require at least Ampere arch to run")
        # Check inputs layout
        check_layout(
            query,
            key,
            value,
            bias,
            q_seqlen=None,
            kv_seqlen=None,
            layout=_normalize_layout("BTNH"),
        )
        check_is_flash_attention(
            query,
            key,
            _normalize_layout("BTNH"),
            cudnn_version,
            bias is not None,
            is_training=False,
        )
        return True
    except:
        # Bare except is deliberate: ANY check failure means "unavailable"
        # unless the caller asked for the underlying error to propagate.
        if raise_error:
            raise
        return False
def _apply_masks(logits, mask, is_causal):
if mask is None and not is_causal:
return logits
combined_mask = jnp.ones_like(logits, dtype="bool")
if mask is not None:
combined_mask = jnp.logical_and(combined_mask, mask)
if is_causal:
T, S = logits.shape[2], logits.shape[3]
mask = jnp.tril(jnp.ones((T, S), dtype="bool"))
mask = mask[None, None, :, :]
combined_mask = jnp.logical_and(combined_mask, mask)
large_negative_number = jnp.asarray(
-0.7 * jnp.finfo(logits.dtype).max, dtype=logits.dtype
)
padded_logits = jnp.where(combined_mask, logits, large_negative_number)
return padded_logits
def _dot_product_attention_core(
    query, key, value, bias, mask, is_causal, scale
):
    """Reference XLA attention: softmax(scale * Q.K^T + bias) @ V.

    Logits accumulate in at least float32; softmax is always carried out
    in float32 and cast back to the key dtype afterwards.
    """
    logits_dtype = jnp.promote_types(query.dtype, jnp.float32)
    logits = jnp.einsum(
        "BTNH,BSNH->BNTS", query, key, preferred_element_type=logits_dtype
    )
    logits = logits * jnp.array(scale, dtype=logits.dtype)
    if bias is not None:
        logits = (logits + bias).astype(logits.dtype)
    masked_logits = _apply_masks(logits, mask, is_causal)
    # Softmax in float32 for stability, then back to the key dtype.
    probs = jax.nn.softmax(
        masked_logits.astype(jnp.float32), axis=-1
    ).astype(key.dtype)
    return jnp.einsum("BNTS,BSNH->BTNH", probs, value)
def dot_product_attention(
    query,
    key,
    value,
    bias=None,
    mask=None,
    scale=None,
    is_causal=False,
    flash_attention=None,
):
    """Scaled dot-product attention with optional flash-attention paths.

    Dispatch order: TPU Pallas flash attention, then
    `jax.nn.dot_product_attention` (jax>=0.4.31; cudnn or xla
    implementation), then a pure-XLA fallback with grouped-query support.
    Inputs are 4D: query (B, T, N, H), key/value (B, S, K, H) — TODO
    confirm exact layout against the callers.
    """
    query = convert_to_tensor(query)
    key = convert_to_tensor(key)
    value = convert_to_tensor(value)
    if len(query.shape) != 4 or len(key.shape) != 4 or len(value.shape) != 4:
        raise ValueError(
            "`dot_product_attention` only supports 4D inputs. "
            f"Received: query.shape={query.shape}, key.shape={key.shape}, "
            f"value.shape={value.shape}."
        )
    if flash_attention is None:
        # Auto-detect; None means "use it if available".
        flash_attention = _can_use_flash_attention(query, key, value, bias)
    elif flash_attention is True:
        # Use `raise_error=True` to provide more details if the inputs failed to
        # use flash attention
        _can_use_flash_attention(query, key, value, bias, raise_error=True)
    if jax.devices()[0].platform == "tpu" and flash_attention:
        # Use TPU-optimized flash attention from Pallas
        return flash_attention_tpu(
            query,
            key,
            value,
            ab=bias,
            segment_ids=mask,
            causal=is_causal,
            sm_scale=scale,
        )
    # `dot_product_attention` is only available in jax>=0.4.31
    if hasattr(jax.nn, "dot_product_attention"):
        return jax.nn.dot_product_attention(
            query,
            key,
            value,
            bias=bias,
            mask=mask,
            scale=scale,
            is_causal=is_causal,
            implementation="cudnn" if flash_attention else "xla",
        )
    if flash_attention:
        # Explicitly requested flash attention but this JAX cannot do it.
        raise RuntimeError(
            "Flash attention is not supported in your current JAX version. "
            "Please update it by following the official guide: "
            "https://jax.readthedocs.io/en/latest/installation.html"
        )
    # Ref: jax.nn.dot_product_attention
    # https://github.com/jax-ml/jax/blob/jax-v0.4.33/jax/_src/nn/functions.py#L886
    # Not support `query_seq_lengths` and `key_value_seq_lengths` args
    output_shape = query.shape
    _, _, K, H = key.shape
    scale = (1.0 / jnp.sqrt(H)) if scale is None else scale
    # _dot_product_attention_xla
    B, T, N, H = query.shape
    # Grouped-query attention: fold N query heads into (K, G) so each group
    # of G query heads shares one key/value head.
    G = N // K
    query = jnp.reshape(query, (B, T, K, G, H))
    def _reshape_to_grouped(t):
        # Broadcast (single-head) or reshape bias/mask to the (K, G) layout.
        if t is not None:
            tB, tN, tT, tS = t.shape
            if tN == 1:
                t = jnp.broadcast_to(t[:, :, None, :, :], (tB, tN, G, tT, tS))
            else:
                assert tN == N
                t = jnp.reshape(t, (tB, K, G, tT, tS))
        return t
    bias = _reshape_to_grouped(bias)
    mask = _reshape_to_grouped(mask)
    # vmap over the group axis, reusing the same key/value for every group.
    vmapped_fn = jax.vmap(
        _dot_product_attention_core,
        in_axes=(3, None, None, 2, 2, None, None),
        out_axes=3,
    )
    encoded = vmapped_fn(query, key, value, bias, mask, is_causal, scale)
    return jnp.reshape(encoded, output_shape)
|
keras-teamREPO_NAMEkerasPATH_START.@keras_extracted@keras-master@keras@src@backend@jax@nn.py@.PATH_END.py
|
{
"filename": "fn_system.py",
"repo_name": "mtalapinto/moes",
"repo_path": "feros/optics/fn_system.py",
"type": "Python"
}
|
from . import transform
import copy
from . import cte
from . import spheric_surface
from . import refraction_index
import numpy as np
from . import trace
def _refract(DC, n, k):
    """Apply the vector form of Snell's law at one surface.

    Parameters
    ----------
    DC : (N, 3) numpy array
        Incoming direction cosines (unit vectors).
    n : (N, 3) numpy array
        Surface normal direction cosines at each ray intersection.
    k : float
        Refractive index ratio n_out / n_in.

    Returns
    -------
    (N, 3) numpy array of refracted direction cosines.
    """
    # cos(incidence) = DC . n; both are unit vectors.
    cosi = DC[:, 0] * n[:, 0] + DC[:, 1] * n[:, 1] + DC[:, 2] * n[:, 2]
    sini = np.sqrt(1 - cosi ** 2)
    sinr = sini / k  # Snell's law.
    cosr = np.sqrt(1 - sinr ** 2)
    # Vector Snell's law: refracted = DC/k + (cos_r - cos_i/k) * n.
    return DC / k + (cosr - cosi / k)[:, None] * n
def tracing(H, DC, Tin, l0, t, p, fn_data):
    """Trace rays through the five-surface F/N lens system.

    The original per-surface refraction code was six copy-pasted blocks;
    it is now factored into `_refract` (identical floating-point ops).
    The unused locals `H_plane` and `material_sf2` were removed.

    Parameters
    ----------
    H : (N, 3) numpy array
        Ray positions; the z column is zeroed in place (as before).
    DC : (N, 3) numpy array
        Incoming direction cosines.
    Tin : array
        Input orientation angles passed to `transform.transform`.
    l0 : float
        Wavelength (units as expected by `refraction_index`).
    t, p : float
        Temperature and pressure, used for CTE and air-index corrections.
    fn_data : list
        Surface prescription as produced by `set_data`.

    Returns
    -------
    (H_out, DC_out) : positions and direction cosines at the focal plane.
    """
    H[:, 2] = 0.
    # Orientation: rotate rays into the lens frame and project the
    # positions onto the z = 0 entrance plane.
    DC_out = transform.transform(DC, -Tin)
    H_out = transform.transform(H, -Tin)
    H_out[:, 0] = H_out[:, 0] - (DC_out[:, 0] / DC_out[:, 2]) * (H_out[:, 2])
    H_out[:, 1] = H_out[:, 1] - (DC_out[:, 1] / DC_out[:, 2]) * (H_out[:, 2])
    H_out[:, 2] = 0.
    # --- Lens 1, surface 0 (air -> SFPL51) ---
    r_sf0 = cte.recalc(fn_data[0][1], 'sfpl51', t)
    material_sf0 = fn_data[0][3]
    H_out, n = spheric_surface.dZ(H_out, DC_out, r_sf0)
    n0 = refraction_index.nair_abs(l0, t, p)  # coming from air
    n1 = refraction_index.n(l0, t, p, material_sf0)
    DC_out = _refract(DC_out, n, n1 / n0)
    # Propagate from surface 0 to surface 1 (glass thickness, glass CTE).
    z_l1_out = cte.recalc(fn_data[0][2], 'sfpl51', t)
    H_out = trace.to_next_surface(H_out, DC_out, z_l1_out)
    H_out[:, 2] = 0.
    # --- Lens 1, surface 1 (SFPL51 -> air) ---
    r_sf1 = cte.recalc(fn_data[1][1], 'sfpl51', t)
    H_out, n = spheric_surface.dZ(H_out, DC_out, r_sf1)
    n0 = refraction_index.n(l0, t, p, material_sf0)
    n1 = refraction_index.nair_abs(l0, t, p)  # exiting into air
    DC_out = _refract(DC_out, n, n1 / n0)
    # Air gap between lens 1 and lens 2 (aluminium spacer CTE).
    z_l1_l2 = cte.recalc(fn_data[1][2], 'alum5083', t)
    H_out = trace.to_next_surface(H_out, DC_out, z_l1_l2)
    H_out[:, 2] = 0.
    # --- Lens 2, surface 0 (air -> STIM2) ---
    r_sf0 = cte.recalc(fn_data[2][1], 'stim2', t)
    material_sf0 = fn_data[2][3]
    H_out, n = spheric_surface.dZ(H_out, DC_out, r_sf0)
    n0 = refraction_index.nair_abs(l0, t, p)  # coming from air
    n1 = refraction_index.n(l0, t, p, material_sf0)
    DC_out = _refract(DC_out, n, n1 / n0)
    # Propagate inside lens 2 to the next interface.
    z_l2 = cte.recalc(fn_data[2][2], 'stim2', t)
    H_out = trace.to_next_surface(H_out, DC_out, z_l2)
    H_out[:, 2] = 0.
    # --- Interface (STIM2 -> SFPL51) ---
    r_sf1 = cte.recalc(fn_data[3][1], 'sfpl51', t)
    material_sf1 = fn_data[3][3]
    H_out, n = spheric_surface.dZ(H_out, DC_out, r_sf1)
    n0 = refraction_index.n(l0, t, p, material_sf0)
    n1 = refraction_index.n(l0, t, p, material_sf1)
    DC_out = _refract(DC_out, n, n1 / n0)
    # Propagate to the exit surface.
    z_ff_sf2 = cte.recalc(fn_data[3][2], 'sfpl51', t)
    H_out = trace.to_next_surface(H_out, DC_out, z_ff_sf2)
    H_out[:, 2] = 0.
    # --- Exit surface (SFPL51 -> air) ---
    r_sf2 = cte.recalc(fn_data[4][1], 'sfpl51', t)
    H_out, n = spheric_surface.dZ(H_out, DC_out, r_sf2)
    n0 = refraction_index.n(l0, t, p, material_sf1)
    n1 = refraction_index.nair_abs(l0, t, p)
    DC_out = _refract(DC_out, n, n1 / n0)
    # Final propagation to the focal plane (aluminium structure CTE).
    z_fn_fp = cte.recalc(fn_data[4][2], 'alum5083', t)
    H_out = trace.to_next_surface(H_out, DC_out, z_fn_fp)
    H_out[:, 2] = 0.
    return H_out, DC_out
def set_data(fndata):
    """Pack the flat prescription vector into per-surface records.

    Each record is [surface number (1-based), radius of curvature,
    distance to the next surface, material name], in the order a ray
    meets the five surfaces.
    """
    materials = ('SFPL51-FN', 'Air', 'STIM2', 'SFPL51-FN', 'Air')
    return [
        [num + 1, fndata[2 * num], fndata[2 * num + 1], materials[num]]
        for num in range(5)
    ]
def init():
    """Return the default F/N prescription: alternating radius/distance
    values for the five surfaces, in the layout `set_data` expects."""
    return [
        108.104, 4.,
        -27.736, 0.5,
        52.28, 2.5,
        14.453, 7,
        -48.519, 119.283,
    ]
def load_data(path='optics/fn_data.dat'):
    """Read the F/N system prescription, one float per line.

    Parameters
    ----------
    path : str, optional
        Location of the data file. Defaults to the original hard-coded
        relative path, so existing callers are unaffected.

    Returns
    -------
    list of float
    """
    # `with` guarantees the handle is closed (the original leaked it).
    with open(path, 'r') as file_fn:
        fndata = [float(line) for line in file_fn]
    return fndata
if __name__ == '__main__':
    # Regenerate the prescription file from the built-in default values,
    # eight decimal places per line (the format `load_data` reads back).
    fndata = init()
    file_fn = open('fn_data.dat','w')
    for i in range(len(fndata)):
        file_fn.write('%.8f\n' %(fndata[i]))
    file_fn.close()
|
mtalapintoREPO_NAMEmoesPATH_START.@feros@optics@fn_system.py@.PATH_END.py
|
{
"filename": "_lat.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/scattergeo/_lat.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class LatValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `lat` data array of `scattergeo` traces."""

    def __init__(self, plotly_name="lat", parent_name="scattergeo", **kwargs):
        super(LatValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "calc" edit type: changing `lat` triggers a full recalculation.
            edit_type=kwargs.pop("edit_type", "calc"),
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@scattergeo@_lat.py@.PATH_END.py
|
{
"filename": "test_pulsar.py",
"repo_name": "vallis/libstempo",
"repo_path": "libstempo_extracted/libstempo-master/tests/test_pulsar.py",
"type": "Python"
}
|
import shutil
import unittest
from pathlib import Path
import libstempo as t2
import numpy as np
# Test par/tim files bundled with the libstempo package.
DATA_PATH = t2.__path__[0] + "/data/"
# Scratch directory for files written during the tests; removed again in
# TestDeterministicSignals.tearDownClass.
TMP_DIR = Path("test_output")
TMP_DIR.mkdir(exist_ok=True)
# np.float128 is unavailable on some platforms; fall back to double there.
try:
    NP_LONG_DOUBLE_TYPE = np.float128
except AttributeError:
    NP_LONG_DOUBLE_TYPE = np.double
class TestDeterministicSignals(unittest.TestCase):
    """Smoke tests for `tempopulsar` against the bundled J1909-3744
    NANOGrav dfg+12 dataset; all expected values are properties of those
    par/tim files."""

    @classmethod
    def setUpClass(cls):
        # One shared pulsar object; loading the par/tim pair is expensive.
        cls.psr = t2.tempopulsar(
            parfile=DATA_PATH + "/J1909-3744_NANOGrav_dfg+12.par", timfile=DATA_PATH + "/J1909-3744_NANOGrav_dfg+12.tim"
        )
    @classmethod
    def tearDownClass(cls):
        # Remove scratch output written by test_save_partim.
        shutil.rmtree(TMP_DIR)
    def test_attrs(self):
        # Sanity-check counts, ranges and the long-double dtype of TOAs.
        self.assertEqual(self.psr.nobs, 1001)
        self.assertEqual(self.psr.name, "1909-3744")
        self.assertEqual(len(self.psr.stoas), 1001)
        self.assertTrue(np.all(self.psr.stoas > 50000) and np.all(self.psr.stoas < 59000))
        self.assertTrue(np.all(self.psr.toaerrs > 0.01) and np.all(self.psr.toaerrs < 10))
        self.assertTrue(np.all(self.psr.freqs > 700) and np.all(self.psr.freqs < 4000))
        self.assertEqual(self.psr.stoas[0].dtype, NP_LONG_DOUBLE_TYPE)
    def test_toas(self):
        # Computed (barycentric) TOAs differ from site TOAs but stay close.
        self.assertTrue(np.all(self.psr.toas() != self.psr.stoas))
        self.assertTrue(np.allclose(self.psr.toas(), self.psr.stoas, atol=1))
    def test_residuals(self):
        self.assertTrue(np.all(self.psr.residuals() > -2e-5) and np.all(self.psr.residuals() < 1.5e-5))
    def test_flags(self):
        expected = {"B", "be", "bw", "chanid", "fe", "proc", "pta", "tobs"}
        self.assertEqual(set(self.psr.flags()), expected)
    def test_radec(self):
        self.assertTrue(np.allclose(self.psr["RAJ"].val, 5.0169080674060326785))
        # NOTE(review): atol=1 makes this allclose nearly vacuous for a
        # ~1e-9 value — confirm the intended tolerance.
        self.assertTrue(np.allclose(self.psr["DECJ"].val, 7.753759525058565179e-10, atol=1))
        expected = (True, True)
        tested = (self.psr["RAJ"].set, self.psr["DECJ"].set)
        self.assertEqual(tested, expected)
    def test_fitpars(self):
        # Only the leading 14 fit parameters are pinned here.
        expected = ("RAJ", "DECJ", "F0", "F1", "PMRA", "PMDEC", "PX", "SINI", "PB", "A1", "TASC", "EPS1", "EPS2", "M2")
        fitpars = self.psr.pars()
        self.assertEqual(fitpars[:14], expected)
        setpars = self.psr.pars(which="set")
        self.assertEqual(len(setpars), 158)
        # different versions of tempo2 define different number of parameters
        # allpars = self.psr.pars(which="all")
        # self.assertEqual(len(allpars), 4487)
    def test_fit(self):
        _ = self.psr.fit()
        fitvals = self.psr.vals()
        self.assertEqual(len(fitvals), 82)
    def test_designmatrix(self):
        # One column per fit parameter plus offset-related columns.
        dmat = self.psr.designmatrix()
        self.assertEqual(dmat.shape, (1001, 83))
    def test_save_partim(self):
        self.psr.savepar(str(TMP_DIR / "tmp.par"))
        self.psr.savetim(str(TMP_DIR / "tmp.tim"))
        self.assertTrue((TMP_DIR / "tmp.par").exists())
        self.assertTrue((TMP_DIR / "tmp.tim").exists())
|
vallisREPO_NAMElibstempoPATH_START.@libstempo_extracted@libstempo-master@tests@test_pulsar.py@.PATH_END.py
|
{
"filename": "SNR_powerspec.py",
"repo_name": "JaidenCook/OSIRIS",
"repo_path": "OSIRIS_extracted/OSIRIS-main/pipeline/SNR_powerspec.py",
"type": "Python"
}
|
#!/usr/bin/python
__author__ = "Jaiden Cook"
__credits__ = ["Jaiden Cook"]
__version__ = "0.0.0"
__maintainer__ = "Jaiden Cook"
__email__ = "Jaiden.Cook@student.curtin.edu"
# Generic stuff:
#%matplotlib notebook
import os,sys
import time
from datetime import datetime
import glob
import shutil
import re
from math import pi
import warnings
import subprocess
warnings.filterwarnings("ignore")
# Array stuff:
import numpy as np
#warnings.simplefilter('ignore', np.RankWarning)
# Plotting stuff:
import matplotlib.pyplot as plt
import matplotlib
from mpl_toolkits.mplot3d import Axes3D
plt.style.use('seaborn-white')
# STIX fonts so math text matches the serif body font.
plt.rcParams['mathtext.fontset'] = 'stix'
plt.rcParams['font.family'] = 'STIXGeneral'
plt.rcParams.update({'font.size': 12})
# Outward-pointing black ticks on both axes; major ticks larger than minor.
plt.rc('xtick', color='k', labelsize='medium', direction='out')
plt.rc('xtick.major', size=6, pad=4)
plt.rc('xtick.minor', size=4, pad=4)
plt.rc('ytick', color='k', labelsize='medium', direction='out')
plt.rc('ytick.major', size=6, pad=4)
plt.rc('ytick.minor', size=4, pad=4)
# Parser options:
from optparse import OptionParser
# Scipy stuff:
import scipy
from scipy import signal
from scipy.fft import fftn,ifftn,fftfreq,fftshift,ifftshift
from scipy import stats
import scipy.optimize as opt
sys.path.append(os.path.abspath("/home/jaiden/Documents/EoR/OSIRIS"))
import Iris
from Iris_degrid import *
from Iris_grid import *
import SNR_MWAbeam
import SNR_21cmps
def Vis_noise(freq_vec,dt,fine_chan_width=0.08e+6,Trec=50,Aeff=21.5):
    """
    Per-fine-channel visibility noise from the radiometer equation,
    with MWA default instrument parameters.

    Parameters
    ----------
    freq_vec : numpy array, float
        Vector of fine channels in Hz.
    dt : float
        Observation (integration) time in seconds.
    fine_chan_width : float, optional
        Fine channel bandwidth in Hz. Default 0.08 MHz.
    Trec : float, optional
        Receiver temperature in Kelvin. Default 50 K.
    Aeff : float, optional
        Effective antenna area in m^2. Default 21.5.

    Returns
    -------
    numpy array of noise standard deviations in Jy, one per channel.
    """
    kb = 1380.648  # Boltzmann constant in [Jy K^-1 m^2].
    # Sky temperature power law, with frequency expressed in MHz.
    Tsky_vec = 228 * (freq_vec / 1e+6 / 150) ** (-2.53)
    # Radiometer equation: system-temperature term / sqrt(bandwidth * time).
    return 2 * (kb / Aeff) * (Tsky_vec + Trec) * (1 / np.sqrt(fine_chan_width * dt))
def beam_kernel(freq_vec,N_ker_size,L,M,delays,gauss_beam=False,interp_cond=False):
    """
    Construct the primary-beam image cube used to build the visibility
    degridding kernel: one (N_ker_size x N_ker_size) beam image per
    fine channel.

    Parameters
    ----------
    freq_vec : numpy array, float
        Vector of fine channels in Hz.
    N_ker_size : int
        Size of the beam kernel. This should be an odd number.
    L : float
        Size of the image space array in the l-direction.
    M : float
        Size of the image space array in the m-direction.
    delays : numpy array, int
        MWA beamformer delays used to steer the FEE primary beam.
    gauss_beam : bool, default=False
        If True, use an analytic 2D Gaussian instead of the FEE beam
        (testing only); the same image is broadcast to every channel.
    interp_cond : bool, default=False
        Forwarded to SNR_MWAbeam.MWA_beam; presumably controls
        interpolation of the beam onto the fine channels -- confirm
        against SNR_MWAbeam.

    Returns
    -------
    Beam_cube : numpy array, float
        Beam cube of shape (N_ker_size, N_ker_size, len(freq_vec)).
    l_b_arr, m_b_arr : numpy array, float
        Direction-cosine grids matching the kernel sampling.
    """
    ### Might be better being incorporated into the kernel object.
    # Direction-cosine sampling across the kernel footprint.
    l_b_vec = np.linspace(-L/2,L/2,N_ker_size)
    m_b_vec = np.linspace(-M/2,M/2,N_ker_size)
    # Creating the grid:
    l_b_arr, m_b_arr = np.meshgrid(l_b_vec, m_b_vec)
    # No-op assignment kept from the original; flagged for cleanup together
    # with the sky-model class axis conventions.
    m_b_arr = m_b_arr # Should probably fix this issue with the sky-model class.
    # Creating a radius array for masking purposes:
    r_b_arr = np.sqrt(l_b_arr**2 + m_b_arr**2)
    # Creating an index array, we want all pixels less than or equal to r = 1:
    ind_b_arr = r_b_arr <= 1.0
    # Here we want to create a new alt and az array that is the same size as l_arr and m_arr:
    Alt_b_arr = np.zeros(np.shape(l_b_arr))
    Az_b_arr = np.zeros(np.shape(l_b_arr))
    # Now we want to determine the Altitude and Azimuth, but only in the region where r <= 1. Outside this region is
    # beyond the boundary of the horizon.
    # Alt = arccos([l^2 + m^2]^(1/2)); r = sin(zenith angle), so arccos(r) = pi/2 - za.
    Alt_b_arr[ind_b_arr] = np.arccos(r_b_arr[ind_b_arr])
    #arctan2() returns [-pi,pi] we want [0,2pi].
    Az_b_arr[ind_b_arr] = 2*np.pi - (np.arctan2(l_b_arr[ind_b_arr],-m_b_arr[ind_b_arr]) + np.pi)
    if gauss_beam:
        # For testing purposes. Use a Gaussian beam.
        X = np.arange(len(l_b_arr))
        Y = np.arange(len(l_b_arr))
        xx,yy = np.meshgrid(X,Y)
        # Centre of the pixel grid.
        x_cent = 0.5*(np.max(X) - np.min(X))
        y_cent = 0.5*(np.max(X) - np.min(X))
        sigx = 2
        sigy = 2
        # Convert sigma to FWHM for the major/minor axes.
        amaj = sigx * (2.0*np.sqrt(2.0*np.log(2.0)))
        bmin = sigy * (2.0*np.sqrt(2.0*np.log(2.0)))
        Amplitude = 2*np.pi*sigx*sigy
        beam_temp = Iris.Gauss2D(xx, yy, Amplitude, x_cent, y_cent, 0, amaj, bmin)
        # Broadcast the single Gaussian image across all fine channels.
        Beam_cube = np.ones((len(l_b_arr),len(m_b_arr),len(freq_vec)))*beam_temp[:,:,None]
    else:
        # Need to interpolate the beam for the fine channels.
        # Calculating the FEE beam values for each coarse channel.
        Beam_cube = SNR_MWAbeam.MWA_beam(Az_b_arr,Alt_b_arr,ind_b_arr,freq_vec,delays,interp_cond=interp_cond)
    return Beam_cube, l_b_arr, m_b_arr
def calc_powerspec(Sky_mod,MWA_array,channel,u_lam_arr,v_lam_arr,freq_cent,\
    constants,delays,add_21cm=False,gauss_beam=False,interp_cond=False,\
    wedge_cond=False,cosmo=None,beam_cond=True,taper_cond=True,wproj_cond=True):
    """
    Calculate the 1D (spherical) and 2D (cylindrical) power spectra for an
    input sky-model, with an input interferometric array and channel layout.

    Pipeline: FFT the sky model per channel -> degrid onto the array's
    baselines with a (w-projected) beam kernel -> add radiometer noise ->
    regrid -> taper and FFT along frequency -> form the power spectra.

    Parameters
    ----------
    Sky_mod : object
        Sky-model object; only its ``model`` (l, m, channel) cube is used.
    MWA_array : object
        Array object providing ``uvw_lam`` and the resulting ``u_lam``,
        ``v_lam`` and ``w_lam`` baseline vectors.
    channel : object
        Channel object providing ``fine`` (fine-channel frequencies in Hz)
        and ``bandwidth`` (in MHz judging by the dnu_fine print below --
        TODO confirm against the channel class).
    u_lam_arr : numpy array, float
        2D numpy array of u coordinates in wavelengths.
    v_lam_arr : numpy array, float
        2D numpy array of v coordinates in wavelengths.
    freq_cent : float
        Central frequency of the observing band in Hz.
    constants : list
        [L, M, N, N_ker_size, uvmax, c], with an optional seventh entry
        Tobs_hours (defaults to 10 when absent).
    delays : numpy array, int
        Array of delays used for constructing the MWA primary beam.
    add_21cm : bool, default=False
        If True, add a simulated 21cm signal to the Fourier sky cube.
    gauss_beam : bool, default=False
        If True, use an analytic Gaussian beam kernel (testing only).
    interp_cond : bool, default=False
        Forwarded to the beam calculation (fine-channel interpolation).
    wedge_cond : bool, default=False
        Forwarded to Power.Spherical(); presumably controls foreground-wedge
        exclusion -- verify against Iris.Power_spec.
    cosmo : object, optional
        Cosmology object forwarded to Iris.Power_spec.
    beam_cond : bool, default=True
        If False, replace the beam kernel with a delta-function kernel.
    taper_cond : bool, default=True
        If True, apply a Blackman-Harris taper along frequency before the FFT.
    wproj_cond : bool, default=True
        If True, perform w-projection during degridding.

    Returns
    -------
    Power : object
        Iris.Power_spec object with Spherical() and Cylindrical() computed.
    """
    # Fixed seed so the injected noise realisation is reproducible.
    np.random.seed(0)
    # Unpacking constants.
    L = constants[0]
    M = constants[1]
    N = constants[2] # Might not need this.
    N_ker_size = constants[3]
    uvmax = constants[4] # [lambda]
    c = constants[5] # Speed of light. [m/s]
    try:
        # This is a filthy temporary fix. Earlier versions of the pipeline don't
        # have the Tobs_hours parameter. This sets a default based on the try statement.
        # NOTE(review): if constants[6] exists but is falsy (e.g. 0), Tobs_hours
        # is never assigned and a NameError follows -- confirm callers never pass 0.
        if constants[6]:
            Tobs_hours = constants[6]
    except IndexError:
        Tobs_hours = 10
    dA = (L*M)/(N**2) # Area element.
    # Better this way.
    u_lam_vec = u_lam_arr[0,:]
    v_lam_vec = v_lam_arr[:,0]
    #
    ## Calculating the True visibilities
    #
    # Initialising the complex visibility cube.
    Vis_cube = np.zeros(np.shape(Sky_mod.model),dtype=complex)
    print('Computing sky visibilities for each un-flagged fine channel...')
    #for i in channel.chan_flag_inds:
    for i in np.arange(len(channel.fine)):
        Vis_cube[:,:,i] = Iris.Visibilities_2D(Sky_mod.model[:,:,i])*dA
    #
    ## Calculating kernel beam cube
    #
    if beam_cond:
        # Default condition, generate the beam.
        beam_cube, l_b_arr, m_b_arr = beam_kernel(channel.fine,N_ker_size,L,M,delays,\
            gauss_beam=gauss_beam,interp_cond=interp_cond)
        print('Delays : %s' % (np.array(delays).astype('int')))
    else:
        # If there is no beam, then the beam kernel is uniform.
        # This still includes w-projection.
        print('No primary beam...')
        l_b_vec = np.linspace(-L/2,L/2,N_ker_size)
        m_b_vec = np.linspace(-M/2,M/2,N_ker_size)
        # Creating the grid:
        l_b_arr, m_b_arr = np.meshgrid(l_b_vec, m_b_vec)
        m_b_arr = m_b_arr # Should probably fix this issue with the sky-model class.
        #beam_cube = np.ones((len(l_b_arr),len(m_b_arr),len(channel.fine)))
        beam_cube = np.zeros((len(l_b_arr),len(m_b_arr),len(channel.fine)))
        # Cube of all ones has non-zero weights below the horizon. Causes huge sidelobes. Not good.
        # Delta-function kernel: unity at the central pixel only.
        beam_cube[int(N_ker_size/2),int(N_ker_size/2),:] = 1
    #
    ## Calculating the noise standard devitions
    #
    Tobs_sec = Tobs_hours*3600 #[seconds]
    sigma_vec = Vis_noise(channel.fine,Tobs_sec,fine_chan_width=0.08e+6,Trec=50,Aeff=21.5)
    #
    ## Degridding
    #
    lam_fine_chans = c/channel.fine
    print('Degridding and gridding...')
    if wproj_cond:
        print('Performing w-projection.')
    else:
        print('Not performing w-projection.')
    u_lam_list = []
    v_lam_list = []
    vis_list = []
    baseline_list = []
    counter = 0
    #for i in channel.chan_flag_inds:
    for i in np.arange(len(channel.fine)):# Not flagging.
        kernel = w_kernel(beam_cube[:,:,i],l_b_arr,m_b_arr)
        ## Defining the temporary u,v, and w vectors for the ith fine channel.
        # The (u,v,w) values for the ith fine channel:
        MWA_array.uvw_lam(lam_fine_chans[i],uvmax)
        u_lam_temp = MWA_array.u_lam
        v_lam_temp = MWA_array.v_lam
        w_lam_temp = MWA_array.w_lam
        if wproj_cond:
            # Calculating the degridded sky visibilities.
            Vis_sky_deg = Vis_degrid(kernel,u_lam_vec,v_lam_vec,u_lam_temp,v_lam_temp,Vis_cube[:,:,i],
                w_lam_temp,phase_cond=True)
        else:
            # Calculating the degridded sky visibilities.
            Vis_sky_deg = Vis_degrid(kernel,u_lam_vec,v_lam_vec,u_lam_temp,v_lam_temp,Vis_cube[:,:,i],
                phase_cond=True)
        ## There are negative pairs due to the hermitian nature of visibilities.
        u_lam_list.append(np.concatenate((MWA_array.u_lam,-MWA_array.u_lam)))
        v_lam_list.append(np.concatenate((MWA_array.v_lam,-MWA_array.v_lam)))
        # Determining the complex conjugate values.
        Vis_sky_deg = np.concatenate((Vis_sky_deg,np.conjugate(Vis_sky_deg)))
        vis_list.append(Vis_sky_deg)
        baseline_list.append(len(Vis_sky_deg))
        # Adding white Gaussian noise (sqrt(2) splits sigma between real/imag):
        Vis_noise_real = np.ones(len(Vis_sky_deg))*\
            np.random.normal(0.0, sigma_vec[i], len(Vis_sky_deg))/np.sqrt(2)
        Vis_noise_imag = np.ones(len(Vis_sky_deg))*\
            np.random.normal(0.0, sigma_vec[i], len(Vis_sky_deg))/np.sqrt(2)
        # Adding noise. NOTE: this mutates Vis_sky_deg in place, so the array
        # already appended to vis_list above receives the noise as well.
        Vis_sky_deg.real = Vis_sky_deg.real + Vis_noise_real
        Vis_sky_deg.imag = Vis_sky_deg.imag + Vis_noise_imag
        counter += 1
    #
    ## Gridding
    #
    # No flagged fine channels for gridding purposes.
    #temp_ind = channel.chan_flag_inds
    # Initialising the gridded visibility cube:
    gridded_vis_cube = np.zeros(np.shape(Sky_mod.model),dtype=complex)
    vis_weights_cube = np.zeros((N,N,len(channel.fine)))
    # Natural weighting
    #gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind] = grid_cube(u_lam_list,v_lam_list,\
    #    vis_list, u_lam_arr, v_lam_arr,gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind],\
    #    weighting='natural')
    # Gaussian weighting
    #gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind] = grid_cube(u_lam_list,v_lam_list,\
    #    vis_list, u_lam_arr, v_lam_arr,gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind],\
    #    weighting='gaussian')
    print('Sky-model shape')
    print(Sky_mod.model.shape)
    # Removing the fine channel flagging. Use idealised scenario.
    gridded_vis_cube, vis_weights_cube = grid_cube(u_lam_list,v_lam_list,\
        vis_list, u_lam_arr, v_lam_arr,gridded_vis_cube, vis_weights_cube,\
        weighting='gaussian')
    # Applying the Blackman-Harris taper. This is applied along the freqency axis.
    #gridded_vis_cube *= signal.blackmanharris(len(channel.fine))
    if taper_cond:
        print('Applying Blackman-Harris taper.')
        gridded_vis_cube *= signal.blackmanharris(len(channel.fine))
    else:
        print('Not applying Blackman-Harris taper.')
        # When testing spectral leakage without the beam, we want to remove the Blackman Harris window.
        pass
    #
    ## Fourier transforming with respect to frequency
    #
    # Determining the eta vector. Paramaterise this later.
    N_chans = len(channel.fine)
    # Fourier resolution:
    dnu_fine = channel.bandwidth/N_chans
    print('Fine Channel size = %5.3e [MHz]' % dnu_fine)
    # Generating the Fourier sky cube (FFT along the frequency axis only).
    Four_sky_cube = ifftshift(fftn(fftshift(gridded_vis_cube,axes=(2,)),axes=2,norm='forward'),axes=(2,))
    Four_sky_cube *= (dnu_fine*1e+6)
    # NOTE(review): the factor of 2 in the sample spacing below looks suspect --
    # confirm against the intended eta definition.
    eta = fftshift(fftfreq(N_chans,2*channel.bandwidth*1e+6/N_chans)) # Fix this, parameterise. Converting to Hz
    # Debug dump of the intermediate cubes; disabled by default.
    test_cond = False
    if test_cond:
        print('Saving Fourier-sky cube for analysis.')
        path = '/home/jaiden/Documents/EoR/SNR-Pipeline/workbooks/data/'
        test_name1 = 'Zen-Four-sky-cube'
        test_name2 = 'Gridded-Vis-cube'
        np.savez_compressed(path + test_name1, Four_sky_cube = Four_sky_cube, eta=eta, u_lam_arr=u_lam_arr, v_lam_arr=v_lam_arr)
        np.savez_compressed(path + test_name2, gridded_vis_cube = gridded_vis_cube, weights_cube=vis_weights_cube)
    else:
        pass
    # Subsetting out the negative eta values:
    Four_sky_cube = Four_sky_cube[:,:,eta >= 0]
    eta = eta[eta >= 0]
    if add_21cm:
        # Condition for adding in the 21cm signal.
        print('Calculating the 21cm signal visibilities...')
        Vis_21cm = SNR_21cmps.calc_21cm_pspec(freq_cent,channel,u_lam_arr,v_lam_arr,eta)
        Four_sky_cube = Four_sky_cube + Vis_21cm
    else:
        pass
    # Average the per-channel gridding weights and broadcast along eta.
    temp_avg = np.mean(vis_weights_cube, axis=2)
    # Defining the weights cube
    weights_cube = np.ones(np.shape(Four_sky_cube))*temp_avg[:,:,None].real
    del temp_avg
    # Fourier resolution:
    dnu_fine = channel.bandwidth/N_chans
    dnu = channel.bandwidth
    #
    ## Calculating the 1D and 2D power spectra.
    #
    # Initialising the power spectrum object.
    Power = Iris.Power_spec(Four_sky_cube,eta,u_lam_arr,v_lam_arr,freq_cent,\
        dnu,dnu_fine,weights_cube=weights_cube,cosmo=cosmo)
    # Calculating the power spectra.
    print('Calculating the spherically average 1D power spectrum...')
    Power.Spherical(wedge_cond=wedge_cond)
    print('Calculating the cylindrically averaged 2D power spectrum...')
    Power.Cylindrical()
    return Power
def calc_noise_powerspec(MWA_array,channel,u_lam_arr,v_lam_arr,freq_cent,\
    constants,add_21cm=False,wedge_cond=False,cosmo=None):
    """
    Calculate the 1D and 2D power spectra for noise-like visibilities, with
    an input interferometric array and channel layout. Has the option of
    including the 21cm signal. Mirrors calc_powerspec but skips the
    sky-model degridding: per-baseline visibilities are pure Gaussian noise.

    Parameters
    ----------
    MWA_array : object
        Array object providing ``uvw_lam`` and the resulting ``u_lam``,
        ``v_lam`` baseline vectors.
    channel : object
        Channel object providing ``fine`` (fine-channel frequencies in Hz)
        and ``bandwidth`` (in MHz judging by the dnu_fine print below --
        TODO confirm against the channel class).
    u_lam_arr : numpy array, float
        2D numpy array of u coordinates in wavelengths.
    v_lam_arr : numpy array, float
        2D numpy array of v coordinates in wavelengths.
    freq_cent : float
        Central frequency of the observing band in Hz.
    constants : list
        [L, M, N, N_ker_size, uvmax, c], with an optional seventh entry
        Tobs_hours (defaults to 10 when absent).
    add_21cm : bool, default=False
        If True, add a simulated 21cm signal to the Fourier sky cube.
    wedge_cond : bool, default=False
        Forwarded to Power.Spherical().
    cosmo : object, optional
        Cosmology object forwarded to Iris.Power_spec.

    Returns
    -------
    Power : object
        Iris.Power_spec object with Spherical() and Cylindrical() computed.
    """
    # Fixed seed so the noise realisation is reproducible.
    np.random.seed(0)
    # Unpacking constants.
    L = constants[0]
    M = constants[1]
    N = constants[2] # Might not need this.
    uvmax = constants[4] # [lambda]
    c = constants[5] # Speed of light. [m/s]
    try:
        # This is a filthy temporary fix. Earlier versions of the pipeline don't
        # have the Tobs_hours parameter. This sets a default based on the try statement.
        # NOTE(review): if constants[6] exists but is falsy, Tobs_hours is never
        # assigned and a NameError follows -- confirm callers never pass 0.
        if constants[6]:
            Tobs_hours = constants[6]
    except IndexError:
        Tobs_hours = 10
    #
    ## Calculating the noise standard devitions
    #
    Tobs_sec = Tobs_hours*3600 #[seconds]
    sigma_vec = Vis_noise(channel.fine,Tobs_sec,fine_chan_width=0.08e+6,Trec=50,Aeff=21.5)
    #
    ## Degridding
    #
    lam_fine_chans = c/channel.fine
    print('Degridding and gridding...')
    u_lam_list = []
    v_lam_list = []
    vis_list = []
    baseline_list = []
    counter = 0
    #for i in channel.chan_flag_inds:
    for i in np.arange(len(channel.fine)):# Not flagging.
        ## Defining the temporary u,v, and w vectors for the ith fine channel.
        # The (u,v,w) values for the ith fine channel:
        MWA_array.uvw_lam(lam_fine_chans[i],uvmax)
        u_lam_temp = MWA_array.u_lam
        #v_lam_temp = MWA_array.v_lam
        #w_lam_temp = MWA_array.w_lam
        ## There are negative pairs due to the hermitian nature of visibilities.
        u_lam_list.append(np.concatenate((MWA_array.u_lam,-MWA_array.u_lam)))
        v_lam_list.append(np.concatenate((MWA_array.v_lam,-MWA_array.v_lam)))
        # Determining the complex conjugate values.
        # NOTE(review): u_lam_list/v_lam_list entries above are doubled by the
        # negative pairs, but Vis_sky_deg below has only len(u_lam_temp) entries.
        # calc_powerspec doubles the visibilities via a conjugate concatenation
        # before appending -- confirm grid_cube tolerates this length mismatch.
        Vis_sky_deg = np.zeros(u_lam_temp.shape, dtype=complex)
        # Adding white Gaussian noise (sqrt(2) splits sigma between real/imag):
        Vis_sky_deg.real = np.ones(len(Vis_sky_deg))*\
            np.random.normal(0.0, sigma_vec[i], len(Vis_sky_deg))/np.sqrt(2)
        Vis_sky_deg.imag = np.ones(len(Vis_sky_deg))*\
            np.random.normal(0.0, sigma_vec[i], len(Vis_sky_deg))/np.sqrt(2)
        vis_list.append(Vis_sky_deg)
        baseline_list.append(len(Vis_sky_deg))
        counter += 1
    #
    ## Gridding
    #
    # No flagged fine channels for gridding purposes.
    #temp_ind = channel.chan_flag_inds
    # Initialising the gridded visibility cube:
    gridded_vis_cube = np.zeros((N,N,len(channel.fine)),dtype=complex)
    vis_weights_cube = np.zeros((N,N,len(channel.fine)))
    # Natural weighting.
    #gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind] = grid_cube(u_lam_list,v_lam_list,\
    #    vis_list, u_lam_arr, v_lam_arr,gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind],\
    #    weighting='natural')
    # Gaussian weighting.
    #gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind] = grid_cube(u_lam_list,v_lam_list,\
    #    vis_list, u_lam_arr, v_lam_arr,gridded_vis_cube[:,:,temp_ind], vis_weights_cube[:,:,temp_ind],\
    #    weighting='gaussian')
    # Gaussian weighting.
    gridded_vis_cube, vis_weights_cube = grid_cube(u_lam_list,v_lam_list,\
        vis_list, u_lam_arr, v_lam_arr,gridded_vis_cube, vis_weights_cube,\
        weighting='gaussian')
    # Applying the Blackman-Harris taper. This is applied along the freqency axis.
    gridded_vis_cube *= signal.blackmanharris(len(channel.fine))
    #
    ## Fourier transforming with respect to frequency
    #
    # Determining the eta vector. Paramaterise this later.
    N_chans = len(channel.fine)
    # Fourier resolution:
    dnu_fine = channel.bandwidth/N_chans
    print('Fine Channel size = %5.3e [MHz]' % dnu_fine)
    # Generating the Fourier sky cube (FFT along the frequency axis only).
    Four_sky_cube = ifftshift(fftn(fftshift(gridded_vis_cube,axes=(2,)),axes=2,norm='forward'),axes=(2,))
    Four_sky_cube *= (dnu_fine*1e+6)
    # NOTE(review): the factor of 2 in the sample spacing below looks suspect --
    # confirm against the intended eta definition (matches calc_powerspec).
    eta = fftshift(fftfreq(N_chans,2*channel.bandwidth*1e+6/N_chans)) # Fix this, parameterise. Converting to Hz
    # Subsetting out the negative eta values:
    Four_sky_cube = Four_sky_cube[:,:,eta >= 0]
    eta = eta[eta >= 0]
    if add_21cm:
        # Condition for adding in the 21cm signal.
        print('Calculating the 21cm signal visibilities...')
        Vis_21cm = SNR_21cmps.calc_21cm_pspec(freq_cent,channel,u_lam_arr,v_lam_arr,eta)
        Four_sky_cube = Four_sky_cube + Vis_21cm
    else:
        pass
    # Average the per-channel gridding weights and broadcast along eta.
    temp_avg = np.mean(vis_weights_cube, axis=2)
    # Defining the weights cube
    weights_cube = np.ones(np.shape(Four_sky_cube))*temp_avg[:,:,None].real
    del temp_avg
    # Fourier resolution:
    dnu_fine = channel.bandwidth/N_chans
    dnu = channel.bandwidth
    #
    ## Calculating the 1D and 2D power spectra.
    #
    # Initialising the power spectrum object.
    Power = Iris.Power_spec(Four_sky_cube,eta,u_lam_arr,v_lam_arr,freq_cent,\
        dnu,dnu_fine,weights_cube=weights_cube,cosmo=cosmo)
    # Calculating the power spectra.
    print('Calculating the spherically average 1D power spectrum...')
    Power.Spherical(wedge_cond=wedge_cond)
    print('Calculating the cylindrically averaged 2D power spectrum...')
    Power.Cylindrical()
    return Power
|
JaidenCookREPO_NAMEOSIRISPATH_START.@OSIRIS_extracted@OSIRIS-main@pipeline@SNR_powerspec.py@.PATH_END.py
|
{
"filename": "test_sampled.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/timeseries/tests/test_sampled.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import datetime
import pytest
from numpy.testing import assert_allclose, assert_equal
from astropy import units as u
from astropy.table import Column, Table
from astropy.tests.helper import assert_quantity_allclose
from astropy.time import Time, TimeDelta
from astropy.timeseries.periodograms import BoxLeastSquares, LombScargle
from astropy.timeseries.sampled import TimeSeries
from astropy.units import Quantity, UnitsWarning
from astropy.utils.data import get_pkg_data_filename
# Shared fixtures: three sample times (deliberately not in chronological
# order), a plain three-column table, and a bundled CSV data file.
INPUT_TIME = Time(["2016-03-22T12:30:31", "2015-01-21T12:30:32", "2016-03-22T12:30:40"])
PLAIN_TABLE = Table([[1, 2, 11], [3, 4, 1], [1, 1, 1]], names=["a", "b", "c"])
CSV_FILE = get_pkg_data_filename("data/sampled.csv")


def test_empty_initialization():
    """An empty TimeSeries accepts a 'time' column as its first column."""
    ts = TimeSeries()
    ts["time"] = Time([50001, 50002, 50003], format="mjd")


def test_empty_initialization_invalid():
    """Adding a non-time first column to an empty TimeSeries must fail."""
    # Make sure things crash when the first column added is not a time column
    ts = TimeSeries()
    with pytest.raises(
        ValueError,
        match=(
            r"TimeSeries object is invalid - expected 'time' as the first column but"
            r" found 'flux'"
        ),
    ):
        ts["flux"] = [1, 2, 3]


def test_initialize_only_time():
    """Initialising with only a time column exposes it via the .time attribute."""
    ts = TimeSeries(time=INPUT_TIME)
    assert ts["time"] is ts.time
    # NOTE: the object in the table is a copy
    assert_equal(ts.time.isot, INPUT_TIME.isot)


def test_initialization_with_data():
    """Data columns passed alongside 'time' are stored under their names."""
    ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
    assert_equal(ts.time.isot, INPUT_TIME.isot)
    assert_equal(ts["a"], [10, 2, 3])
    assert_equal(ts["b"], [4, 5, 6])


def test_initialize_only_data():
    """Data without 'time' or 'time_start' raises TypeError."""
    with pytest.raises(
        TypeError, match=r"Either 'time' or 'time_start' should be specified"
    ):
        TimeSeries(data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
def test_initialization_with_table():
    """A Table passed as data is absorbed; 'time' becomes the first column."""
    ts = TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE)
    assert ts.colnames == ["time", "a", "b", "c"]


def test_initialization_with_time_delta():
    """time_start plus time_delta builds an evenly sampled time column."""
    ts = TimeSeries(
        time_start=datetime(2018, 7, 1, 10, 10, 10),
        time_delta=TimeDelta(3, format="sec"),
        data=[[10, 2, 3], [4, 5, 6]],
        names=["a", "b"],
    )
    assert_equal(
        ts.time.isot,
        [
            "2018-07-01T10:10:10.000",
            "2018-07-01T10:10:13.000",
            "2018-07-01T10:10:16.000",
        ],
    )


def test_initialization_missing_time_delta():
    """A scalar time_start without time_delta is rejected."""
    with pytest.raises(
        TypeError, match=r"'time' is scalar, so 'time_delta' is required"
    ):
        TimeSeries(
            time_start=datetime(2018, 7, 1, 10, 10, 10),
            data=[[10, 2, 3], [4, 5, 6]],
            names=["a", "b"],
        )


def test_initialization_invalid_time_and_time_start():
    """'time' and 'time_start' are mutually exclusive."""
    with pytest.raises(TypeError, match=r"Cannot specify both 'time' and 'time_start'"):
        TimeSeries(
            time=INPUT_TIME,
            time_start=datetime(2018, 7, 1, 10, 10, 10),
            data=[[10, 2, 3], [4, 5, 6]],
            names=["a", "b"],
        )


def test_initialization_invalid_time_delta():
    """time_delta must be a Quantity or TimeDelta, not a plain list."""
    with pytest.raises(
        TypeError, match=r"'time_delta' should be a Quantity or a TimeDelta"
    ):
        TimeSeries(
            time_start=datetime(2018, 7, 1, 10, 10, 10),
            time_delta=[1, 4, 3],
            data=[[10, 2, 3], [4, 5, 6]],
            names=["a", "b"],
        )
def test_initialization_with_time_in_data():
    """A 'time' column supplied inside data is used; duplicates are rejected."""
    data = PLAIN_TABLE.copy()
    data["time"] = INPUT_TIME
    ts1 = TimeSeries(data=data)
    assert set(ts1.colnames) == {"time", "a", "b", "c"}
    assert all(ts1.time == INPUT_TIME)
    ts2 = TimeSeries(data=[[10, 2, 3], INPUT_TIME], names=["a", "time"])
    assert set(ts2.colnames) == {"time", "a"}
    assert all(ts2.time == INPUT_TIME)
    MESSAGE = r"'time' has been given both in the table and as a keyword argument"
    with pytest.raises(TypeError, match=MESSAGE):
        # Don't allow ambiguous cases of passing multiple 'time' columns
        TimeSeries(data=data, time=INPUT_TIME)
    with pytest.raises(TypeError, match=MESSAGE):
        # 'time' is a protected name, don't allow ambiguous cases
        TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], INPUT_TIME], names=["a", "time"])


def test_initialization_n_samples():
    """n_samples inconsistent with the actual data length is rejected."""
    # Make sure things crash with incorrect n_samples
    with pytest.raises(
        TypeError,
        match=(
            r"'n_samples' has been given both and it is not the same length as the"
            r" input data."
        ),
    ):
        TimeSeries(time=INPUT_TIME, data=PLAIN_TABLE, n_samples=1000)


def test_initialization_length_mismatch():
    """time and data columns of different lengths are rejected."""
    with pytest.raises(
        ValueError, match=r"Length of 'time' \(3\) should match data length \(2\)"
    ):
        TimeSeries(time=INPUT_TIME, data=[[10, 2], [4, 5]], names=["a", "b"])


def test_initialization_invalid_both_time_and_time_delta():
    """time_delta is meaningless when 'time' is already an array."""
    with pytest.raises(
        TypeError,
        match=r"'time_delta' should not be specified since 'time' is an array",
    ):
        TimeSeries(time=INPUT_TIME, time_delta=TimeDelta(3, format="sec"))
def test_fold():
    """Exercise TimeSeries.fold across epoch_time/epoch_phase/wrap_phase and
    both absolute (TimeDelta) and normalized (dimensionless Quantity) phase."""
    times = Time([1, 2, 3, 8, 9, 12], format="unix")
    ts = TimeSeries(time=times)
    ts["flux"] = [1, 4, 4, 3, 2, 3]
    # Try without epoch time, as it should default to the first time and
    # wrapping at half the period.
    tsf = ts.fold(period=3.2 * u.s)
    assert isinstance(tsf.time, TimeDelta)
    assert_allclose(tsf.time.sec, [0, 1, -1.2, 0.6, -1.6, 1.4], rtol=1e-6)
    # Try with epoch time
    tsf = ts.fold(period=3.2 * u.s, epoch_time=Time(1.6, format="unix"))
    assert isinstance(tsf.time, TimeDelta)
    assert_allclose(tsf.time.sec, [-0.6, 0.4, 1.4, 0.0, 1.0, 0.8], rtol=1e-6, atol=1e-6)
    # Now with wrap_phase set to the full period
    tsf = ts.fold(period=3.2 * u.s, wrap_phase=3.2 * u.s)
    assert isinstance(tsf.time, TimeDelta)
    assert_allclose(tsf.time.sec, [0, 1, 2, 0.6, 1.6, 1.4], rtol=1e-6)
    # Now set epoch_phase to be 1/4 of the way through the phase
    tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s)
    assert isinstance(tsf.time, TimeDelta)
    assert_allclose(tsf.time.sec, [0.8, -1.4, -0.4, 1.4, -0.8, -1.0], rtol=1e-6)
    # And combining epoch_phase and wrap_phase
    tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.8 * u.s, wrap_phase=3.2 * u.s)
    assert isinstance(tsf.time, TimeDelta)
    assert_allclose(tsf.time.sec, [0.8, 1.8, 2.8, 1.4, 2.4, 2.2], rtol=1e-6)
    # Now repeat the above tests but with normalization applied
    # Try without epoch time, as it should default to the first time and
    # wrapping at half the period.
    tsf = ts.fold(period=3.2 * u.s, normalize_phase=True)
    assert isinstance(tsf.time, Quantity)
    assert_allclose(
        tsf.time.to_value(u.one),
        [0, 1 / 3.2, -1.2 / 3.2, 0.6 / 3.2, -1.6 / 3.2, 1.4 / 3.2],
        rtol=1e-6,
    )
    # Try with epoch time
    tsf = ts.fold(
        period=3.2 * u.s, epoch_time=Time(1.6, format="unix"), normalize_phase=True
    )
    assert isinstance(tsf.time, Quantity)
    assert_allclose(
        tsf.time.to_value(u.one),
        [-0.6 / 3.2, 0.4 / 3.2, 1.4 / 3.2, 0.0 / 3.2, 1.0 / 3.2, 0.8 / 3.2],
        rtol=1e-6,
        atol=1e-6,
    )
    # Now with wrap_phase set to the full period
    tsf = ts.fold(period=3.2 * u.s, wrap_phase=1, normalize_phase=True)
    assert isinstance(tsf.time, Quantity)
    assert_allclose(
        tsf.time.to_value(u.one),
        [0, 1 / 3.2, 2 / 3.2, 0.6 / 3.2, 1.6 / 3.2, 1.4 / 3.2],
        rtol=1e-6,
    )
    # Now set epoch_phase to be 1/4 of the way through the phase
    tsf = ts.fold(period=3.2 * u.s, epoch_phase=0.25, normalize_phase=True)
    assert isinstance(tsf.time, Quantity)
    assert_allclose(
        tsf.time.to_value(u.one),
        [0.8 / 3.2, -1.4 / 3.2, -0.4 / 3.2, 1.4 / 3.2, -0.8 / 3.2, -1.0 / 3.2],
        rtol=1e-6,
    )
    # And combining epoch_phase and wrap_phase
    tsf = ts.fold(
        period=3.2 * u.s, epoch_phase=0.25, wrap_phase=1, normalize_phase=True
    )
    assert isinstance(tsf.time, Quantity)
    assert_allclose(
        tsf.time.to_value(u.one),
        [0.8 / 3.2, 1.8 / 3.2, 2.8 / 3.2, 1.4 / 3.2, 2.4 / 3.2, 2.2 / 3.2],
        rtol=1e-6,
    )
def test_fold_invalid_options():
    """fold() rejects wrong units and out-of-range phases for its keywords."""
    times = Time([1, 2, 3, 8, 9, 12], format="unix")
    ts = TimeSeries(time=times)
    ts["flux"] = [1, 4, 4, 3, 2, 3]
    # period must be a time Quantity.
    with pytest.raises(
        u.UnitsError, match="period should be a Quantity in units of time"
    ):
        ts.fold(period=3.2)
    with pytest.raises(
        u.UnitsError, match="period should be a Quantity in units of time"
    ):
        ts.fold(period=3.2 * u.m)
    # epoch_phase units must match the normalize_phase mode.
    with pytest.raises(
        u.UnitsError,
        match=(
            "epoch_phase should be a Quantity in units of "
            "time when normalize_phase=False"
        ),
    ):
        ts.fold(period=3.2 * u.s, epoch_phase=0.2)
    with pytest.raises(
        u.UnitsError,
        match=(
            "epoch_phase should be a dimensionless Quantity "
            "or a float when normalize_phase=True"
        ),
    ):
        ts.fold(period=3.2 * u.s, epoch_phase=0.2 * u.s, normalize_phase=True)
    # wrap_phase units must match the normalize_phase mode as well.
    with pytest.raises(
        u.UnitsError,
        match=(
            "wrap_phase should be a Quantity in units of "
            "time when normalize_phase=False"
        ),
    ):
        ts.fold(period=3.2 * u.s, wrap_phase=0.2)
    with pytest.raises(
        u.UnitsError,
        match="wrap_phase should be dimensionless when normalize_phase=True",
    ):
        ts.fold(period=3.2 * u.s, wrap_phase=0.2 * u.s, normalize_phase=True)
    # wrap_phase must lie within [0, period] (or [0, 1] when normalized).
    with pytest.raises(
        ValueError, match="wrap_phase should be between 0 and the period"
    ):
        ts.fold(period=3.2 * u.s, wrap_phase=-0.1 * u.s)
    with pytest.raises(
        ValueError, match="wrap_phase should be between 0 and the period"
    ):
        ts.fold(period=3.2 * u.s, wrap_phase=-4.2 * u.s)
    with pytest.raises(ValueError, match="wrap_phase should be between 0 and 1"):
        ts.fold(period=3.2 * u.s, wrap_phase=-0.1, normalize_phase=True)
    with pytest.raises(ValueError, match="wrap_phase should be between 0 and 1"):
        ts.fold(period=3.2 * u.s, wrap_phase=2.2, normalize_phase=True)
def test_pandas():
    """Round-trip a TimeSeries through pandas via from_pandas/to_pandas."""
    pandas = pytest.importorskip("pandas")
    df1 = pandas.DataFrame()
    df1["a"] = [1, 2, 3]
    df1.set_index(pandas.DatetimeIndex(INPUT_TIME.datetime64), inplace=True)
    ts = TimeSeries.from_pandas(df1)
    assert_equal(ts.time.isot, INPUT_TIME.isot)
    assert ts.colnames == ["time", "a"]
    assert len(ts.indices) == 1
    assert (ts.indices["time"].columns[0] == INPUT_TIME).all()
    # A non-default time scale can be requested on import.
    ts_tcb = TimeSeries.from_pandas(df1, time_scale="tcb")
    assert ts_tcb.time.scale == "tcb"
    # Export back to pandas preserves index and data.
    df2 = ts.to_pandas()
    assert (df2.index.values == pandas.Index(INPUT_TIME.datetime64).values).all()
    assert df2.columns == pandas.Index(["a"])
    assert (df1["a"] == df2["a"]).all()
    # Invalid inputs: not a DataFrame, or a DataFrame without a DatetimeIndex.
    with pytest.raises(TypeError, match=r"Input should be a pandas DataFrame"):
        TimeSeries.from_pandas(None)
    df4 = pandas.DataFrame()
    df4["a"] = [1, 2, 3]
    with pytest.raises(TypeError, match=r"DataFrame does not have a DatetimeIndex"):
        TimeSeries.from_pandas(df4)
def test_read_time_missing():
    """Reading with the default Table readers requires time_column."""
    with pytest.raises(
        ValueError,
        match=(
            r"``time_column`` should be provided since the default Table readers are"
            r" being used\."
        ),
    ):
        TimeSeries.read(CSV_FILE, format="csv")


def test_read_time_wrong():
    """A time_column absent from the file is reported by name."""
    with pytest.raises(
        ValueError, match=r"Time column 'abc' not found in the input data\."
    ):
        TimeSeries.read(CSV_FILE, time_column="abc", format="csv")


def test_read():
    """Reading the sample CSV yields the expected columns and values."""
    timeseries = TimeSeries.read(CSV_FILE, time_column="Date", format="csv")
    assert timeseries.colnames == ["time", "A", "B", "C", "D", "E", "F", "G"]
    assert len(timeseries) == 11
    assert timeseries["time"].format == "iso"
    assert timeseries["A"].sum() == 266.5
@pytest.mark.remote_data(source="astropy")
def test_kepler_astropy():
    """Read a remote Kepler light-curve FITS file with the kepler.fits reader."""
    from astropy.units import UnitsWarning

    filename = get_pkg_data_filename("timeseries/kplr010666592-2009131110544_slc.fits")
    with pytest.warns(UnitsWarning):
        timeseries = TimeSeries.read(filename, format="kepler.fits")
    assert timeseries["time"].format == "isot"
    assert timeseries["time"].scale == "tdb"
    assert timeseries["sap_flux"].unit.to_string() == "electron / s"
    assert len(timeseries) == 14280
    assert len(timeseries.columns) == 20


@pytest.mark.remote_data(source="astropy")
def test_tess_astropy():
    """Read a remote TESS light-curve FITS file and check the emitted warnings."""
    filename = get_pkg_data_filename(
        "timeseries/hlsp_tess-data-alerts_tess_phot_00025155310-s01_tess_v1_lc.fits"
    )
    with pytest.warns((UserWarning, UnitsWarning)) as record:
        timeseries = TimeSeries.read(filename, format="tess.fits")
    # we might hit some warnings more than once, but the exact sequence probably
    # does not matter too much, so we'll just try to match the *set* of unique warnings
    unique_warnings = {(wm.category, wm.message.args[0]) for wm in record}
    assert len(record) != len(unique_warnings)
    expected = {
        (
            UnitsWarning,
            "'BJD - 2457000, days' did not parse as fits unit: "
            "At col 0, Unit 'BJD' not supported by the FITS standard. "
            "If this is meant to be a custom unit, define it with 'u.def_unit'. "
            "To have it recognized inside a file reader or "
            "other code, enable it with 'u.add_enabled_units'. For details, see "
            "https://docs.astropy.org/en/latest/units/combining_and_defining.html",
        ),
        (
            UnitsWarning,
            "'e-/s' did not parse as fits unit: "
            "At col 0, Unit 'e' not supported by the FITS standard. "
            "If this is meant to be a custom unit, define it with 'u.def_unit'. "
            "To have it recognized inside a file reader or other code, "
            "enable it with 'u.add_enabled_units'. For details, see "
            "https://docs.astropy.org/en/latest/units/combining_and_defining.html",
        ),
        (
            UnitsWarning,
            "'pixels' did not parse as fits unit: "
            "At col 0, Unit 'pixels' not supported by the FITS standard. "
            "Did you mean pixel? "
            "If this is meant to be a custom unit, define it with 'u.def_unit'. "
            "To have it recognized inside a file "
            "reader or other code, enable it with 'u.add_enabled_units'. For details, "
            "see https://docs.astropy.org/en/latest/units/combining_and_defining.html",
        ),
        (UserWarning, "Ignoring 815 rows with NaN times"),
    }
    assert (
        unique_warnings == expected
    ), f"Got some unexpected warnings\n{unique_warnings - expected}"
    assert timeseries["time"].format == "isot"
    assert timeseries["time"].scale == "tdb"
    assert timeseries["sap_flux"].unit.to_string() == "electron / s"
    assert len(timeseries) == 19261
    assert len(timeseries.columns) == 20
def test_required_columns():
    """Column operations that would displace 'time' from the first slot fail."""
    # Test the machinery that makes sure that the required columns are present
    ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
    # In the examples below, the operation (e.g. remove_column) is actually
    # carried out before the checks are made, so we need to use copy() so that
    # we don't change the main version of the time series.
    # Make sure copy works fine
    ts.copy()
    MESSAGE = (
        r"TimeSeries object is invalid - expected 'time' as the first column but found"
        r" '{}'"
    )
    with pytest.raises(ValueError, match=MESSAGE.format("c")):
        ts.copy().add_column(Column([3, 4, 5], name="c"), index=0)
    with pytest.raises(ValueError, match=MESSAGE.format("d")):
        ts.copy().add_columns(
            [Column([3, 4, 5], name="d"), Column([3, 4, 5], name="e")], indexes=[0, 1]
        )
    with pytest.raises(ValueError, match=MESSAGE.format("a")):
        ts.copy().keep_columns(["a", "b"])
    with pytest.raises(ValueError, match=MESSAGE.format("a")):
        ts.copy().remove_column("time")
    with pytest.raises(ValueError, match=MESSAGE.format("b")):
        ts.copy().remove_columns(["time", "a"])
    with pytest.raises(ValueError, match=MESSAGE.format("banana")):
        ts.copy().rename_column("time", "banana")
    # https://github.com/astropy/astropy/issues/13009
    MESSAGE = (
        r"TimeSeries object is invalid - expected \['time', 'a'\] as the first columns"
        r" but found \['time', 'b'\]"
    )
    ts_2cols_required = ts.copy()
    ts_2cols_required._required_columns = ["time", "a"]
    with pytest.raises(ValueError, match=MESSAGE):
        ts_2cols_required.remove_column("a")
@pytest.mark.parametrize("cls", [BoxLeastSquares, LombScargle])
def test_periodogram(cls):
    """from_timeseries wires the time/value/uncertainty columns correctly."""
    # Note that we don't need to check the actual results from the periodogram
    # classes here since these are tested extensively in
    # astropy.timeseries.periodograms.
    ts = TimeSeries(time=INPUT_TIME, data=[[10, 2, 3], [4, 5, 6]], names=["a", "b"])
    p1 = cls.from_timeseries(ts, "a")
    assert isinstance(p1, cls)
    assert_allclose(p1.t.jd, ts.time.jd)
    assert_equal(p1.y, ts["a"])
    assert p1.dy is None
    # Uncertainty can come from a column name or a scalar.
    p2 = cls.from_timeseries(ts, "a", uncertainty="b")
    assert_quantity_allclose(p2.dy, ts["b"])
    p3 = cls.from_timeseries(ts, "a", uncertainty=0.1)
    assert_allclose(p3.dy, 0.1)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@timeseries@tests@test_sampled.py@.PATH_END.py
|
{
"filename": "ChainContext.py",
"repo_name": "BradGreig/21CMMC",
"repo_path": "21CMMC_extracted/21CMMC-master/21CMMC_SourceCode/Programs/CosmoHammer_21CMMC/likelihood/ChainContext.py",
"type": "Python"
}
|
from CosmoHammer_21CMMC.likelihood.ChainConstants import *
class ChainContext(object):
    """
    Context holding a dict to store data and information during the
    computation of the likelihood.
    """

    def __init__(self, parent, params):
        """
        Constructor of the context.

        :param parent: the chain owning this context (stored under PARENT_KEY)
        :param params: the parameter vector being evaluated (stored under PARAMS_KEY)
        """
        self._data = dict()
        self.add(PARENT_KEY, parent)
        self.add(PARAMS_KEY, params)
        self.add(DATA_KEY, dict())

    def add(self, key, value):
        """
        Adds the value to the context using the key.

        :param key: string
            key to use
        :param value: object
            the value to store
        """
        self._data[key] = value

    def remove(self, key):
        """
        Removes the value from the context.

        :param key: string
            key to remove from the context
        :raises AssertionError: if key is None
        :raises KeyError: if key is not present in the context
        """
        # Bug fix: the original `assert key != None ("...")` parsed as a call
        # None("..."), so remove() always raised TypeError. Use the proper
        # two-operand assert form instead.
        assert key is not None, "The key must not be empty!"
        del self._data[key]

    def contains(self, key):
        """
        Checks if the key is in the context.

        :param key: string
            key to check
        :return: True if the key is in the context
        """
        return key in self._data

    def get(self, key, default=None):
        """
        Returns the value stored in the context at the key, or the default
        value if the context doesn't contain the key.

        :param key: string
            key to use
        :param default: object
            the default value to use if the key is not available
        """
        if self.contains(key):
            return self._data[key]
        return default

    def getParams(self):
        """
        Returns the currently processed parameters.

        :return: The params of this context
        """
        return self.get(PARAMS_KEY)

    def getParent(self):
        """
        Returns the parent.

        :return: The parent chain of this context
        """
        return self.get(PARENT_KEY)

    def getData(self):
        """
        Returns the data.

        :return: The data of this context
        """
        return self.get(DATA_KEY)
|
BradGreigREPO_NAME21CMMCPATH_START.@21CMMC_extracted@21CMMC-master@21CMMC_SourceCode@Programs@CosmoHammer_21CMMC@likelihood@ChainContext.py@.PATH_END.py
|
{
"filename": "test_scrape.py",
"repo_name": "rhayes777/PyAutoFit",
"repo_path": "PyAutoFit_extracted/PyAutoFit-main/test_autofit/aggregator/test_scrape.py",
"type": "Python"
}
|
import pytest
from autofit import SearchOutput
from autofit.database.aggregator.scrape import _add_files
class MockFit:
    """Test double for a database fit: records JSON payloads, ignores the rest."""

    # Fixed identifier returned wherever a fit id is expected.
    id = "id"

    def __init__(self):
        # JSON payloads captured via set_json, keyed by name.
        self.jsons = {}

    def set_json(self, name, json):
        """Record *json* under *name* so the test can inspect it later."""
        self.jsons[name] = json

    def __setitem__(self, key, value):
        """Accept item assignment and discard it."""

    def set_pickle(self, key, value):
        """Accept a pickle payload and discard it."""

    def set_array(self, key, value):
        """Accept an array payload and discard it."""

    def set_hdu(self, key, value):
        """Accept an HDU payload and discard it."""
@pytest.fixture(name="fit")
def make_fit(directory):
    """Scrape the on-disk search output fixture into a MockFit.

    `directory` is a pytest fixture defined elsewhere (presumably in
    conftest.py -- not visible here).
    """
    fit = MockFit()
    _add_files(
        fit=fit,
        item=SearchOutput(directory / "search_output"),
    )
    return fit
def test_add_files(fit):
    """The scraped search output attaches the model JSON to the fit."""
    # Expected serialisation of the Gaussian model priors found on disk.
    assert fit.jsons["model"] == {
        "class_path": "autofit.example.model.Gaussian",
        "type": "model",
        "arguments": {
            "centre": {
                "lower_limit": "-inf",
                "upper_limit": "inf",
                "type": "Gaussian",
                "id": 0,
                "mean": 1.0,
                "sigma": 1.0,
            },
            "normalization": {
                "lower_limit": "-inf",
                "upper_limit": "inf",
                "type": "Gaussian",
                "id": 1,
                "mean": 1.0,
                "sigma": 1.0,
            },
            "sigma": {
                "lower_limit": "-inf",
                "upper_limit": "inf",
                "type": "Gaussian",
                "id": 2,
                "mean": 1.0,
                "sigma": 1.0,
            },
        },
    }
def test_add_recursive(fit):
    """JSON files found in nested directories are keyed by their dotted path."""
    assert fit.jsons["directory.example"] == {
        "hello": "world",
    }
|
rhayes777REPO_NAMEPyAutoFitPATH_START.@PyAutoFit_extracted@PyAutoFit-main@test_autofit@aggregator@test_scrape.py@.PATH_END.py
|
{
"filename": "BADASS3_config_test_example-checkpoint.ipynb",
"repo_name": "remingtonsexton/BADASS3",
"repo_path": "BADASS3_extracted/BADASS3-master/example_notebooks/.ipynb_checkpoints/BADASS3_config_test_example-checkpoint.ipynb",
"type": "Jupyter Notebook"
}
|
## Bayesian AGN Decomposition Analysis for SDSS Spectra (BADASS)
### Single Spectrum Configuration Test Example
This notebook shows how to perform configuration testing. Configuration testing is different from line testing. Line testing performs tests on individual lines, whereas configurations can be multiple different lines simultaneously. As a result, configuration testing requires the user to explicitly define which lines to test and the order to test them in. BADASS will *not* discriminate between simple and complex models, but instead performs the fits in the order the user supplies them, and then will choose the best one. It is up to the user to order the configurations in increasing complexity if the user wishes to test in a way similar to the line test.
In this example, we showcase how the configuration test is best utilized, and where the line test would fail. Here we determine the best configuration to fit to the superposition of broad and narrow lines for the H$\alpha$/[NII] region. The line test would fail in this regard because the line test cannot test broad and narrow lines simultaneously.
#### Remington O. Sexton$^{1}$, Sara M. Doan$^{2}$, William Matzko$^{2}$ Michael A. Reefe$^{2}$,
$^{1}$United States Naval Observatory, $^{2}$George Mason University
```python
import glob
import time
import natsort
from IPython.display import clear_output
import os
import sys
import psutil
import pathlib
import natsort
# Import BADASS here
BADASS_DIR = pathlib.Path(os.getcwd()).resolve().parent
sys.path.insert(1,str(BADASS_DIR))
import badass as badass
import badass_check_input
import badass_config_options
from IPython.display import display, HTML
display(HTML("<style>.container { width:85% !important; }</style>"))
```
### BADASS Options
```python
options_file = "badass_config_options.py"
if os.path.exists(options_file):
print("\n Options file %s found.\n" % (options_file))
```
### Run BADASS on a single spectrum
The following is shows how to fit single SDSS spectra.
#### Directory Structure
```python
nobj = 2 # Object in the spec_dir list
########################## Directory Structure #################################
spec_dir = '../example_spectra/' # folder with spectra in it
# Get full list of spectrum folders; these will be the working directories
spec_loc = natsort.natsorted( glob.glob(spec_dir+'*') )[nobj]
################################################################################
print(len(spec_loc))
print(spec_loc)
```
#### Choose Spectrum
```python
file = glob.glob(spec_loc+'/*.fits')[0] # Get name of FITS spectra file
print(file)
```
#### Run IRSA Dust Query
To correct for Galactic extinction. This only needs to be done once so that the data is stored locally.
```python
badass_check_input.fetch_IRSA_dust(spec_loc)
```
#### Run
```python
import numpy as np
import importlib
importlib.reload(badass)
importlib.reload(badass_config_options)
if 1:
# for i in range(1):
# np.random.seed()
# print("\n---------------------------------------------------------")
# print(" Begin Test %d of %d" % (i+1,10))
# print("---------------------------------------------------------")
# Call the main function in BADASS
badass.run_BADASS(pathlib.Path(file),
options_file = options_file,
)
#
```
```python
```
```python
```
|
remingtonsextonREPO_NAMEBADASS3PATH_START.@BADASS3_extracted@BADASS3-master@example_notebooks@.ipynb_checkpoints@BADASS3_config_test_example-checkpoint.ipynb@.PATH_END.py
|
{
"filename": "conviqt.ipynb",
"repo_name": "hpc4cmb/toast",
"repo_path": "toast_extracted/toast-main/tutorial/04_Simulated_Instrument_Signal/conviqt.ipynb",
"type": "Jupyter Notebook"
}
|
# $4\pi$ beam convolution
TOAST provides an interface, `OpSimConviqt`, to the spherical harmonic convolution library, `libconviqt`. It was developed by Gary Prezeau and Martin Reinecke and described in
```
G. Prézeau and M. Reinecke:
Algorithm for the Evaluation of Reduced Wigner Matrices,
APJS 190 (2010) 267
```
[arXiv:1002.1050](https://arxiv.org/abs/1002.1050). This particular implementation of the algorithm is available at https://github.com/hpc4cmb/libconviqt.
```python
# Load common tools for all lessons
import sys
sys.path.insert(0, "..")
from lesson_tools import (
fake_focalplane
)
# Capture C++ output in the jupyter cells
%reload_ext wurlitzer
```
## Method
`libconviqt` takes in spherical harmonic expansions of the beam and the sky and then synthesizes TOD samples at sample positions in the proper orientation. For efficiency, the sky is distributed as isolatitude rings and then each process gets the detector samples that fall on their rings. The calculation itself has two steps: first `conviqt` builds a 3D interpolator of the beam-convolved sky on a grid of $(\theta, \phi, \psi)$, and then the detector samples are interpolated from the grid. Finally, the samples are communicated back to the processes that own them.
Typically the interpolation step dominates but if there are few detector samples and the sky and beam expansion orders are high, it is possible that building the interpolator is more expensive.
## Example
In this section we create a TOAST data object with simulated signal and noise and process the data into hit maps, pixel noise matrices, and signal maps.
```python
import toast
import toast.todmap
import toast.pipeline_tools
from toast.mpi import MPI
import numpy as np
import matplotlib.pyplot as plt
mpiworld, procs, rank = toast.mpi.get_world()
comm = toast.mpi.Comm(mpiworld)
# A pipeline would create the args object with argparse
class args:
sample_rate = 10 # Hz
hwp_rpm = None
hwp_step_deg = None
hwp_step_time_s = None
spin_period_min = 1 # 10
spin_angle_deg = 20 # 30
prec_period_min = 100 # 50
prec_angle_deg = 30 # 65
coord = "E"
nside = 64
nnz = 3
outdir = "maps"
sky_file = "slm.fits"
beam_file = "blm.fits"
# Create a fake focalplane, we could also load one from file.
# The Focalplane class interprets the focalplane dictionary
# created by fake_focalplane() but it can also load the information
# from file.
focalplane = fake_focalplane(samplerate=args.sample_rate, fknee=0.1, alpha=2)
detectors = sorted(focalplane.keys())
detquats = {}
for d in detectors:
detquats[d] = focalplane[d]["quat"]
nsample = 100000
start_sample = 0
start_time = 0
iobs = 0
tod = toast.todmap.TODSatellite(
comm.comm_group,
detquats,
nsample,
coord=args.coord,
firstsamp=start_sample,
firsttime=start_time,
rate=args.sample_rate,
spinperiod=args.spin_period_min,
spinangle=args.spin_angle_deg,
precperiod=args.prec_period_min,
precangle=args.prec_angle_deg,
detranks=comm.group_size,
hwprpm=args.hwp_rpm,
hwpstep=args.hwp_step_deg,
hwpsteptime=args.hwp_step_time_s,
)
# Constantly slewing precession axis
precquat = np.empty(4 * tod.local_samples[1], dtype=np.float64).reshape((-1, 4))
toast.todmap.slew_precession_axis(
precquat,
firstsamp=start_sample + tod.local_samples[0],
samplerate=args.sample_rate,
degday=360.0 / 365.25,
)
tod.set_prec_axis(qprec=precquat)
noise = toast.pipeline_tools.get_analytic_noise(args, comm, focalplane)
obs = {}
obs["name"] = "science_{:05d}".format(iobs)
obs["tod"] = tod
obs["intervals"] = None
obs["baselines"] = None
obs["noise"] = noise
obs["id"] = iobs
# Conviqt requires at least minimal focal plane information to be present in the observation
obs["focalplane"] = toast.pipeline_tools.Focalplane(focalplane)
"""
for det in tod.local_dets:
obs["focalplane"][det] = {
"epsilon" : focalplane[det]["epsilon"],
}
if det.endswith("A"):
obs["focalplane"][det]["psi_pol_deg"] = 0,
elif det.endswith("B"):
obs["focalplane"][det]["psi_pol_deg"] = 90,
"""
data = toast.Data(comm)
data.obs.append(obs)
```
### Create a high resolution point source map to convolve with the beam
```python
import healpy as hp
import numpy as np
nside_high = 1024
npix_high = 12 * nside_high ** 2
pointsource_map = np.zeros([3, npix_high])
coords = []
for lon in np.linspace(0, 360, 9, endpoint=False):
for lat in np.linspace(-90, 90, 7):
pix = hp.ang2pix(nside_high, lon, lat, lonlat=True)
# Add a completely unpolarized source and see if beam asymmetries manufacture polarization
pointsource_map[0, pix] = 1
coords.append((lon, lat))
coords = np.vstack(coords).T
hp.mollview(np.zeros(12), title="Input signal", cmap="coolwarm")
hp.projplot(np.pi/2 - np.radians(coords[1]), np.radians(coords[0]), 'o')
lmax_high = nside_high * 2
cl, alm = hp.anafast(pointsource_map, lmax=lmax_high, iter=0, alm=True)
hp.write_map("sim_sources_map.fits", hp.reorder(pointsource_map, r2n=True), nest=True, overwrite=True)
hp.write_alm(args.sky_file, alm, overwrite=True)
```
### Create asymmetric beam
```python
beam_map = np.zeros([3, npix_high])
x, y, z = hp.pix2vec(nside_high, np.arange(npix_high))
```
```python
xvar = .01
yvar = 5 * xvar
beam = np.exp(-(x ** 2 / xvar + y ** 2 / yvar))
beam[z < 0] = 0
hp.mollview(beam, cmap="coolwarm", rot=[0, 90])
beam_map = np.zeros([3, npix_high])
beam_map[0] = beam
beam_map[1] = beam
bl, blm = hp.anafast(beam_map, lmax=lmax_high, iter=0, alm=True)
hp.write_alm(args.beam_file, blm, overwrite=True)
```
### Now simulate sky signal
```python
import toast
toast.todmap.OpPointingHpix(nside=args.nside, nest=True, mode="IQU").exec(data)
```
```python
npix = 12 * args.nside ** 2
hitmap = np.zeros(npix)
tod = data.obs[0]["tod"]
for det in tod.local_dets:
pixels = tod.cache.reference("pixels_{}".format(det))
hitmap[pixels] = 1
hitmap[hitmap == 0] = hp.UNSEEN
hp.mollview(hitmap, nest=True, title="all hit pixels", cbar=False)
hp.graticule(22.5, verbose=False)
```
```python
name = "signal"
toast.tod.OpCacheClear(name).exec(data)
conviqt = toast.todmap.OpSimConviqt(
comm.comm_rank,
args.sky_file,
args.beam_file,
lmax=512, # Will use maximum from file
beammmax=16, # Will use maximum from file
pol=True,
fwhm=0,
order=13,
calibrate=True,
dxx=True,
out=name,
quat_name=None,
flag_name=None,
flag_mask=255,
common_flag_name=None,
common_flag_mask=255,
apply_flags=False,
remove_monopole=False,
remove_dipole=False,
normalize_beam=True,
verbosity=1,
)
conviqt.exec(data)
```
Destripe the signal and make a map. We use the nascent TOAST mapmaker because it can be run in serial mode without MPI. The TOAST mapmaker is still significantly slower so production runs should used `libMadam`.
```python
mapmaker = toast.todmap.OpMapMaker(
nside=args.nside,
nnz=3,
name=name,
outdir=args.outdir,
outprefix="toast_test_",
baseline_length=10,
# maskfile=self.maskfile_binary,
# weightmapfile=self.maskfile_smooth,
# subharmonic_order=None,
iter_max=100,
use_noise_prior=False,
# precond_width=30,
)
mapmaker.exec(data)
```
Plot a segment of the timelines
```python
plt.figure(figsize=[12, 8])
hitmap = hp.read_map("maps/toast_test_hits.fits")
hitmap[hitmap == 0] = hp.UNSEEN
hp.mollview(hitmap, sub=[2, 2, 1], title="hits")
binmap = hp.read_map("maps/toast_test_binned.fits")
binmap[binmap == 0] = hp.UNSEEN
hp.mollview(binmap, sub=[2, 2, 2], title="binned map", cmap="coolwarm")
destriped = hp.read_map("maps/toast_test_destriped.fits")
destriped[destriped == 0] = hp.UNSEEN
hp.mollview(destriped, sub=[2, 2, 3], title="destriped map", cmap="coolwarm")
inmap = hp.ud_grade(hp.read_map("sim_sources_map.fits"), args.nside)
inmap[hitmap == hp.UNSEEN] = hp.UNSEEN
hp.mollview(inmap, sub=[2, 2, 4], title="input map", cmap="coolwarm")
```
## Exercises
- Plot the polarization of the simulated signal above
- Modify the scan strategy so that the beam elongation is more visible
```python
```
|
hpc4cmbREPO_NAMEtoastPATH_START.@toast_extracted@toast-main@tutorial@04_Simulated_Instrument_Signal@conviqt.ipynb@.PATH_END.py
|
{
"filename": "test_embeddings.py",
"repo_name": "langchain-ai/langchain",
"repo_path": "langchain_extracted/langchain-master/libs/cli/langchain_cli/integration_template/tests/integration_tests/test_embeddings.py",
"type": "Python"
}
|
"""Test __ModuleName__ embeddings."""
from typing import Type
from __module_name__.embeddings import __ModuleName__Embeddings
from langchain_tests.integration_tests import EmbeddingsIntegrationTests
class TestParrotLinkEmbeddingsIntegration(EmbeddingsIntegrationTests):
    """Standard embeddings integration-test suite for the scaffolded package.

    Template file: the ``__ModuleName__`` / ``__module_name__`` placeholders
    are substituted when a new integration package is generated by the CLI.
    """

    @property
    def embeddings_class(self) -> Type[__ModuleName__Embeddings]:
        # The embeddings implementation under test.
        return __ModuleName__Embeddings

    @property
    def embedding_model_params(self) -> dict:
        # Constructor kwargs used by the suite to instantiate the model.
        return {"model": "nest-embed-001"}
|
langchain-aiREPO_NAMElangchainPATH_START.@langchain_extracted@langchain-master@libs@cli@langchain_cli@integration_template@tests@integration_tests@test_embeddings.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "Jammy2211/PyAutoLens",
"repo_path": "PyAutoLens_extracted/PyAutoLens-main/test_autolens/interferometer/plot/__init__.py",
"type": "Python"
}
|
Jammy2211REPO_NAMEPyAutoLensPATH_START.@PyAutoLens_extracted@PyAutoLens-main@test_autolens@interferometer@plot@__init__.py@.PATH_END.py
|
|
{
"filename": "__init__.py",
"repo_name": "NASA-Planetary-Science/sbpy",
"repo_path": "sbpy_extracted/sbpy-main/sbpy/data/tests/__init__.py",
"type": "Python"
}
|
NASA-Planetary-ScienceREPO_NAMEsbpyPATH_START.@sbpy_extracted@sbpy-main@sbpy@data@tests@__init__.py@.PATH_END.py
|
|
{
"filename": "_name.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/mesh3d/colorbar/tickformatstop/_name.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class NameValidator(_plotly_utils.basevalidators.StringValidator):
    """Validator for the `name` property of `mesh3d.colorbar.tickformatstop`."""

    def __init__(
        self, plotly_name="name", parent_name="mesh3d.colorbar.tickformatstop", **kwargs
    ):
        # Default edit type triggers a colorbar redraw unless overridden.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(NameValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@mesh3d@colorbar@tickformatstop@_name.py@.PATH_END.py
|
{
"filename": "_opacitysrc.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py2/plotly/validators/funnel/marker/_opacitysrc.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class OpacitysrcValidator(_plotly_utils.basevalidators.SrcValidator):
    # Auto-generated validator for `funnel.marker.opacitysrc` (Plotly py2 tree).
    def __init__(self, plotly_name="opacitysrc", parent_name="funnel.marker", **kwargs):
        super(OpacitysrcValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            # "none" edit type: changing this property triggers no redraw.
            edit_type=kwargs.pop("edit_type", "none"),
            role=kwargs.pop("role", "info"),
            **kwargs
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py2@plotly@validators@funnel@marker@_opacitysrc.py@.PATH_END.py
|
{
"filename": "_color.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/choroplethmapbox/colorbar/tickfont/_color.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ColorValidator(_plotly_utils.basevalidators.ColorValidator):
    """Validator for the `color` property of `choroplethmapbox.colorbar.tickfont`."""

    def __init__(
        self,
        plotly_name="color",
        parent_name="choroplethmapbox.colorbar.tickfont",
        **kwargs,
    ):
        # Default edit type redraws colorbars only, unless the caller overrides.
        edit_type = kwargs.pop("edit_type", "colorbars")
        super(ColorValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@choroplethmapbox@colorbar@tickfont@_color.py@.PATH_END.py
|
{
"filename": "log.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/traitlets/py3/traitlets/log.py",
"type": "Python"
}
|
"""Grab the global logger instance."""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import annotations
import logging
from typing import Any
_logger: logging.Logger | logging.LoggerAdapter[Any] | None = None
def get_logger() -> logging.Logger | logging.LoggerAdapter[Any]:
    """Grab the global logger instance.

    If a global Application is instantiated, grab its logger.
    Otherwise, grab the root "traitlets" logger.
    """
    global _logger  # noqa: PLW0603
    if _logger is not None:
        return _logger
    # Import here to avoid a circular import at module load time.
    from .config import Application

    if Application.initialized():
        _logger = Application.instance().log
    else:
        _logger = logging.getLogger("traitlets")
        # Add a NullHandler to silence warnings about not being
        # initialized, per best practice for libraries.
        _logger.addHandler(logging.NullHandler())
    return _logger
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@traitlets@py3@traitlets@log.py@.PATH_END.py
|
{
"filename": "setup.py",
"repo_name": "rpoleski/MulensModel",
"repo_path": "MulensModel_extracted/MulensModel-master/setup.py",
"type": "Python"
}
|
from pathlib import Path
import platform
from setuptools import Extension, find_packages, setup
# Project layout: this setup.py lives at the repository root.
PROJECT_PATH = Path(__file__).resolve().parent
SOURCE_PATH = PROJECT_PATH / "source"
DATA_PATH = PROJECT_PATH / "data"
# Install requirements are read from requirements.txt and also shipped as data.
file_required = PROJECT_PATH / "requirements.txt"
with file_required.open() as file_:
    install_requires = file_.read().splitlines()
data_files = [
    (
        str(file_required.relative_to(PROJECT_PATH)),
        [str(file_required.relative_to(PROJECT_PATH))],
    )
]
# Parse __version__ out of source/MulensModel/version.py without importing it.
version = "unknown"
with Path(SOURCE_PATH / "MulensModel" / "version.py").open() as in_put:
    for line_ in in_put.readlines():
        if line_.startswith('__version__'):
            # NOTE(review): assumes the line is `__version__ = "X.Y.Z"`;
            # split()[2][1:-1] strips the quote characters -- fragile, verify.
            version = line_.split()[2][1:-1]
source_VBBL = SOURCE_PATH / "VBBL"
source_AC = SOURCE_PATH / "AdaptiveContouring"
source_MM = SOURCE_PATH / "MulensModel"
source_MMmo = source_MM / "mulensobjects"
# C/C++ Extensions
kwargs = dict()
if platform.system().upper() != "WINDOWS":
    # Link libm explicitly on POSIX systems; MSVC has no separate math library.
    kwargs['libraries'] = ["m"]
ext_AC = Extension(
    "MulensModel.AdaptiveContouring", **kwargs,
    sources=[str(f.relative_to(PROJECT_PATH)) for f in source_AC.glob("*.c")])
ext_VBBL = Extension(
    "MulensModel.VBBL", **kwargs,
    sources=[
        str(f.relative_to(PROJECT_PATH)) for f in source_VBBL.glob("*.cpp")])
setup(
    name='MulensModel',
    version=version,
    url='https://github.com/rpoleski/MulensModel',
    project_urls={
        'documentation': 'https://github.com/rpoleski/MulensModel'},
    ext_modules=[ext_AC, ext_VBBL],
    author='Radek Poleski & Jennifer Yee',
    author_email='radek.poleski@gmail.com',
    description='package for modeling gravitational microlensing events',
    long_description='package for modeling gravitational microlensing events',
    packages=find_packages(where="source"),
    package_dir={"": "source"},
    include_package_data=True,
    data_files=data_files,
    python_requires=">=3.6",
    install_requires=install_requires,
)
|
rpoleskiREPO_NAMEMulensModelPATH_START.@MulensModel_extracted@MulensModel-master@setup.py@.PATH_END.py
|
{
"filename": "_x.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/surface/contours/_x.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class XValidator(_plotly_utils.basevalidators.CompoundValidator):
    # Auto-generated compound validator for `surface.contours.x`.
    def __init__(self, plotly_name="x", parent_name="surface.contours", **kwargs):
        super(XValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "X"),
            # data_docs is runtime help text shown to users; keep verbatim.
            data_docs=kwargs.pop(
                "data_docs",
                """
            color
                Sets the color of the contour lines.
            end
                Sets the end contour level value. Must be more
                than `contours.start`
            highlight
                Determines whether or not contour lines about
                the x dimension are highlighted on hover.
            highlightcolor
                Sets the color of the highlighted contour
                lines.
            highlightwidth
                Sets the width of the highlighted contour
                lines.
            project
                :class:`plotly.graph_objects.surface.contours.x
                .Project` instance or dict with compatible
                properties
            show
                Determines whether or not contour lines about
                the x dimension are drawn.
            size
                Sets the step between each contour level. Must
                be positive.
            start
                Sets the starting contour level value. Must be
                less than `contours.end`
            usecolormap
                An alternate to "color". Determines whether or
                not the contour lines are colored using the
                trace "colorscale".
            width
                Sets the width of the contour lines.
""",
            ),
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@surface@contours@_x.py@.PATH_END.py
|
{
"filename": "test_hotregions.py",
"repo_name": "xpsi-group/xpsi",
"repo_path": "xpsi_extracted/xpsi-main/xpsi/tests/test_hotregions.py",
"type": "Python"
}
|
import numpy as np
from xpsi.HotRegion import HotRegion
from xpsi.HotRegions import HotRegions
import pytest
class TestHotRegions(object):
    """Tests for the HotRegions container wrapping multiple HotRegion objects."""

    def setup_class(cls):
        # Shared parameter bounds; (None, None) leaves a bound unconstrained.
        cls.super_colatitude_bounds = (None, None)
        cls.super_radius_bounds = (None, None)
        cls.phase_shift_bounds = (0.0, 0.1)
        cls.super_temperature_bounds = (5.1, 6.8)
        cls.bounds = {'super_colatitude': cls.super_colatitude_bounds,
                      'super_radius': cls.super_radius_bounds,
                      'phase_shift': cls.phase_shift_bounds,
                      'super_temperature': cls.super_temperature_bounds,}
        cls.values = {}
        # Two regions distinguished only by their parameter-name prefix.
        cls.primary = HotRegion(cls.bounds, cls.values, prefix='p')
        cls.secondary = HotRegion(cls.bounds, cls.values, prefix='s')

    def test_hotregions_class_initializes(self):
        # A tuple of HotRegion objects is the expected constructor argument.
        hrs = HotRegions((self.primary, self.secondary))

    def test_hotregions_breaks_with_one_hotregion(self):
        # NOTE: `(self.primary)` has no trailing comma, so it is *not* a tuple
        # -- a bare HotRegion is passed, which HotRegions is expected to reject.
        with pytest.raises(ValueError):
            hrs = HotRegions((self.primary))
|
xpsi-groupREPO_NAMExpsiPATH_START.@xpsi_extracted@xpsi-main@xpsi@tests@test_hotregions.py@.PATH_END.py
|
{
"filename": "screening_util.py",
"repo_name": "pynucastro/pynucastro",
"repo_path": "pynucastro_extracted/pynucastro-main/pynucastro/screening/screening_util.py",
"type": "Python"
}
|
"""Some helper functions for determining which rates need screening"""
from pynucastro.nucdata import Nucleus
from pynucastro.rates import ApproximateRate
class ScreeningPair:
    """A pair of nuclei that will have rate screening applied.

    Keeps track of every rate that shares this screening pair.
    """

    def __init__(self, name, nuc1, nuc2, rate=None):
        self.name = name
        self.n1 = nuc1
        self.n2 = nuc2
        # Start with the single supplied rate, or an empty list.
        self.rates = [] if rate is None else [rate]

    def add_rate(self, rate):
        """Register *rate* with this pair, ignoring duplicates."""
        if rate not in self.rates:
            self.rates.append(rate)

    def __str__(self):
        pieces = [f"screening for {self.n1} + {self.n2}", "rates:"]
        pieces.extend(f"  {r}" for r in self.rates)
        return "\n".join(pieces) + "\n"

    def __eq__(self, other):
        # Equality on the name alone -- the name encodes the reaction pair.
        return self.name == other.name
def get_screening_map(rates, *, symmetric_screening=False):
    """a screening map is just a list of ScreeningPair objects
    containing the information about nuclei pairs for screening If
    symmetric_screening=True, then for reverse rates, we screen using
    the forward rate nuclei (assuming that we got here via detailed
    balance).
    """
    screening_map = []
    # we need to consider the child rates that come with ApproximateRate
    all_rates = []
    for r in rates:
        if isinstance(r, ApproximateRate):
            all_rates += r.get_child_rates()
        else:
            all_rates.append(r)
    for r in all_rates:
        screen_nuclei = r.ion_screen
        if symmetric_screening:
            screen_nuclei = r.symmetric_screen
        # screen_nuclei may be [] if it is a decay, gamma-capture, or
        # neutron-capture
        if not screen_nuclei:
            continue
        # Key identifying this pair (or triple, for 3-alpha), e.g. "He4_C12".
        nucs = "_".join([str(q) for q in screen_nuclei])
        scr = [q for q in screening_map if q.name == nucs]
        # Names are unique within the map, so at most one match is possible.
        assert len(scr) <= 1
        if scr:
            # we already have the reactants in our map, so we
            # will already be doing the screening factors.
            # Just append this new rate to the list we are
            # keeping of the rates where this screening is
            # needed -- if the rate is already in the list, then
            # this is a no-op
            scr[0].add_rate(r)
            # if we got here because nuc == "He4_He4_He4",
            # then we also have to add to "He4_He4_He4_dummy"
            if nucs == "He4_He4_He4":
                scr2 = [q for q in screening_map if q.name == nucs + "_dummy"]
                assert len(scr2) == 1
                scr2[0].add_rate(r)
        else:
            # we handle 3-alpha specially -- we actually need
            # 2 screening factors for it
            if nucs == "He4_He4_He4":
                # he4 + he4
                scr1 = ScreeningPair(nucs, screen_nuclei[0], screen_nuclei[1], r)
                # he4 + be8
                be8 = Nucleus("Be8", dummy=True)
                scr2 = ScreeningPair(nucs + "_dummy", screen_nuclei[2], be8, r)
                screening_map.append(scr1)
                screening_map.append(scr2)
            else:
                scr1 = ScreeningPair(nucs, screen_nuclei[0], screen_nuclei[1], r)
                screening_map.append(scr1)
    return screening_map
|
pynucastroREPO_NAMEpynucastroPATH_START.@pynucastro_extracted@pynucastro-main@pynucastro@screening@screening_util.py@.PATH_END.py
|
{
"filename": "_r.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/barpolar/_r.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class RValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the `r` data array of `barpolar` traces."""

    def __init__(self, plotly_name="r", parent_name="barpolar", **kwargs):
        # Changing r forces a recalc and clears cached axis types by default.
        edit_type = kwargs.pop("edit_type", "calc+clearAxisTypes")
        super(RValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@barpolar@_r.py@.PATH_END.py
|
{
"filename": "_showticksuffix.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/histogram2d/colorbar/_showticksuffix.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class ShowticksuffixValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Validator for `histogram2d.colorbar.showticksuffix`."""

    def __init__(
        self, plotly_name="showticksuffix", parent_name="histogram2d.colorbar", **kwargs
    ):
        # Which ticks carry the suffix; callers may override the allowed set.
        allowed_values = kwargs.pop("values", ["all", "first", "last", "none"])
        super(ShowticksuffixValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=kwargs.pop("edit_type", "colorbars"),
            values=allowed_values,
            **kwargs,
        )
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@histogram2d@colorbar@_showticksuffix.py@.PATH_END.py
|
{
"filename": "utils.py",
"repo_name": "astropy/astropy",
"repo_path": "astropy_extracted/astropy-main/astropy/units/utils.py",
"type": "Python"
}
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Miscellaneous utilities for `astropy.units`.
None of the functions in the module are meant for use outside of the
package.
"""
from __future__ import annotations
import io
import re
from fractions import Fraction
from typing import TYPE_CHECKING, overload
import numpy as np
from numpy import finfo
from .errors import UnitScaleError
if TYPE_CHECKING:
from collections.abc import Generator, Sequence
from typing import Literal, SupportsFloat, TypeVar
from numpy.typing import NDArray
from .core import UnitBase
from .quantity import Quantity
from .typing import Complex, Real, UnitPower, UnitScale
DType = TypeVar("DType", bound=np.generic)
FloatLike = TypeVar("FloatLike", bound=SupportsFloat)
_float_finfo = finfo(float)
# take float here to ensure comparison with another float is fast
# give a little margin since often multiple calculations happened
_JUST_BELOW_UNITY = float(1.0 - 4.0 * _float_finfo.epsneg)
_JUST_ABOVE_UNITY = float(1.0 + 4.0 * _float_finfo.eps)
def _get_first_sentence(s: str) -> str:
"""
Get the first sentence from a string and remove any carriage
returns.
"""
x = re.match(r".*?\S\.\s", s)
if x is not None:
s = x.group(0)
return s.replace("\n", " ")
def _iter_unit_summary(
    namespace: dict[str, object],
) -> Generator[tuple[UnitBase, str, str, str, Literal["Yes", "No"]], None, None]:
    """
    Generates the ``(unit, doc, represents, aliases, prefixes)``
    tuple used to format the unit summary docs in `generate_unit_summary`.
    """
    # Imported lazily to avoid a circular import with astropy.units.core.
    from . import core

    # Get all of the units, and keep track of which ones have SI
    # prefixes
    units = []
    has_prefixes = set()
    for key, val in namespace.items():
        # Skip non-unit items
        if not isinstance(val, core.UnitBase):
            continue
        # Skip aliases
        if key != val.name:
            continue
        if isinstance(val, core.PrefixUnit):
            # This will return the root unit that is scaled by the prefix
            # attached to it
            has_prefixes.add(val._represents.bases[0].name)
        else:
            units.append(val)
    # Sort alphabetically, case insensitive
    units.sort(key=lambda x: x.name.lower())
    for unit in units:
        # First docstring sentence serves as the unit's one-line description.
        doc = _get_first_sentence(unit.__doc__).strip()
        represents = ""
        if isinstance(unit, core.Unit):
            # Inline LaTeX for what a derived unit stands for; [1:-1] strips
            # the surrounding "$" from the latex string.
            represents = f":math:`{unit._represents.to_string('latex')[1:-1]}`"
        aliases = ", ".join(f"``{x}``" for x in unit.aliases)
        yield (
            unit,
            doc,
            represents,
            aliases,
            "Yes" if unit.name in has_prefixes else "No",
        )
def generate_unit_summary(namespace: dict[str, object]) -> str:
    """
    Generates a summary of units from a given namespace. This is used
    to generate the docstring for the modules that define the actual
    units.

    Parameters
    ----------
    namespace : dict
        A namespace containing units.

    Returns
    -------
    docstring : str
        A docstring containing a summary table of the units.
    """
    docstring = io.StringIO()
    # NOTE: the leading whitespace inside these literals is significant --
    # it becomes reST indentation in the generated docstring table.
    docstring.write(
        """
.. list-table:: Available Units
   :header-rows: 1
   :widths: 10 20 20 20 1

   * - Unit
     - Description
     - Represents
     - Aliases
     - SI Prefixes
"""
    )
    template = """
   * - ``{}``
     - {}
     - {}
     - {}
     - {}
"""
    for unit_summary in _iter_unit_summary(namespace):
        docstring.write(template.format(*unit_summary))
    return docstring.getvalue()
def generate_prefixonly_unit_summary(namespace: dict[str, object]) -> str:
    """
    Generates table entries for units in a namespace that are just prefixes
    without the base unit. Note that this is intended to be used *after*
    `generate_unit_summary` and therefore does not include the table header.

    Parameters
    ----------
    namespace : dict
        A namespace containing units that are prefixes but do *not* have the
        base unit in their namespace.

    Returns
    -------
    docstring : str
        A docstring containing a summary table of the units.
    """
    # Imported lazily to avoid a circular import with astropy.units.
    from . import PrefixUnit

    # Recover the (absent) base unit of each prefixed unit so summary rows
    # can still be generated for it.
    faux_namespace = {}
    for unit in namespace.values():
        if isinstance(unit, PrefixUnit):
            base_unit = unit.represents.bases[0]
            faux_namespace[base_unit.name] = base_unit
    docstring = io.StringIO()
    # Leading whitespace is significant reST indentation (see
    # generate_unit_summary for the matching table header).
    template = """
   * - Prefixes for ``{}``
     - {} prefixes
     - {}
     - {}
     - Only
"""
    for unit_summary in _iter_unit_summary(faux_namespace):
        docstring.write(template.format(*unit_summary))
    return docstring.getvalue()
def is_effectively_unity(value: Complex) -> bool:
    """Whether ``value`` is equal to 1 to within a few ulp.

    Complex values count as unity when the real part is ~1 and the
    imaginary part is ~0.
    """
    # Fast path: real-valued input supports chained comparison directly.
    # Use try/except (EAFP) so the common real case stays cheap.
    try:
        return _JUST_BELOW_UNITY <= value <= _JUST_ABOVE_UNITY
    except TypeError:
        # Complex input: require real ~ 1 and imag ~ 0 (tested via imag + 1).
        real_ok = _JUST_BELOW_UNITY <= value.real <= _JUST_ABOVE_UNITY
        imag_ok = _JUST_BELOW_UNITY <= value.imag + 1 <= _JUST_ABOVE_UNITY
        return real_ok and imag_ok
def sanitize_scale_type(scale: Complex) -> UnitScale:
    """Ensure a unit scale is a plain Python numeric scalar.

    Raises
    ------
    UnitScaleError
        If ``scale`` is zero (or otherwise falsy).
    """
    if not scale:
        raise UnitScaleError("cannot create a unit with a scale of 0.")

    # Fast path: plain floats are by far the most common case.
    if scale.__class__ is float:
        return scale

    # NumPy scalars are converted to the equivalent Python scalar: they do
    # not auto-convert to complex when necessary and are also slower.
    if isinstance(scale, np.number):
        return scale.item()
    return scale
def sanitize_scale_value(scale: UnitScale) -> UnitScale:
    """Simplify a unit scale: collapse near-unity, drop negligible parts.

    Returns 1.0 for effectively-unit scales; for complex scales, drops a
    negligible imaginary (or real) component relative to the other.
    """
    if is_effectively_unity(scale):
        return 1.0

    # All possible scale classes (int, float, complex, Fraction) expose
    # an "imag" attribute, so this test is safe for every input.
    if not scale.imag:
        return scale.real

    if abs(scale.real) > abs(scale.imag):
        # Mostly real: drop the imaginary part if it is negligible.
        if is_effectively_unity(scale.imag / scale.real + 1):
            return scale.real
    elif is_effectively_unity(scale.real / scale.imag + 1):
        # Mostly imaginary: drop a negligible real part.
        return complex(0.0, scale.imag)
    return scale
def maybe_simple_fraction(p: Real, max_denominator: int = 100) -> UnitPower:
    """Fraction very close to x with denominator at most max_denominator.

    The fraction has to be such that fraction/x is unity to within 4 ulp.
    If such a fraction does not exist, returns the float number.

    The algorithm is that of `fractions.Fraction.limit_denominator`, but
    sped up by not creating a fraction to start with.

    If the input is zero, an integer or `fractions.Fraction`, just return it.
    """
    # Rational inputs are already in the desired form.
    if p.__class__ is int or p.__class__ is Fraction:
        return p
    if p == 0:
        return 0  # p might be some numpy number, but we want a Python int
    # Continued-fraction expansion of p; (n1, d1) tracks the current convergent.
    n, d = float(p).as_integer_ratio()
    a = n // d
    # Normally, start with 0,1 and 1,0; here we have applied first iteration.
    n0, d0 = 1, 0
    n1, d1 = a, 1
    while d1 <= max_denominator:
        # Accept the convergent once it reproduces p to within ~4 ulp.
        if _JUST_BELOW_UNITY <= n1 / (d1 * p) <= _JUST_ABOVE_UNITY:
            return Fraction(n1, d1)
        # Advance the continued-fraction recurrence.
        n, d = d, n - a * d
        a = n // d
        n0, n1 = n1, n0 + a * n1
        d0, d1 = d1, d0 + a * d1
    # No sufficiently close fraction with a small denominator exists.
    return float(p)
def sanitize_power(p: Real) -> UnitPower:
    """Convert the power to a float, an integer, or a Fraction.

    If a fractional power can be represented exactly as a floating point
    number, convert it to a float, to make the math much faster; otherwise,
    retain it as a `fractions.Fraction` object to avoid losing precision.
    Conversely, if the value is indistinguishable from a rational number
    with a low-numbered denominator, convert to a Fraction object.
    If a power can be represented as an integer, use that.

    Parameters
    ----------
    p : float, int, Rational, Fraction
        Power to be converted.
    """
    # Plain ints need no conversion at all.
    if p.__class__ is int:
        return p

    denom = getattr(p, "denominator", None)
    if denom is None:
        # Not a rational type: try to snap to a nearby simple fraction.
        p = maybe_simple_fraction(p)
        if isinstance(p, float):
            # No close-enough simple fraction; keep the float as-is.
            return p
        denom = p.denominator

    if denom == 1:
        # Whole number: collapse to a plain int.
        return int(p.numerator)
    if denom & (denom - 1) == 0:
        # Bit-twiddling check for a power-of-two denominator: such values
        # are exactly representable as floats, which speeds things up.
        return float(p)
    return p
def resolve_fractions(a: Real, b: Real) -> tuple[Real, Real]:
    """
    If either input is a Fraction, convert the other to a Fraction
    (at least if it does not have a ridiculous denominator).
    This ensures that any operation involving a Fraction will use
    rational arithmetic and preserve precision.
    """

    def _is_true_fraction(x):
        # int/float dominate in practice and isinstance(x, Fraction) is very
        # slow for non-Fraction instances, so short-circuit on those first.
        return (
            x.__class__ is not int
            and x.__class__ is not float
            and isinstance(x, Fraction)
        )

    a_frac = _is_true_fraction(a)
    b_frac = _is_true_fraction(b)
    # Only a mixed pair needs work: promote the non-Fraction side.
    if a_frac and not b_frac:
        b = maybe_simple_fraction(b)
    elif b_frac and not a_frac:
        a = maybe_simple_fraction(a)
    return a, b
@overload
def quantity_asanyarray(a: Sequence[int]) -> NDArray[int]: ...
@overload
def quantity_asanyarray(a: Sequence[int], dtype: DType) -> NDArray[DType]: ...
@overload
def quantity_asanyarray(a: Sequence[Quantity]) -> Quantity: ...
def quantity_asanyarray(
    a: Sequence[int] | Sequence[Quantity], dtype: DType | None = None
) -> NDArray[int] | NDArray[DType] | Quantity:
    """Convert to an array, preserving `~astropy.units.Quantity` elements.

    If ``a`` is a non-array, non-scalar sequence containing at least one
    Quantity, the whole sequence is converted to a single Quantity;
    otherwise the input is handed to `numpy.asanyarray` unchanged.
    """
    # Local import to avoid a circular dependency with the quantity module.
    from .quantity import Quantity

    if (
        not isinstance(a, np.ndarray)
        and not np.isscalar(a)
        and any(isinstance(x, Quantity) for x in a)
    ):
        return Quantity(a, dtype=dtype)
    else:
        # skip over some dtype deprecation: np.inexact is an abstract type,
        # not a concrete dtype, so map it to float64 before calling numpy.
        dtype = np.float64 if dtype is np.inexact else dtype
        return np.asanyarray(a, dtype=dtype)
|
astropyREPO_NAMEastropyPATH_START.@astropy_extracted@astropy-main@astropy@units@utils.py@.PATH_END.py
|
{
"filename": "sourcepairing.py",
"repo_name": "juanep97/iop4",
"repo_path": "iop4_extracted/iop4-main/iop4lib/utils/sourcepairing.py",
"type": "Python"
}
|
import iop4lib.config
iop4conf = iop4lib.Config(config_db=False)
import numpy as np
import scipy as sp
import matplotlib as mplt
import matplotlib.pyplot as plt
import itertools
from scipy.spatial import cKDTree
def get_pairs_d(pos, d0=None,
                d_eps=None, d_min=None, d_max=None,
                bins=None, hist_range=None, redf=None, doplot=False, ax=None):
    """
    Pair points that are separated by the most common mutual distance.

    All pairwise distances between the positions in ``pos`` are computed
    and, unless ``d0`` is supplied, the most common distance ``d0`` is
    taken from the peak of their histogram (restricted to
    ``d_min <= d <= d_max``). Points whose separation is within ``d_eps``
    of ``d0`` are then paired.

    Each returned pair ``(p1, p2)`` is ordered so that ``p1`` is the point
    with the larger x coordinate (``p1[0] > p2[0]``).

    Returns
    -------
    list1, list2 : sequences of positions
        Pair ``i`` is ``(list1[i], list2[i])``; empty if nothing paired.
    d0 : float or None
        Pairing distance used (None if fewer than two positions were given).
    disp_sign : ndarray or None
        Mean displacement ``list2 - list1`` over the pairs (None if none).
    """
    # Fall back to defaults; note falsy values (e.g. 0) also trigger them.
    d_eps = d_eps if d_eps else 0.8
    d_min = d_min if d_min else 0
    d_max = d_max if d_max else 60

    # Histogram binning defaults are derived from the image, when available.
    if bins is None:
        if redf is None:
            raise ValueError("bins must be specified if redf is not given")
        bins = int(0.75 * max(redf.data.shape))

    if hist_range is None:
        if redf is None:
            raise ValueError("hist_range must be specified if redf is not given")
        hist_range = (0, min(redf.data.shape))

    # Nothing can be paired with fewer than two positions.
    if pos is None or len(pos) < 2:
        return [], [], None, None

    pairs = list(itertools.combinations(pos, 2))
    distances = [np.linalg.norm(pa - pb) for pa, pb in pairs]

    if d0 is None:
        # Most common separation = histogram peak within [d_min, d_max].
        counts, edges = np.histogram(distances, bins=bins, range=hist_range)
        centers = (edges[:-1] + edges[1:]) / 2
        in_window = (d_min <= centers) & (centers <= d_max)
        d0 = centers[in_window][np.argmax(counts[in_window])]

    # Keep pairs whose separation is close enough to d0, and order each
    # pair so that the larger-x point comes first.
    close = [(pa, pb) for pa, pb in pairs
             if abs(np.linalg.norm(pa - pb) - d0) < d_eps]
    ordered = [[pa, pb] if pa[0] > pb[0] else [pb, pa] for pa, pb in close]

    if not ordered:
        return [], [], d0, None

    list1, list2 = zip(*ordered)
    disp_sign = np.mean(np.array(list2) - np.array(list1), axis=0)

    # Plotting (optional)
    if doplot:
        if ax is None:
            ax = plt.gca()
        counts_, edges_, patches = ax.hist(distances, bins=bins, range=hist_range)
        ax.axvline(x=d0, color='r', linestyle='--', linewidth=1, alpha=0.5)
        patches[np.argmax(counts_)].set_facecolor('red')

    return list1, list2, d0, disp_sign
def get_pairs_dxy(pos, disp=None,
                  dx_eps=None, dy_eps=None, d_eps=None, dx_min=None, dx_max=None, dy_min=None, dy_max=None, d_min=None,
                  bins=None, hist_range=None, redf=None, doplot=False, axs=None, fig=None):
    """
    From a list of positions, finds the most common distances between them in both x and y axes (disp),
    and pairs the points that are at such distances.

    If disp is given, it is used as the most common distance in both axes instead of computing it.

    Each returned pair (p1, p2) is ordered so that p1 is the point with the larger
    x coordinate (p1[0] > p2[0]).

    Note: this function is similar to get_pairs_d(), but finds the most common distances both in x and y axes.

    Returns
    -------
    list1, list2 : sequences of positions
        Pair i is (list1[i], list2[i]); empty if no pair was found.
    disp : [float, float] or None
        The (|dx|, |dy|) displacement used for pairing.
    disp_sign : ndarray or None
        Mean signed displacement list2 - list1 over the kept pairs.
    """
    # Tolerances and search windows; falsy inputs (e.g. 0) fall back to defaults.
    dx_eps = dx_eps or 0.8
    dy_eps = dy_eps or 0.8
    d_eps = d_eps or 0.8  # NOTE(review): set but never used below -- confirm intent.
    dx_min = dx_min or 0
    dx_max = dx_max or 60
    dy_min = dy_min or 0
    dy_max = dy_max or 60
    d_min = d_min or 0  # NOTE(review): rebound by the loop below; effectively unused.

    # Nothing to pair with fewer than two positions.
    if pos is None or len(pos) < 2:
        return [], [], None, None

    pairs = list(itertools.combinations(pos, 2))

    if disp is None:
        # Histogram parameters default to values derived from the image shape.
        if bins is None:
            if redf is not None:
                bins = int( 0.75 * max(redf.data.shape) )
            else:
                raise ValueError("bins must be specified if redf is not given")
        if hist_range is None:
            if redf is not None:
                hist_range = (0, min(redf.data.shape))
            else:
                raise ValueError("hist_range must be specified if redf is not given")
        disp = list()
        # Per axis: take the histogram peak of |dx| (resp. |dy|) inside the window.
        # NOTE(review): the loop variables shadow the d_min/d_max parameters.
        for i, d_min, d_max in zip([0, 1], [dx_min, dy_min], [dx_max, dy_max]): # for each axis
            distances = [abs(p1[i]-p2[i]) for p1,p2 in pairs]
            hist, edges = np.histogram(distances, bins=bins, range=hist_range)
            centers = (edges[:-1]+edges[1:])/2
            idx = (d_min <= centers) & (centers <= d_max)
            idx_max = np.argmax(hist[idx])
            d0 = centers[idx][idx_max]
            disp.append(d0)

    # Keep pairs whose |dx| and |dy| both match disp within the tolerances.
    paired = [(p1,p2) for p1,p2 in pairs if ( abs( abs( p1[0] - p2[0] ) - disp[0] ) < dx_eps and abs( (abs( p1[1] - p2[1] ) - disp[1] ) ) < dy_eps )]
    # Order each pair so the larger-x point comes first.
    paired = [[p1,p2] if p1[0]>p2[0] else [p2,p1] for (p1,p2) in paired]

    if len(paired) == 0:
        return [], [], disp, None

    list1, list2 = list(zip(*paired))

    pos1 = np.array(list1)
    pos2 = np.array(list2)
    # Mean signed displacement from the first to the second point of each pair.
    disp_sign = np.mean(pos2-pos1, axis=0)

    # Plotting (optional)
    if doplot:
        if axs is None:
            if fig is None:
                fig = plt.gcf()
            axs = fig.subplots(nrows=2, sharex=True)
        if len(axs) == 1:
            # Single axis: overlay both per-axis histograms.
            ax = axs[0]
            for i, color in zip([0, 1], ['r','b']):
                distances = [abs(p1[i]-p2[i]) for p1,p2 in pairs]
                cnts, edges, bars = ax.hist(distances, bins=bins, range=hist_range, alpha=0.3)
                ax.axvline(x=disp[i], color=color, linestyle='--', linewidth=1, alpha=0.3)
                bars[np.argmax(cnts)].set_facecolor('red')
        elif len(axs) == 2:
            # Two axes: one histogram per axis.
            for i in [0, 1]:
                distances = [abs(p1[i]-p2[i]) for p1,p2 in pairs]
                cnts, edges, bars = axs[i].hist(distances, bins=bins, range=hist_range)
                axs[i].axvline(x=disp[i], color='k', linestyle='--', linewidth=1, alpha=0.5)
                bars[np.argmax(cnts)].set_facecolor('red')
        else:
            raise ValueError("axs must be a list of length 1 or 2")

    return list1, list2, disp, disp_sign
def get_best_pairs(list1, list2, disp_sign, dist_err=None, disp_sign_err=None):
    """
    From two lists which correspond to paired points, if there are points participating in more
    than one pair, return only the best pair according to the displacement disp_sign.

    If dist_err (scalar) or disp_sign_err are given, only pairs whose points are displaced by disp_sign within
    a distance of dist_err (1d) or within disp_sign_err (2d) are considered.

    Example
    -------
    An example of pair finding where first we detect the sources and find the pairs without a priori knowledge of the displacement.
    ```
    # Detect sources in the sky (pos_seg is a list of (x,y) positions)
    # Define some parameters
    d_eps = 0.8
    bins = int( 0.75 * max(redf.data.shape) )
    hist_range = (0, min(redf.data.shape))
    ## Find pairs of sources matching in scalar distance, using the most common distance between sources
    seg1, seg2, seg_d0, seg_disp_sign = get_pairs_d(pos_seg, d_eps=d_eps, bins=bins, hist_range=hist_range)
    ## Get only the best pairs, according to the displacement sign
    seg1_best, seg2_best, seg_disp_best, seg_disp_sign_best = get_best_pairs(seg1, seg2, seg_disp_sign)
    ## Find pairs of sources matching in 2d position, using the most common displacement between sources
    seg1xy, seg2xy, seg_disp_xy, seg_disp_sign_xy = get_pairs_dxy(pos_seg, d_eps=d_eps, bins=bins, hist_range=hist_range)
    ## Get only the best pairs, according to the displacement sign
    seg1xy_best, seg2xy_best, seg_disp_xy_best, seg_disp_sign_xy_best = get_best_pairs(seg1xy, seg2xy, seg_disp_sign_xy)
    ```
    Alternatively, using a priori knowledge of the displacement, we can find the pairs directly:
    ```
    # Get the average displacement between pairs and its std from already calibrated sources
    disp_mean = np.mean([redf.astrometry_info[-1]['seg_disp_sign_xy'] for redf in ReducedFit.objects.filter(flags__has=ReducedFit.FLAGS.BUILT_REDUCED, obsmode="POLARIMETRY") if 'seg_disp_sign_xy' in redf.astrometry_info[-1] and isinstance(redf.astrometry_info[-1]['seg_disp_sign_xy'], np.ndarray)], axis=0)
    disp_std = np.std([redf.astrometry_info[-1]['seg_disp_sign_xy'] for redf in ReducedFit.objects.filter(flags__has=ReducedFit.FLAGS.BUILT_REDUCED, obsmode="POLARIMETRY") if 'seg_disp_sign_xy' in redf.astrometry_info[-1] and isinstance(redf.astrometry_info[-1]['seg_disp_sign_xy'], np.ndarray)], axis=0)
    # Detect sources (pos_seg is a list of (x,y) positions)
    # Directly find the best pairs with the mean disp_mean and disp_std
    seg1xy_best, seg2xy_best, seg_disp_xy_best, seg_disp_sign_xy_best = get_best_pairs(pos_seg, pos_seg, disp_mean, disp_sign_err=5*disp_std)
    ```

    Parameters
    ----------
    list1, list2 : list, list
        paired lists of points
    disp_sign : (float, float)
        the displacement between points (expected to be an ndarray so that
        it broadcasts with point coordinates; see NOTE below)

    Returns
    -------
    list1, list2 : list, list
        paired list of points
    d0_new : float
        norm of the recomputed mean displacement
    disp_sign_new : ndarray
        recomputed mean displacement (mean of list2 - list1)
    """
    if list1 is None or len(list1) < 2:
        return [], [], None, None

    # Deduplicate the endpoints; tuples make the points hashable for sets.
    set1 = {tuple(p1) for p1 in list1}
    set2 = {tuple(p2) for p2 in list2}

    def get_best_companion(p,pL):
        # Companion minimizing the residual w.r.t. the expected displacement.
        p = np.array(p) ## so now p+disp_sign-x is an array even if disp_sign and x are tuples
        return min(pL, key=lambda x: np.abs(np.linalg.norm(p+disp_sign-x)))

    paired = [(p1, get_best_companion(p1,set2)) for p1 in set1]

    # Optional rejection of pairs too far from the expected displacement.
    # NOTE(review): pair[0] is a tuple here; `pair[0]+disp_sign` relies on
    # disp_sign being an ndarray so `+` broadcasts instead of concatenating
    # -- confirm callers always pass an ndarray.
    if dist_err is not None:
        paired = [pair for pair in paired if np.linalg.norm(pair[0]+disp_sign-pair[1]) < dist_err]

    if disp_sign_err is not None:
        paired = [pair for pair in paired if np.abs(pair[0][0]+disp_sign[0]-pair[1][0]) < disp_sign_err[0]]
        paired = [pair for pair in paired if np.abs(pair[0][1]+disp_sign[1]-pair[1][1]) < disp_sign_err[1]]

    if len(paired) == 0:
        return [], [], None, None

    list1, list2 = list(zip(*paired))
    pos1 = np.array(list1)
    pos2 = np.array(list2)
    # Recompute the mean displacement and its norm from the kept pairs.
    disp_sign_new = np.mean(pos2-pos1, axis=0)
    d0_new = np.linalg.norm(disp_sign_new)

    return list1, list2, d0_new, disp_sign_new
|
juanep97REPO_NAMEiop4PATH_START.@iop4_extracted@iop4-main@iop4lib@utils@sourcepairing.py@.PATH_END.py
|
{
"filename": "ExposureTime.ipynb",
"repo_name": "rodluger/starry",
"repo_path": "starry_extracted/starry-master/notebooks/ExposureTime.ipynb",
"type": "Jupyter Notebook"
}
|
# Exposure time integration
Here we'll briefly discuss how to account for finite exposure time integration, which tends to smooth out occultation light curves. Unfortunately, there's no analytic way (that I know of) to tackle this, so the thing to do is to oversample the light curve on a fine time grid and numerically integrate over the exposure window.
```python
%matplotlib inline
```
```python
%run notebook_setup.py
```
```python
import numpy as np
import starry
starry.config.lazy = False
starry.config.quiet = True
```
## Creating a star-planet system
Let's instantiate a simple `Primary` object:
```python
star = starry.Primary(starry.Map(ydeg=0, udeg=2, amp=1.0), m=1.0, r=1.0)
```
And let's give it some limb darkening:
```python
star.map[1] = 0.40
star.map[2] = 0.26
```
Here's what that looks like:
```python
star.map.show()
```
Let's now create a featureless hot Jupiter:
```python
planet = starry.kepler.Secondary(
starry.Map(2, amp=0), m=0, r=0.1, porb=1.0, ecc=0.3, w=30, t0=0,
)
```
Now that we have a star and a planet, we can instantiate the planetary system. By default, exposure time integration is **disabled**.
```python
sys = starry.System(star, planet)
```
## Computing a transit light curve
We're ready to compute a transit light curve. Let's do this over 1000 cadences between $t=-0.1$ and $t=0.1$.
```python
# HACK: run this to pre-compile the flux method
sys.flux(0.0);
```
```python
%%time
time = np.linspace(-0.1, 0.1, 1000)
flux = sys.flux(time)
```
Cool -- `starry` computed 1,000 cadences in just a few ms. Let's check it out:
```python
plt.plot(time, flux)
plt.xlabel("time [days]")
plt.ylabel("system flux");
```
Ok, now let's enable exposure time integration. We have to instantiate a new ``System`` object for this:
```python
sys_exp = starry.System(star, planet, texp=0.01, oversample=9, order=0)
```
We passed in the three keywords controlling exposure time integration. The first is ``texp``, the length of the exposure window in ``sys_exp.time_unit`` (usually days). The second is ``oversample``, the factor by which the light curve is oversampled. The larger this number, the more accurate the model will be, at the cost of extra computation time. Finally, ``order`` controls the order of the numerical integration: ``0`` for a centered Riemann sum
(equivalent to the "resampling" procedure suggested by Kipping 2010), ``1`` for the trapezoid rule, or ``2`` for Simpson’s rule.
```python
# HACK: run this to pre-compile the flux method
sys_exp.flux(0.0);
```
Let's compute the flux (and time the computation):
```python
%%time
flux_exp = sys_exp.flux(time)
```
That was a factor of a few slower than the original evaluation, but it's not bad. Here's the comparison of the two light curves:
```python
plt.plot(time, flux, label=r"$t_{exp} = 0$")
plt.plot(time, flux_exp, label=r"$t_{exp} = 0.01$")
plt.legend()
plt.xlabel("time [days]")
plt.ylabel("system flux");
```
## Computing a phase curve
Exposure time integration also affects phase curves. Let's give the planet a random map and compute its phase curve with and without exposure time integration. We'll make the planet's rotational period really short so we can clearly tell the difference between the two:
```python
planet.map.amp = 0.1
planet.prot = 0.05
planet.map[1:, :] = 0.1 * np.random.randn(planet.map.Ny - 1)
```
```python
planet.map.show()
```
Let's grab just the planet flux (by passing ``total=False`` to the ``flux`` method and keeping only the second vector):
```python
%%time
flux = sys.flux(time, total=False)[1]
```
```python
%%time
flux_exp = sys_exp.flux(time, total=False)[1]
```
Here are the two light curves; it's clear that the finite exposure time has smoothed out the phase curve.
```python
plt.plot(time, flux, label=r"$t_{exp} = 0$")
plt.plot(time, flux_exp, label=r"$t_{exp} = 0.01$")
plt.legend()
plt.xlabel("time [days]")
plt.ylabel("planet flux");
```
|
rodlugerREPO_NAMEstarryPATH_START.@starry_extracted@starry-master@notebooks@ExposureTime.ipynb@.PATH_END.py
|
{
"filename": "create.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/lite/g3doc/api_docs/python/tflite_model_maker/recommendation/create.md",
"type": "Markdown"
}
|
page_type: reference
description: Loads data and trains the model for recommendation.
<link rel="stylesheet" href="/site-assets/css/style.css">
<!-- DO NOT EDIT! Automatically generated file. -->
<div itemscope itemtype="http://developers.google.com/ReferenceObject">
<meta itemprop="name" content="tflite_model_maker.recommendation.create" />
<meta itemprop="path" content="Stable" />
</div>
# tflite_model_maker.recommendation.create
<!-- Insert buttons and diff -->
<table class="tfo-notebook-buttons tfo-api nocontent" align="left">
<td>
<a target="_blank" href="https://github.com/tensorflow/examples/blob/master/tensorflow_examples/lite/model_maker/core/task/recommendation.py#L213-L265">
<img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />
View source on GitHub
</a>
</td>
</table>
Loads data and trains the model for recommendation.
<pre class="devsite-click-to-copy prettyprint lang-py tfo-signature-link">
<code>@classmethod</code>
<code>tflite_model_maker.recommendation.create(
train_data,
model_spec: <a href="../../tflite_model_maker/recommendation/ModelSpec"><code>tflite_model_maker.recommendation.ModelSpec</code></a>,
model_dir: str = None,
validation_data=None,
batch_size: int = 16,
steps_per_epoch: int = 10000,
epochs: int = 1,
learning_rate: float = 0.1,
gradient_clip_norm: float = 1.0,
shuffle: bool = True,
do_train: bool = True
)
</code></pre>
<!-- Placeholder for "Used in" -->
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2"><h2 class="add-link">Args</h2></th></tr>
<tr>
<td>
`train_data`<a id="train_data"></a>
</td>
<td>
Training data.
</td>
</tr><tr>
<td>
`model_spec`<a id="model_spec"></a>
</td>
<td>
ModelSpec, Specification for the model.
</td>
</tr><tr>
<td>
`model_dir`<a id="model_dir"></a>
</td>
<td>
str, path to export model checkpoints and summaries.
</td>
</tr><tr>
<td>
`validation_data`<a id="validation_data"></a>
</td>
<td>
Validation data.
</td>
</tr><tr>
<td>
`batch_size`<a id="batch_size"></a>
</td>
<td>
Batch size for training.
</td>
</tr><tr>
<td>
`steps_per_epoch`<a id="steps_per_epoch"></a>
</td>
<td>
int, Number of step per epoch.
</td>
</tr><tr>
<td>
`epochs`<a id="epochs"></a>
</td>
<td>
int, Number of epochs for training.
</td>
</tr><tr>
<td>
`learning_rate`<a id="learning_rate"></a>
</td>
<td>
float, learning rate.
</td>
</tr><tr>
<td>
`gradient_clip_norm`<a id="gradient_clip_norm"></a>
</td>
<td>
float, clip threshold (<= 0 meaning no clip).
</td>
</tr><tr>
<td>
`shuffle`<a id="shuffle"></a>
</td>
<td>
boolean, whether the training data should be shuffled.
</td>
</tr><tr>
<td>
`do_train`<a id="do_train"></a>
</td>
<td>
boolean, whether to run training.
</td>
</tr>
</table>
<!-- Tabular view -->
<table class="responsive fixed orange">
<colgroup><col width="214px"><col></colgroup>
<tr><th colspan="2"><h2 class="add-link">Returns</h2></th></tr>
<tr class="alt">
<td colspan="2">
An instance based on Recommendation.
</td>
</tr>
</table>
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@lite@g3doc@api_docs@python@tflite_model_maker@recommendation@create.md@.PATH_END.py
|
{
"filename": "setup_chgcentre.py",
"repo_name": "IanHeywood/oxkat",
"repo_path": "oxkat_extracted/oxkat-master/waterhole/setup_chgcentre.py",
"type": "Python"
}
|
import glob
import os
import sys
def write_slurm(opfile, jobname, logfile, syscall):
    """Write a slurm sbatch script that runs *syscall*.

    Parameters
    ----------
    opfile : str
        Path of the sbatch script to create (overwritten if it exists).
    jobname : str
        Value for the ``--job-name`` slurm directive.
    logfile : str
        Path for the slurm ``--output`` log file.
    syscall : str
        Shell command the job executes.
    """
    # NOTE: the original list omitted a comma after the --partition line,
    # silently concatenating it with the --ntasks line. The emitted bytes
    # were identical, but each directive is now its own element so future
    # edits cannot accidentally merge lines.
    lines = [
        '#!/bin/bash\n',
        '#file: ' + opfile + ':\n',
        '#SBATCH --job-name=' + jobname + '\n',
        '#SBATCH --time=01:00:00\n',
        '#SBATCH --partition=Main\n',
        '#SBATCH --ntasks=1\n',
        '#SBATCH --nodes=1\n',
        '#SBATCH --cpus-per-task=8\n',
        '#SBATCH --mem=32GB\n',
        '#SBATCH --account=b24-thunderkat-ag\n',
        '#SBATCH --output=' + logfile + '\n',
        syscall + '\n',
        'sleep 10\n',
    ]
    # Context manager guarantees the script is flushed/closed even on error.
    with open(opfile, 'w') as f:
        f.writelines(lines)
def main():
    """Build one chgcentre slurm job per scan listed in a listobs summary file.

    Reads the summary file given as the first CLI argument, and for every
    scan row (18 whitespace-separated columns) that matches exactly one
    ``*_<field>_scan<scan>.ms`` measurement set, writes a slurm script and
    appends an ``sbatch`` line to ``submit_chgcentre_jobs.sh``.
    """
    container = '/idia/software/containers/STIMELA_IMAGES/stimela_chgcentre_1.2.0.sif'
    runfile = 'submit_chgcentre_jobs.sh'

    with open(runfile, 'w') as op:
        op.write('#!/bin/bash\n')

        sunfile = sys.argv[1]
        with open(sunfile) as f:
            for line in f:
                cols = line.split()
                # Scan rows in the listobs summary have exactly 18 columns.
                if len(cols) != 18:
                    continue
                scan, field = cols[5], cols[7]
                ra, dec = cols[10], cols[11]
                # Only proceed when exactly one MS matches this field/scan.
                matches = glob.glob('*_' + field + '_scan' + scan + '.ms')
                if len(matches) != 1:
                    continue
                myms = matches[0]
                code = 'chgcen' + scan
                syscall = ('singularity exec ' + container + ' '
                           + 'chgcentre ' + myms + ' ' + ra + ' ' + dec)
                slurm_file = 'SCRIPTS/slurm_' + code + '.sh'
                log_file = 'LOGS/slurm_' + code + '.log'
                write_slurm(opfile=slurm_file, jobname=code,
                            logfile=log_file, syscall=syscall)
                op.write('sbatch ' + slurm_file + '\n')

    print('Wrote ' + runfile + ' script')
if __name__ == "__main__":
    # Entry point: build the per-scan slurm scripts and the submit wrapper.
    main()
|
IanHeywoodREPO_NAMEoxkatPATH_START.@oxkat_extracted@oxkat-master@waterhole@setup_chgcentre.py@.PATH_END.py
|
{
"filename": "runSimulator.py",
"repo_name": "LSSTDESC/Spectractor",
"repo_path": "Spectractor_extracted/Spectractor-master/runSimulator.py",
"type": "Python"
}
|
from spectractor import parameters
from spectractor.simulation.simulator import AtmosphereGrid, SpectrumSimulatorSimGrid
from spectractor.config import load_config
from spectractor.simulation.image_simulation import ImageSim
from spectractor.logbook import LogBook
from spectractor.extractor.extractor import Spectractor
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument(dest="input", metavar='path', default=["tests/data/reduc_20170530_134.fits"],
help="Input fits file name. It can be a list separated by spaces, or it can use * as wildcard.",
nargs='*')
parser.add_argument("-d", "--debug", dest="debug", action="store_true",
help="Enter debug mode (more verbose and plots).", default=False)
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true",
help="Enter verbose (print more stuff).", default=False)
parser.add_argument("-o", "--output_directory", dest="output_directory", default="outputs/",
help="Write results in given output directory (default: ./outputs/).")
parser.add_argument("-l", "--logbook", dest="logbook", default="./tests/data/ctiofulllogbook_jun2017_v5.csv",
help="CSV logbook file. (default: ./tests/data/ctiofulllogbook_jun2017_v5.csv).")
parser.add_argument("-c", "--config", dest="config", default="config/ctio.ini",
help="INI config file. (default: config.ctio.ini).")
args = parser.parse_args()
parameters.VERBOSE = args.verbose
if args.debug:
parameters.DEBUG = True
parameters.VERBOSE = True
file_names = args.input
load_config(args.config)
logbook = LogBook(logbook=args.logbook)
for file_name in file_names:
tag = file_name.split('/')[-1]
disperser_label, target, xpos, ypos = logbook.search_for_image(tag)
if target is None or xpos is None or ypos is None:
continue
spectrum_file_name = args.output_directory + '/' + tag.replace('.fits', '_spectrum.fits')
atmgrid = AtmosphereGrid(file_name)
image = ImageSim(file_name, spectrum_file_name, args.output_directory, A1=1, A2=1,
pwv=5, ozone=300, aerosols=0.03,
psf_poly_params=None, with_starfield=True)
sim_file_name = args.output_directory + tag.replace('reduc_', 'sim_')
Spectractor(sim_file_name, args.output_directory, target, [xpos, ypos], disperser_label, args.config)
|
LSSTDESCREPO_NAMESpectractorPATH_START.@Spectractor_extracted@Spectractor-master@runSimulator.py@.PATH_END.py
|
{
"filename": "prop_divide.py",
"repo_name": "ajeldorado/falco-python",
"repo_path": "falco-python_extracted/falco-python-master/falco/proper/prop_divide.py",
"type": "Python"
}
|
# Copyright 2016, 2017 California Institute of Technology
# Users must agree to abide by the restrictions listed in the
# file "LegalStuff.txt" in the PROPER library directory.
#
# PROPER developed at Jet Propulsion Laboratory/California Inst. Technology
# Original IDL version by John Krist
# Python translation by Navtej Saini, with Luis Marchen and Nikta Amiri
#
# Modified by J. Krist on 19 April 2019: changed to /=
import falco.proper as proper
import numpy as np
def prop_divide(wf, value):
    """Divide the current wavefront amplitude by a user-provided value or array.

    Parameters
    ----------
    wf : obj
        The current WaveFront class object; ``wf.wfarr`` is divided in place.
    value : scalar or array_like
        Either a scalar or a 2-D array (ndarray, list or tuple) containing
        the amplitude map by which the current wavefront will be divided.
        The map is assumed to be centered at pixel (n/2,n/2).

    Returns
    -------
    None
    """
    # Use isinstance so ndarray subclasses (and tuples) are treated as
    # amplitude maps; the original `type(...) !=` comparison sent them to
    # the scalar branch, where float() raised a TypeError.
    if isinstance(value, (np.ndarray, list, tuple)):
        # The map is centered at (n/2, n/2): shift it to the FFT-centered
        # layout of the wavefront array before dividing.
        wf.wfarr /= proper.prop_shift_center(np.asarray(value))
    else:
        # Scalar: promote to complex so the in-place division stays complex.
        wf.wfarr /= complex(value)
    return
|
ajeldoradoREPO_NAMEfalco-pythonPATH_START.@falco-python_extracted@falco-python-master@falco@proper@prop_divide.py@.PATH_END.py
|
{
"filename": "main.py",
"repo_name": "cdslaborg/paramonte",
"repo_path": "paramonte_extracted/paramonte-main/example/fortran/pm_distMultiNorm/setMultiNormRand/main.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""Plot each '*.RK.txt' sample file as a scatter plot with marginal histograms."""

import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import glob
import sys

# NOTE(review): `linewidth` and the `sys`/`np` imports are unused -- kept as-is.
linewidth = 2
fontsize = 17

for kind in ["RK"]:
    # Process every "*.<kind>.txt" sample file in the working directory.
    pattern = "*." + kind + ".txt"
    fileList = glob.glob(pattern)
    for file in fileList:
        # Files are header-less comma-separated columns; column 0 = X, 1 = Y.
        df = pd.read_csv(file, delimiter = ",", header = None)

        # definitions for the axes
        left, width = 0.1, 0.65
        bottom, height = 0.1, 0.65
        spacing = 0.015

        # start with a square Figure
        fig = plt.figure(figsize = (8, 8))
        plt.rcParams.update({'font.size': fontsize - 2})

        # Main scatter axes plus two attached marginal-histogram axes that
        # share the corresponding scatter axis.
        ax = fig.add_axes([left, bottom, width, height]) # scatter plot
        ax_histx = fig.add_axes([left, bottom + height + spacing, width, 0.2], sharex = ax) # histx
        ax_histy = fig.add_axes([left + width + spacing, bottom, 0.2, height], sharey = ax) # histy

        for axes in [ax, ax_histx, ax_histy]:
            axes.grid(visible = True, which = "both", axis = "both", color = "0.85", linestyle = "-")
            axes.tick_params(axis = "y", which = "minor")
            axes.tick_params(axis = "x", which = "minor")

        # no labels on the shared sides of the marginal histograms
        ax_histy.tick_params(axis = "y", labelleft = False)
        ax_histx.tick_params(axis = "x", labelbottom = False)

        # the scatter plot:
        ax.scatter ( df.values[:, 0]
                   , df.values[:, 1]
                   , s = 8
                   , zorder = 1000
                   )
        ax_histx.hist(df.values[:, 0], bins = 50, zorder = 1000)
        ax_histy.hist(df.values[:, 1], bins = 50, orientation = "horizontal", zorder = 1000)

        ax.set_xlabel("X", fontsize = 17)
        ax.set_ylabel("Y", fontsize = 17)

        # Save next to the input file, swapping the extension.
        plt.savefig(file.replace(".txt",".png"))
|
cdslaborgREPO_NAMEparamontePATH_START.@paramonte_extracted@paramonte-main@example@fortran@pm_distMultiNorm@setMultiNormRand@main.py@.PATH_END.py
|
{
"filename": "_hoverlabel.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/graph_objs/waterfall/_hoverlabel.py",
"type": "Python"
}
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Hoverlabel(_BaseTraceHierarchyType):
    """Hover-label styling options for a waterfall trace (auto-generated)."""

    # class properties
    # --------------------
    # Dotted path of the parent node and of this node in the figure schema.
    _parent_path_str = "waterfall"
    _path_str = "waterfall.hoverlabel"
    # Property names this node accepts; used by the base class for validation.
    _valid_props = {
        "align",
        "alignsrc",
        "bgcolor",
        "bgcolorsrc",
        "bordercolor",
        "bordercolorsrc",
        "font",
        "namelength",
        "namelengthsrc",
    }
    # align
    # -----
    @property
    def align(self):
        """
        Sets the horizontal alignment of the text content within the hover
        label box. Has an effect only if the hover label text spans
        two or more lines.

        The 'align' property is an enumeration that may be specified as:
          - One of the following enumeration values:
                ['left', 'right', 'auto']
          - A tuple, list, or one-dimensional numpy array of the above

        Returns
        -------
        Any|numpy.ndarray
        """
        return self["align"]

    @align.setter
    def align(self, val):
        # Validation/coercion is delegated to the base class __setitem__.
        self["align"] = val
    # alignsrc
    # --------
    @property
    def alignsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for `align`.

        The 'alignsrc' property must be specified as a string or
        as a plotly.grid_objs.Column object

        Returns
        -------
        str
        """
        return self["alignsrc"]

    @alignsrc.setter
    def alignsrc(self, val):
        # Validation/coercion is delegated to the base class __setitem__.
        self["alignsrc"] = val
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the background color of the hover labels for this trace
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bgcolor"]
    @bgcolor.setter
    def bgcolor(self, val):
        # Assign through the base type's item protocol; the value is
        # presumably coerced/validated by the inherited __setitem__ — confirm.
        self["bgcolor"] = val
# bgcolorsrc
# ----------
@property
def bgcolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for `bgcolor`.
The 'bgcolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bgcolorsrc"]
@bgcolorsrc.setter
def bgcolorsrc(self, val):
self["bgcolorsrc"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the border color of the hover labels for this trace.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
- A list or array of any of the above
Returns
-------
str|numpy.ndarray
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# bordercolorsrc
# --------------
@property
def bordercolorsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
The 'bordercolorsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["bordercolorsrc"]
@bordercolorsrc.setter
def bordercolorsrc(self, val):
self["bordercolorsrc"] = val
# font
# ----
@property
def font(self):
"""
Sets the font used in hover labels.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.waterfall.hoverlabel.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
colorsrc
Sets the source reference on Chart Studio Cloud
for `color`.
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans", "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
familysrc
Sets the source reference on Chart Studio Cloud
for `family`.
lineposition
Sets the kind of decoration line(s) with text,
such as an "under", "over" or "through" as well
as combinations e.g. "under+over", etc.
linepositionsrc
Sets the source reference on Chart Studio Cloud
for `lineposition`.
shadow
Sets the shape and color of the shadow behind
text. "auto" places minimal shadow and applies
contrast text font color. See
https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional
options.
shadowsrc
Sets the source reference on Chart Studio Cloud
for `shadow`.
size
sizesrc
Sets the source reference on Chart Studio Cloud
for `size`.
style
Sets whether a font should be styled with a
normal or italic face from its family.
stylesrc
Sets the source reference on Chart Studio Cloud
for `style`.
textcase
Sets capitalization of text. It can be used to
make text appear in all-uppercase or all-
lowercase, or with each word capitalized.
textcasesrc
Sets the source reference on Chart Studio Cloud
for `textcase`.
variant
Sets the variant of the font.
variantsrc
Sets the source reference on Chart Studio Cloud
for `variant`.
weight
Sets the weight (or boldness) of the font.
weightsrc
Sets the source reference on Chart Studio Cloud
for `weight`.
Returns
-------
plotly.graph_objs.waterfall.hoverlabel.Font
"""
return self["font"]
@font.setter
def font(self, val):
self["font"] = val
# namelength
# ----------
@property
def namelength(self):
"""
Sets the default length (in number of characters) of the trace
name in the hover labels for all traces. -1 shows the whole
name regardless of length. 0-3 shows the first 0-3 characters,
and an integer >3 will show the whole name if it is less than
that many characters, but if it is longer, will truncate to
`namelength - 3` characters and add an ellipsis.
The 'namelength' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [-1, 9223372036854775807]
- A tuple, list, or one-dimensional numpy array of the above
Returns
-------
int|numpy.ndarray
"""
return self["namelength"]
@namelength.setter
def namelength(self, val):
self["namelength"] = val
# namelengthsrc
# -------------
@property
def namelengthsrc(self):
"""
Sets the source reference on Chart Studio Cloud for
`namelength`.
The 'namelengthsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["namelengthsrc"]
@namelengthsrc.setter
def namelengthsrc(self, val):
self["namelengthsrc"] = val
    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        # This literal appears to feed generated help/constructor text, so
        # its wording (including quirks) is returned verbatim and left
        # untouched here.
        return """\
        align
            Sets the horizontal alignment of the text content
            within hover label box. Has an effect only if the hover
            label text spans more two or more lines
        alignsrc
            Sets the source reference on Chart Studio Cloud for
            `align`.
        bgcolor
            Sets the background color of the hover labels for this
            trace
        bgcolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bgcolor`.
        bordercolor
            Sets the border color of the hover labels for this
            trace.
        bordercolorsrc
            Sets the source reference on Chart Studio Cloud for
            `bordercolor`.
        font
            Sets the font used in hover labels.
        namelength
            Sets the default length (in number of characters) of
            the trace name in the hover labels for all traces. -1
            shows the whole name regardless of length. 0-3 shows
            the first 0-3 characters, and an integer >3 will show
            the whole name if it is less than that many characters,
            but if it is longer, will truncate to `namelength - 3`
            characters and add an ellipsis.
        namelengthsrc
            Sets the source reference on Chart Studio Cloud for
            `namelength`.
        """
def __init__(
self,
arg=None,
align=None,
alignsrc=None,
bgcolor=None,
bgcolorsrc=None,
bordercolor=None,
bordercolorsrc=None,
font=None,
namelength=None,
namelengthsrc=None,
**kwargs,
):
"""
Construct a new Hoverlabel object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.waterfall.Hoverlabel`
align
Sets the horizontal alignment of the text content
within hover label box. Has an effect only if the hover
label text spans more two or more lines
alignsrc
Sets the source reference on Chart Studio Cloud for
`align`.
bgcolor
Sets the background color of the hover labels for this
trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud for
`bgcolor`.
bordercolor
Sets the border color of the hover labels for this
trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud for
`bordercolor`.
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of characters) of
the trace name in the hover labels for all traces. -1
shows the whole name regardless of length. 0-3 shows
the first 0-3 characters, and an integer >3 will show
the whole name if it is less than that many characters,
but if it is longer, will truncate to `namelength - 3`
characters and add an ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud for
`namelength`.
Returns
-------
Hoverlabel
"""
super(Hoverlabel, self).__init__("hoverlabel")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.waterfall.Hoverlabel
constructor must be a dict or
an instance of :class:`plotly.graph_objs.waterfall.Hoverlabel`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("align", None)
_v = align if align is not None else _v
if _v is not None:
self["align"] = _v
_v = arg.pop("alignsrc", None)
_v = alignsrc if alignsrc is not None else _v
if _v is not None:
self["alignsrc"] = _v
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bgcolorsrc", None)
_v = bgcolorsrc if bgcolorsrc is not None else _v
if _v is not None:
self["bgcolorsrc"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("bordercolorsrc", None)
_v = bordercolorsrc if bordercolorsrc is not None else _v
if _v is not None:
self["bordercolorsrc"] = _v
_v = arg.pop("font", None)
_v = font if font is not None else _v
if _v is not None:
self["font"] = _v
_v = arg.pop("namelength", None)
_v = namelength if namelength is not None else _v
if _v is not None:
self["namelength"] = _v
_v = arg.pop("namelengthsrc", None)
_v = namelengthsrc if namelengthsrc is not None else _v
if _v is not None:
self["namelengthsrc"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@graph_objs@waterfall@_hoverlabel.py@.PATH_END.py
|
{
"filename": "convert.md",
"repo_name": "amrvac/amrvac",
"repo_path": "amrvac_extracted/amrvac-master/doc/convert.md",
"type": "Markdown"
}
|
# Data file conversion
[TOC]
# Introduction {#convert_intro}
The standard [MPI-AMRVAC dataformat](fileformat.md), i.e. the _*.dat_
files usable for restart, contain all the conservative variables in
all gridblocks, and hence suffice for visualization.
Additional variables can be added in the _*.dat_ files as explained [here](dat_convert.md).
> Since late 2019, it can be directly read, visualised and
> analysed with the Python package [yt](https://yt-project.org),
> see [our documentation to get started with yt](yt_usage.md).
However, in many instances, one would like to use data formats that
are directly readable by some of the more widespread visualization
software packages. Therefore, we created the _convert.t_ module, which
ensures that this post-process data file conversion can be done with
the same executable (but possibly even on a different platform). The
many possibilities include conversion to _*.vtu_ (*VTK* *U*nstructured
data format) directly readable by [Paraview](http://www.paraview.org/)
(or [ViSiT](https://wci.llnl.gov/codes/visit/)), to _*.plt_ format for
the commercial package [Tecplot](http://www.tecplot.com/). Also,
**this info will not explain you how to use the mentioned software for
visualization, but just explain how to do the conversion.**
Furthermore, this part of the code is subject to continuous change and
improvement, and we welcome extra contributions.
We now give some brief info on how to use the same executable _amrvac_ (which
you already compiled and used to obtain output _*.dat_ files with), to convert
a single or all _*.dat_ file(s) to one of these formats.
# Converting (on a single CPU) {#converting}
** Note that all the steps below assume you're running on a single CPU. The same steps are to be taken for obtaining any of the other precoded data formats. One important warning is due: when you run a simulation for some reason twice, and you did not erase the previously created _*.dat_ files, these files are overwritten (if the base_filename has not changed). Then, it may be that conversion fails, since the end of the file may contain some leftover data from the previous time, if the filelength has changed due to some other reason. The only remedy to this is that one should always remove old _*.dat_ files, or never forget to change the name for the files accordingly, by setting _base_filename_ in the _&filelist;_.**
We will assume that you ran the standard 2D advection problem used for test
purposes, i.e. that you did the following steps beforehand:
cd $AMRVAC_DIR/tests/rho/vac
$AMRVAC_DIR/setup.pl -d=2
make
mpirun -np 1 amrvac
We also assume that in the parameter file amrvac.par, the namelist
_&filelist;_ was stating (note that the end of the namelist is indicated
as usual by a forward slash, "/")
&filelist
base_filename='vaclogo'
/
If all went well, you then have created as many _*.dat_ files as requested
through the settings you provided in the combined _&savelist;_ and
_&stoplist;_ namelists from the [par-file](par.md). For the example,
they normally default to asking a full data dump at time zero, as well as
every time the time has increased by 0.05, and this till _tmax=1.0d0_, such
that we actually have 21 snapshots in total. You should thus have files like
_vaclogo0000.dat_ up to _vaclogo0020.dat_. You can now
individually convert such _*.dat_ file to a _*.vtu_ file by doing the
following. Edit the amrvac.par file, to select a visualization data format like
&filelist
base_filename='vaclogo'
convert_type='vtuBCC'
convert=.true.
restart_from_file='vaclogo0000.dat'
/
you can then convert the single _vaclogo0000.dat_ file simply
running again
mpirun -np 1 amrvac
or, which is actually equivalent (single CPU)
amrvac
Note that this will create a new file, namely _vaclogo0000.vtu_, which
can be directly imported in Paraview. It will, under the settings above, just
contain the density on the grid hierarchy at time zero. The
_convert_type='vtuBCC'_ indicates that the data is exactly as the code
interprets and updates the values, namely as cell-centered quantities.
Realizing that you typically want to convert multiple data files, you can do
this by repeating the above as many times as there are _*.dat_ files, by
raising/changing the _restart_from_file_ identifier. Since you typically want to
convert all data files between a minimum and maximum number of similarly named
files, the script **aiconvert** is added. If you have a line
`PATH="$AMRVAC_DIR:$AMRVAC_DIR/tools:./:$PATH"` in `~/.bash_profile` (or
`~/.bashrc`), typing _aiconvert_ will tell you its intended usage.
In the example case at hand, where we created 21 data files from running the
advection problem, **this _aiconvert_ script needs the intended _base_filename_
and the executable _amrvac_ to exist in the same directory.** It will complain when the parfile
does not exist, and obviously requires the existence of all files between the
start and stopindex (0 and 20 here). With paraview, you will then be able to
immediately import all 21 _*.vtu_ files with the same base filename, and
directly make movies or still images from them.
For example, to convert snapshots from number 10 to number 20:
aiconvert 10 20
or to convert the snapshot number 12
aiconvert 12
or just type
aiconvert
to convert all the snapshots! You can also specify a parfile other than amrvac.par as:
aiconvert newname.par 0 20
or to convert the snapshot number 12
aiconvert newname.par 12
For details of aiconvert, please read the header of the
$AMRVAC_DIR/tools/aiconvert.
# Parallel conversion options {#parallel_convert}
For very large simulations (typically 3D, and/or runs achieving high effective
resolutions), even the data conversion may need to be done in parallel (and
ultimately, the visualization itself too). The _convert.t_ allows to perform
some of the _*.dat_ conversions in parallel, in particular, this is true for
the _*.vtu_ format, and for the _*.plt_ format. You should then select one of
convert_type='vtumpi'
convert_type='vtuCCmpi'
convert_type='vtuBmpi'
convert_type='vtuBCCmpi'
convert_type='vtimpi'
convert_type='vtiCCmpi'
convert_type='pvtumpi'
convert_type='pvtuCCmpi'
convert_type='pvtuBmpi'
convert_type='pvtuBCCmpi'
convert_type='tecplotmpi'
convert_type='tecplotCCmpi'
Here, the prefix _p_ stands for the parallel file format, where each process
is allowed to dump its data into a single (e.g. _*.vtu_) file and a master
file (e.g. _*.pvtu_) is stored by rank zero. This has the advantage that the
write operation on suitable file systems is sped up significantly. In a
visualization software, only the _*.pvtu_ files need to be imported and also
the reading process is sped up in case of parallel visualization.
You can again use aiconvert as explained above, and type in the number
of processors to use by answering a popup question:
How many processors do you want to use? (default=1) 4
# Autoconvert {#autoconvert}
In addition to the conversion after the run, AMRVAC now offers also to
directly output files ready to use for visualization along with the
simulation. A parallel run will however only be capable to provide the file-
types ready for parallel conversion (see parallel conversion). To enable this
capability, simply set the switch _autoconvert=.true._. The example above
would then read
&filelist;
base_filename='vaclogo'
autoconvert=.true.
convert_type='vtuBCCmpi'
/
and when the code is run via
mpirun -np 2 amrvac
two new output files (_vaclogoXXXX0000.vtu,
vaclogoXXXX0001.vtu_) will appear simultaneously with the _vaclogoXXXX.dat_
files, stored at given intervals. All functionality of the usual convert is
conserved, e.g. derived quantities and primitive variables (using the
_saveprim=.true._ option) can be stored in the output files.
# Notes on conversion possibilities {#notes}
## Cell center versus cell corner values {#cell_vs_corner}
In all cases mentioned below, the difference between convert-types with or
without _CC_ relates to the difference between cell center (`CC') versus cell
corner values. For the cell center case, no interpolation of the computed data
is needed, since the (conservative) variables actually represent volume
averages stored at cell centers. For the other cases (without 'CC'), the
_convert.t_ tries to interpolate from cell center to the cell corners, and
then the data will be known at the grid nodes. This will be noticeable on
reading in the data in _paraview_, which reports whether data is cell data
(cell centered) or point data (nodes or corners). In principle, the conversion
from cell centered (or cell data) to cell corner (or point data) types can
also be achieved in paraview itself, with the filter _Cell data to Point
data_. There may be subtle differences between the way MPI-AMRVAC does this
interpolation, and the way it happens internally to paraview, so we provide
both options as output _*.vtu_ files. Similar observations hold for the
Tecplot format.
## Conservative/primitive storage and adding derived quantities {#cons_vs_prim}
The **saveprim** logical allows you to select whether the conservative or
primitive variables need to be stored in the resulting output file. The names
for the conservative variables and primitive ones are hard coded depending on
the physics.
Another very useful option is to specify which variables actually need to be
converted: by default all conservative variables available in the _*.dat_ file
will be included, but then again filesizes may become restrictive. For that
purpose, the logical array _w_write_ allows to select which variable(s) to
store (and this in combination with saveprim, possibly). You can then create
different files for selected variables, knowing that the output filename will
start with _base_filename_.
We allow the possibility to compute derived variables from the _*.dat_ file in
the userfile, by setting how many you add beyond the _nw_ variables typical
for the physics module at hand, in the integer _nwauxio_. Correspondingly that
many variables, you should then compute and store in the _w(*,nw+1)_ ...
_w(*,nw+nwauxio)_ entries, in the user-written subroutine _ specialvar_output_
(as defined in _mod_usr_methods.t_). The names for these variables then
need to be provided in the corresponding _specialvarnames_output_ subroutine.
This feature is very useful, for the same reason as above: you can let the
code compute gradients of scalar fields, divergence of vector quantities, curls
of vectors, etc, using the precoded subroutines for that purpose found in _geometry.t_.
You then do not rely on visualization software to do interpolations or
discretizations, which may not reflect those actually taken in MPI-AMRVAC.
Another useful feature is the possibility to select the output AMR level. You
can let the code compute from the _*.dat_ file an output residing on a
specified level _level_io_. This then uses the MPI-AMRVAC internal means to
perform restriction and prolongations, and you can then make sure to have a
single uniform grid output too.
### _convert_type='vtu'_ or _convert_type='vtuCC'_
Does the conversion to _*.vtu_ data files. This option works on 1 CPU. The
resulting _*.vtu_ files contain data in ASCII format.
### _convert_type='vtuB'_ or _convert_type='vtuBCC'_
Does the conversion to _*.vtu_ data files. This option works on 1 CPU. The
resulting _*.vtu_ files are in binary format.
### _convert_type='vtumpi'_ or _convert_type='vtuCCmpi'_
Does the conversion to _*.vtu_ data files. This option works on multiple CPUs.
The resulting _*.vtu_ files contain data in ASCII format.
### _convert_type='vtuBmpi'_ or _convert_type='vtuBCCmpi'_
Does the conversion to _*.vtu_ data files. This option works on multiple CPUs.
The resulting _*.vtu_ files contain data in binary format. (recommended)
### _convert_type='tecplot'_ or _convert_type='tecplotCC'_
This realizes conversion to _*.plt_ files, which can be read in directly by
Tecplot. Note that the output is in ASCII format, which may mean huge data
sets, but Tecplot has its own **preplot** command that will allow you to
operate on such a file, and thereby make a binary version for it. The above is
for single CPU execution, allows to add user-defined variables with _nwauxio_,
and renormalization using the _normt_ and _normvar_ array.
### _convert_type='tecplotmpi'_ or _convert_type='tecplotCCmpi'_
Same as above, but allows to perform the conversion in parallel. One can add
user -defined variables with _nwauxio_, and renormalize using the _normt_ and
_normvar_ array. The current implementation is such that tecplotmpi and
tecplotCCmpi will create different length output ASCII files when used on 1
versus multiple CPUs. In fact, on 1 CPU, there will be as many (tecplot) zones
as there are levels, while on on multiple CPUs, there will be a number of
zones up to the number of levels times the number of CPUs (or less, when some
level does not exist on a certain CPU).
### onegrid(mpi), oneblock(B), ...
Extra possibilities to allow creation of a single uniform grid level output.
Please inspect the workings in _convert.t_.
|
amrvacREPO_NAMEamrvacPATH_START.@amrvac_extracted@amrvac-master@doc@convert.md@.PATH_END.py
|
{
"filename": "_customdata.py",
"repo_name": "catboost/catboost",
"repo_path": "catboost_extracted/catboost-master/contrib/python/plotly/py3/plotly/validators/sankey/_customdata.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class CustomdataValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Data-array validator for the `customdata` property of sankey traces."""

    def __init__(self, plotly_name="customdata", parent_name="sankey", **kwargs):
        # "calc" is the default edit type for this property; callers may
        # override it via kwargs.
        edit_type = kwargs.pop("edit_type", "calc")
        super(CustomdataValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            **kwargs,
        )
|
catboostREPO_NAMEcatboostPATH_START.@catboost_extracted@catboost-master@contrib@python@plotly@py3@plotly@validators@sankey@_customdata.py@.PATH_END.py
|
{
"filename": "repplugin.py",
"repo_name": "wokast/PyCactus",
"repo_path": "PyCactus_extracted/PyCactus-master/SimRep/simrep/plugins/repplugin.py",
"type": "Python"
}
|
# -*- coding: utf-8 -*-
from __future__ import print_function

import glob
import os
import subprocess
import sys

from builtins import str

from simrep.pydocument import *
# File extensions (without the leading dot) recognised as images when
# checking whether a report figure exists on disk.
valid_image_formats = set(['svg', 'png', 'jpeg', 'jpg',
                           'gif', 'pdf', 'eps'])

# File extensions recognised as movies by remove_missing_movies().
valid_movie_formats = set(['mpeg', 'wmv', 'mov', 'mp4'])
def script(sn, *args, **kwargs):
    """Run helper script *sn*, located next to the main script, via the shell.

    Positional arguments are passed through verbatim; keyword arguments are
    turned into ``--key=value`` options.  Returns True if the script exited
    with status 0, else False.

    NOTE(review): arguments are interpolated into the command line without
    quoting, so values containing spaces or shell metacharacters will be
    mangled — only call this with trusted, well-formed arguments.
    """
    scrpath = os.path.abspath(sys.path[0])
    scr = os.path.abspath(os.path.join(scrpath, sn))
    opts = ['--' + k + '=' + str(kwargs[k]) for k in kwargs]
    cmd = "%s %s %s" % (scr, ' '.join(args), ' '.join(opts))
    print("executing script", sn)
    # subprocess.call(shell=True) runs the same shell command line that
    # os.system did, but returns the exit status portably; the previous
    # os.WEXITSTATUS(os.system(...)) combination is POSIX-only.
    success = (subprocess.call(cmd, shell=True) == 0)
    if not success:
        print("Script %s failed." % sn)
    return success
#
def remove_missing_figures(repdir, figures):
    """Return only those figures whose image file exists on disk.

    Each figure's ``path`` attribute (relative paths are resolved against
    *repdir*) is checked against every extension in valid_image_formats.
    """
    # Plain nested defs instead of assigned lambdas (PEP8 E731); the old
    # local name ``np`` also collided with the conventional numpy alias.
    def _full_path(p):
        return p if os.path.isabs(p) else os.path.join(repdir, p)

    def _exists(p):
        return any(os.path.isfile(p + '.' + ext)
                   for ext in valid_image_formats)

    return [fig for fig in figures if _exists(_full_path(fig.path))]
def remove_missing_movies(repdir, movies):
    """Return only those movies whose video file exists on disk.

    Each movie's ``path`` attribute (relative paths are resolved against
    *repdir*) is checked against every extension in valid_movie_formats.
    """
    def _full_path(p):
        return p if os.path.isabs(p) else os.path.join(repdir, p)

    def _exists(p):
        return any(os.path.isfile(p + '.' + ext)
                   for ext in valid_movie_formats)

    return [m for m in movies if _exists(_full_path(m.path))]
def glob_figs(repdir, pattern):
    """Return the unique base names (no directory, no extension) of image
    files in *repdir* matching ``pattern.*``.

    Only files whose extension is listed in valid_image_formats are kept;
    the result order is unspecified (set-based de-duplication).
    """
    # Single pass instead of a chain of throwaway lists bound to ``l``.
    names = set()
    for path in glob.glob(os.path.join(repdir, pattern + '.*')):
        stem, ext = os.path.splitext(os.path.split(path)[1])
        if ext[1:] in valid_image_formats:
            names.add(stem)
    return list(names)
|
wokastREPO_NAMEPyCactusPATH_START.@PyCactus_extracted@PyCactus-master@SimRep@simrep@plugins@repplugin.py@.PATH_END.py
|
{
"filename": "tfsa-2021-024.md",
"repo_name": "tensorflow/tensorflow",
"repo_path": "tensorflow_extracted/tensorflow-master/tensorflow/security/advisory/tfsa-2021-024.md",
"type": "Markdown"
}
|
## TFSA-2021-024: `CHECK`-fail in `SparseConcat`
### CVE Number
CVE-2021-29534
### Impact
An attacker can trigger a denial of service via a `CHECK`-fail in
`tf.raw_ops.SparseConcat`:
```python
import tensorflow as tf
import numpy as np
indices_1 = tf.constant([[514, 514], [514, 514]], dtype=tf.int64)
indices_2 = tf.constant([[514, 530], [599, 877]], dtype=tf.int64)
indices = [indices_1, indices_2]
values_1 = tf.zeros([0], dtype=tf.int64)
values_2 = tf.zeros([0], dtype=tf.int64)
values = [values_1, values_2]
shape_1 = tf.constant([442, 514, 514, 515, 606, 347, 943, 61, 2], dtype=tf.int64)
shape_2 = tf.zeros([9], dtype=tf.int64)
shapes = [shape_1, shape_2]
tf.raw_ops.SparseConcat(indices=indices, values=values, shapes=shapes, concat_dim=2)
```
This is because the
[implementation](https://github.com/tensorflow/tensorflow/blob/b432a38fe0e1b4b904a6c222cbce794c39703e87/tensorflow/core/kernels/sparse_concat_op.cc#L76)
takes the values specified in `shapes[0]` as dimensions for the output shape:
```cc
TensorShape input_shape(shapes[0].vec<int64>());
```
The [`TensorShape`
constructor](https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L183-L188)
uses a `CHECK` operation which triggers when
[`InitDims`](https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L212-L296)
returns a non-OK status.
```cc
template <class Shape>
TensorShapeBase<Shape>::TensorShapeBase(gtl::ArraySlice<int64> dim_sizes) {
set_tag(REP16);
set_data_type(DT_INVALID);
TF_CHECK_OK(InitDims(dim_sizes));
}
```
In our scenario, this occurs when adding a dimension from the argument results
in overflow:
```cc
template <class Shape>
Status TensorShapeBase<Shape>::InitDims(gtl::ArraySlice<int64> dim_sizes) {
...
Status status = OkStatus();
for (int64 s : dim_sizes) {
status.Update(AddDimWithStatus(internal::SubtleMustCopy(s)));
if (!status.ok()) {
return status;
}
}
}
template <class Shape>
Status TensorShapeBase<Shape>::AddDimWithStatus(int64 size) {
...
int64 new_num_elements;
if (kIsPartial && (num_elements() < 0 || size < 0)) {
new_num_elements = -1;
} else {
new_num_elements = MultiplyWithoutOverflow(num_elements(), size);
if (TF_PREDICT_FALSE(new_num_elements < 0)) {
return errors::Internal("Encountered overflow when multiplying ",
num_elements(), " with ", size,
", result: ", new_num_elements);
}
}
...
}
```
This is a legacy implementation of the constructor and operations should
use `BuildTensorShapeBase` or `AddDimWithStatus` to prevent `CHECK`-failures in
the presence of overflows.
### Patches
We have patched the issue in GitHub commit
[69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c](https://github.com/tensorflow/tensorflow/commit/69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c).
The fix will be included in TensorFlow 2.5.0. We will also cherrypick this
commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow
2.1.4, as these are also affected and still in supported range.
### For more information
Please consult [our security
guide](https://github.com/tensorflow/tensorflow/blob/master/SECURITY.md) for
more information regarding the security model and how to contact us with issues
and questions.
### Attribution
This vulnerability has been reported by Yakun Zhang and Ying Wang of Baidu
X-Team.
|
tensorflowREPO_NAMEtensorflowPATH_START.@tensorflow_extracted@tensorflow-master@tensorflow@security@advisory@tfsa-2021-024.md@.PATH_END.py
|
{
"filename": "toy_ms_pulsar.py",
"repo_name": "lucabaldini/ixpeobssim",
"repo_path": "ixpeobssim_extracted/ixpeobssim-main/ixpeobssim/examples/toy_ms_pulsar.py",
"type": "Python"
}
|
# Copyright (C) 2022, the ixpeobssim team.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from __future__ import print_function, division
import numpy
import ixpeobssim.core.pipeline as pipeline
from ixpeobssim.utils.misc import pairwise, pairwise_enum
from ixpeobssim.utils.matplotlib_ import plt, setup_gca, residual_plot
import ixpeobssim.config.toy_ms_pulsar as input_model
# Common settings for the simulation.
# The fixed seed makes the xpobssim run reproducible; the occult/saa flags
# presumably disable Earth-occultation and SAA epochs — confirm in the
# xpobssim documentation.
DURATION = 25000.
SIM_KWARGS = dict(occult=False, saa=False, duration=DURATION, seed=101)
def create_event_list():
    """Simulate the observation and assign a pulse phase to every event.

    The events are generated via xpobssim and subsequently folded with the
    source ephemeris via xpphase.
    """
    event_files = pipeline.xpobssim(**SIM_KWARGS)
    pipeline.xpphase(*event_files, suffix='folded', **input_model.ephemeris.dict())
def create_photon_list():
    """Create a photon list to be fed into ixpesim and return the file list.

    This can be fed into ixpesim with something along the lines of

    ixpesim
    --src-photon-list ~/ixpeobssimdata/toy_ms_pulsar_du1_photon_list.fits
    -n 100000000
    --dme-pressure 645.
    --output-file ~/ixpeobssimdata/toy_ms_pulsar_du1_ixpesim.fits
    --dead-time-offset 760
    --dead-time-slope 300

    See https://bitbucket.org/ixpesw/gpdsw/issues/334 for all the indication
    about the deadtime.

    Returns
    -------
    The list of photon-list file paths produced by xpphotonlist.
    """
    # The original bound the result to an unused local; return it instead so
    # callers (e.g. interactive sessions) can reuse the paths directly.
    return pipeline.xpphotonlist(**SIM_KWARGS)
def run():
    """Top-level hook: execute the full analysis chain for this model.

    The photon-list creation is deliberately not part of the default chain,
    since it requires an external ixpesim run.
    """
    create_event_list()
if __name__ == '__main__':
    # Delegate command-line parsing and dispatching to the pipeline bootstrap.
    pipeline.bootstrap_pipeline('toy_ms_pulsar')
|
lucabaldiniREPO_NAMEixpeobssimPATH_START.@ixpeobssim_extracted@ixpeobssim-main@ixpeobssim@examples@toy_ms_pulsar.py@.PATH_END.py
|
{
"filename": "__init__.py",
"repo_name": "galsci/pysm",
"repo_path": "pysm_extracted/pysm-main/src/pysm3/data/__init__.py",
"type": "Python"
}
|
galsciREPO_NAMEpysmPATH_START.@pysm_extracted@pysm-main@src@pysm3@data@__init__.py@.PATH_END.py
|
|
{
"filename": "cov_output.py",
"repo_name": "rreischke/OneCovariance",
"repo_path": "OneCovariance_extracted/OneCovariance-main/onecov/cov_output.py",
"type": "Python"
}
|
import enum
import numpy as np
import os
from astropy.units.cgs import K
# from astropy.io import fits
#plot stuff
import matplotlib.pyplot as plt
from matplotlib import rc, rcParams
# rc('font', **{'family': 'serif', 'serif': ['Computer Modern']})
# Global matplotlib look-and-feel used by every diagnostic plot in this
# module (fonts, line widths, tick geometry).
plt.rc('font', family='sans-serif')
#
rcParams['figure.figsize'] = (8., 6.)
rcParams['axes.linewidth'] = 2
rcParams['axes.labelsize'] = 20
rcParams['axes.titlesize'] = 30
rcParams['font.size'] = 16
rcParams['lines.linewidth'] = 3
rcParams['lines.markersize'] = 10
rcParams['lines.markeredgewidth'] = 2
rcParams['xtick.major.size'] = 8
rcParams['xtick.minor.size'] = 4
rcParams['xtick.major.width'] = 1.5
rcParams['xtick.labelsize'] = 16
rcParams['ytick.major.size'] = 8
rcParams['ytick.minor.size'] = 4
rcParams['ytick.major.width'] = 1.5
rcParams['ytick.labelsize'] = 16
class Output():
"""
Class writing the output of the OneCovariance code. Methods of this class collect
all the necessary blocks of the covariance matrix.
Parameters:
-----------
output_dict : dictionary
Example :
---------
from cov_input import Input, FileInput
from cov_ell_space import CovELLSpace
from cov_output import Output
inp = Input()
covterms, observables, output, cosmo, bias, hod, survey_params, \
prec = inp.read_input()
fileinp = FileInput()
read_in_tables = fileinp.read_input()
covell = CovELLSpace(covterms, observables, output, cosmo, bias,
hod, survey_params, prec, read_in_tables)
gauss, nongauss, ssc = \
covell.calc_covELL(observables, output, bias, hod,
survey_params, prec, read_in_tables)
out = Output(output)
out.write_cov(covterms, observables, covell.n_tomo_clust,
covell.n_tomo_lens, covell.ellrange, gauss, nongauss,
ssc)
"""
def __init__(self, output_dict, projected_clust = None, projected_lens = None):
self.filename = output_dict['file']
self.__check_filetype()
self.style = output_dict['style']
self.plot = output_dict['make_plot']
self.trispecfile = output_dict['trispec']
self.Cellfile = output_dict['Cell']
self.tex = output_dict['use_tex']
self.save_as_binary = output_dict['save_as_binary']
self.list_style_spatial_first = output_dict['list_style_spatial_first']
self.projected_lens = projected_lens
self.projected_clust = projected_clust
def __check_filetype(self):
for idx,fn in enumerate(self.filename):
if fn == '':
continue
dotloc = fn[::-1].find('.')
if dotloc == -1 or dotloc == 0:
filetype = 'dat'
else:
dotloc = len(fn) - dotloc
filetype = fn[dotloc:]
if filetype == 'fits':
print("ConfigWarning: Fits output is not implemented yet, sorry " +
":(. The file extension will be changed to 'dat'.")
self.filename[idx] = fn[:dotloc] + 'dat'
return True
def __add_string_to_filename(self,
addin,
fname):
dotloc = fname[::-1].find('.')
if dotloc == -1 or dotloc == 0:
dotloc = 1
dotloc = len(fname) - dotloc - 1
if type(addin) == str:
fname = fname[:dotloc] + '_' + addin + fname[dotloc:]
else:
fname = fname[:dotloc] + '_' + str(round(addin,4)) + fname[dotloc:]
return fname
def write_arbitrary_cov(self,
                        cov_dict,
                        obs_dict,
                        n_tomo_clust,
                        n_tomo_lens,
                        read_in_tables,
                        gauss,
                        nongauss,
                        ssc):
    """
    Writes the covariance matrix for arbitrary summary statistics to a
    file depending on the specifications in the config.ini.

    Parameters
    ----------
    cov_dict : dictionary
        Specifies which terms of the covariance (Gaussian, non-Gaussian,
        super-sample covariance) should be calculated. To be passed from
        the read_input method of the Input class.
    obs_dict : dictionary
        with the following keys (To be passed from the read_input method
        of the Input class.)
        'observables' : dictionary
            Specifies which observables (cosmic shear, galaxy-galaxy
            lensing and/or clustering) should be calculated. Also,
            indicates whether cross-terms are evaluated.
        'ELLspace' : dictionary
            Specifies the exact details of the projection to ell space.
        'THETAspace' : dictionary
            Specifies the exact details of the projection to real space.
        'COSEBIs' : dictionary
            Specifies the exact details of the projection to COSEBIs.
        'bandpowers' : dictionary
            Specifies the exact details of the projection to bandpowers.
    n_tomo_clust : int
        Number of clustering (lens) bins
    n_tomo_lens : int
        Number of lensing (source) bins
    read_in_tables : dictionary
        Look-up tables describing the arbitrary summary statistics
        (weights, mode numbers); takes the role of the projected
        quantity used by write_cov.
    gauss : list of arrays
        Gaussian covariance split into the different components
    nongauss : list of arrays
        Non-Gaussian covariance
    ssc : list of arrays
        Supersample covariance
    """
    # Remember which contributions were requested so the list/matrix
    # writers can label their columns accordingly.
    self.has_gauss, self.has_nongauss, self.has_ssc = cov_dict['gauss'], cov_dict['nongauss'], cov_dict['ssc']
    gauss, nongauss, ssc = self.__none_to_zero(gauss, nongauss, ssc)
    obslist, obsbool, obslength = self.__get_obslist(obs_dict)
    gg, gm, mm = obsbool[0], obsbool[3], obsbool[5]
    xipp, xipm, ximm = None, None, None
    mult = 1
    # Normalize the Gaussian input to one entry per observable
    # combination: separate shot-noise terms (lengths obslength+3, 14)
    # are folded into the corresponding sampling-variance entries;
    # 10/14/30-entry inputs switch to the split xi_p/xi_m observable
    # list; *3-length inputs keep the sva/mix/sn split (mult = 3).
    if len(gauss) == obslength:
        ...
    elif len(gauss) == obslength+3:
        gauss = [gauss[0]+gauss[6], gauss[1],
                 gauss[2], gauss[3]+gauss[7],
                 gauss[4], gauss[5]+gauss[8]]
    elif len(gauss) == 10:
        obslist, obsbool, obslength = self.__get_obslist(obs_dict, True)
        xipp, xipm, ximm = obsbool[7], obsbool[8], obsbool[9]
    elif len(gauss) == 14:
        gauss = [gauss[0]+gauss[10], gauss[1], gauss[2], gauss[3],
                 gauss[4]+gauss[11], gauss[5], gauss[6],
                 gauss[7]+gauss[12], gauss[8],
                 gauss[9]+gauss[13]]
        obslist, obsbool, obslength = self.__get_obslist(obs_dict, True)
        xipp, xipm, ximm = obsbool[7], obsbool[8], obsbool[9]
    elif len(gauss) == obslength*3:
        mult = 3
    elif len(gauss) == 30:
        mult = 3
        obslist, obsbool, obslength = self.__get_obslist(obs_dict, True)
        xipp, xipm, ximm = obsbool[7], obsbool[8], obsbool[9]
    # NOTE(review): '... is not None or ... != 0' is always True for any
    # n_spec (None fails the first test but passes the second); 'and' was
    # presumably intended — verify against the 6x2pt code path.
    elif len(gauss) == 22 and (obs_dict['ELLspace']['n_spec'] is not None or obs_dict['ELLspace']['n_spec'] != 0):
        ...
    elif len(gauss) == 66 and (obs_dict['ELLspace']['n_spec'] is not None or obs_dict['ELLspace']['n_spec'] != 0):
        mult = 3
    else:
        raise Exception("OutputError: The gaussian covariance needs at " +
            "least 6 entries in the order ['gggg', 'gggm', 'ggmm', " +
            "'gmgm', 'mmgm', 'mmmm'] or ['gggg', 'gggm', 'ggxip', " +
            "'ggxim', 'gmgm', 'gmxip', 'gmxim', 'xipxip', 'xi_pm', " +
            "'xi_mm']. Replacing the respective inputs with 0 or None " +
            "is supported.")
    if len(nongauss) != obslength and (obs_dict['ELLspace']['n_spec'] is None or obs_dict['ELLspace']['n_spec'] == 0):
        raise Exception("OutputError: The nongaussian covariance needs " +
            "at least 6 entries in the order ['gggg', 'gggm', 'ggmm', " +
            "'gmgm', 'mmgm', 'mmmm'] or ['gggg', 'gggm', 'ggxip', " +
            "'ggxim', 'gmgm', 'gmxip', 'gmxim', 'xipxip', 'xi_pm', " +
            "'xi_mm']. Replacing the respective inputs with 0 or None " +
            "is supported.")
    if len(ssc) != obslength and (obs_dict['ELLspace']['n_spec'] is None or obs_dict['ELLspace']['n_spec'] == 0):
        raise Exception("OutputError: The super-sample covariance needs " +
            "at least 6 entries in the order ['gggg', 'gggm', 'ggmm', " +
            "'gmgm', 'mmgm', 'mmmm'] or ['gggg', 'gggm', 'ggxip', " +
            "'ggxim', 'gmgm', 'gmxip', 'gmxim', 'xipxip', 'xi_pm', " +
            "'xi_mm']. Replacing the respective inputs with 0 or None " +
            "is supported.")
    sampledim = self.__get_sampledim(gauss, nongauss, ssc)
    # Walk over all observable combinations; disable the ones for which
    # no covariance term carries any data, and sanitize the remaining
    # arrays. gaussidx tracks the (possibly mult-fold) Gaussian entries.
    gaussidx = 0
    for idx in range(obslength):
        gausstest = 0
        if n_tomo_lens is None and n_tomo_clust is None:
            # NOTE(review): 'shape' is computed here but never used in the
            # visible code — confirm whether this branch is vestigial.
            shape = [len(sampledim)]*2 + [sampledim]*2
        else:
            tomodim = \
                self.__get_tomodim(gauss[gaussidx], nongauss[idx], ssc[idx])
            if tomodim[0] == -1:
                # No tomographic content at all for this combination:
                # consume the Gaussian entries and check whether every
                # term is a plain scalar placeholder (int).
                for _ in range(mult):
                    gausstest += gauss[gaussidx]
                    gaussidx += 1
                if type(gausstest+nongauss[idx]+ssc[idx]) == int and \
                        obsbool[idx]:
                    obsbool[idx] = False
                    gg = False if obslist[idx] == 'gggg' else gg
                    gm = False if obslist[idx] == 'gmgm' else gm
                    mm = False if obslist[idx] == 'mmmm' else mm
                    print("OutputWarning: According to the config file " +
                          "the covariance for " + obslist[idx] + " ('m' " +
                          "might be 'kappa' in your case) is supposed " +
                          "to be calculated but neither the Gaussian, " +
                          "non-Gaussian or super-sample covariance has " +
                          "any values. This term will be manually set " +
                          "to False.")
                continue
        # Replace any all-zero arrays by a well-defined empty input.
        for _ in range(mult):
            if isinstance(gauss[gaussidx], np.ndarray):
                gauss[gaussidx] = self.__check_for_empty_input(gauss[gaussidx], gauss[gaussidx].shape)
            gaussidx += 1
        if isinstance(nongauss[idx], np.ndarray):
            nongauss[idx] = self.__check_for_empty_input(nongauss[idx], nongauss[idx].shape)
        if isinstance(ssc[idx], np.ndarray):
            ssc[idx] = self.__check_for_empty_input(ssc[idx], ssc[idx].shape)
    # Dispatch to the requested writers (list-style and/or matrix-style).
    if ('terminal' in self.style or 'list' in self.style):
        fct_args = [obslist, obsbool]
        if self.list_style_spatial_first:
            self.__write_cov_list_arbitrary_cosmosis_style(cov_dict, obs_dict, n_tomo_clust,
                                                           n_tomo_lens, sampledim, read_in_tables,
                                                           gauss, nongauss, ssc, fct_args)
        else:
            self.__write_cov_list_arbitrary(cov_dict, obs_dict, n_tomo_clust,
                                            n_tomo_lens, sampledim, read_in_tables,
                                            gauss, nongauss, ssc, fct_args)
    if 'matrix' in self.style or self.plot:
        fct_args = [obslist, obsbool, obslength, mult,
                    gg, gm, mm, xipp, xipm, ximm]
        self.__write_cov_matrix_arbitrary(obs_dict, cov_dict, n_tomo_clust,
                                          n_tomo_lens, sampledim, read_in_tables,
                                          gauss, nongauss, ssc, fct_args)
    return True
def write_cov(self,
              cov_dict,
              obs_dict,
              n_tomo_clust,
              n_tomo_lens,
              proj_quant,
              gauss,
              nongauss,
              ssc):
    """
    Writes the covariance matrix to a file depending on the
    specifications in the config.ini.

    Parameters
    ----------
    cov_dict : dictionary
        Specifies which terms of the covariance (Gaussian, non-Gaussian,
        super-sample covariance) should be calculated. To be passed from
        the read_input method of the Input class.
    obs_dict : dictionary
        with the following keys (To be passed from the read_input method
        of the Input class.)
        'observables' : dictionary
            Specifies which observables (cosmic shear, galaxy-galaxy
            lensing and/or clustering) should be calculated. Also,
            indicates whether cross-terms are evaluated.
        'ELLspace' : dictionary
            Specifies the exact details of the projection to ell space.
        'THETAspace' : dictionary
            Specifies the exact details of the projection to real space.
        'COSEBIs' : dictionary
            Specifies the exact details of the projection to COSEBIs.
        'bandpowers' : dictionary
            Specifies the exact details of the projection to bandpowers.
    n_tomo_clust : int
        Number of clustering (lens) bins
    n_tomo_lens : int
        Number of lensing (source) bins
    proj_quant : array
        The projected quantity associated with the covariance, e.g.
        theta, or ell
    gauss : list of arrays
        Gaussian covariance split into the different components
    nongauss : list of arrays
        Non-Gaussian covariance
    ssc : list of arrays
        Supersample covariance
    """
    # Remember which contributions were requested so the list/matrix
    # writers can label their columns accordingly.
    self.has_gauss, self.has_nongauss, self.has_ssc = cov_dict['gauss'], cov_dict['nongauss'], cov_dict['ssc']
    gauss, nongauss, ssc = self.__none_to_zero(gauss, nongauss, ssc)
    obslist, obsbool, obslength = self.__get_obslist(obs_dict)
    gg, gm, mm = obsbool[0], obsbool[3], obsbool[5]
    xipp, xipm, ximm = None, None, None
    mult = 1
    # Normalize the Gaussian input to one entry per observable
    # combination (see write_arbitrary_cov for the same bookkeeping):
    # fold separate shot-noise terms into the sampling-variance entries,
    # switch to the split xi_p/xi_m list for 10/14/30 entries, and keep
    # the sva/mix/sn split (mult = 3) for *3-length inputs.
    if len(gauss) == obslength:
        ...
    elif len(gauss) == obslength+3:
        gauss = [gauss[0]+gauss[6], gauss[1],
                 gauss[2], gauss[3]+gauss[7],
                 gauss[4], gauss[5]+gauss[8]]
    elif len(gauss) == 10:
        obslist, obsbool, obslength = self.__get_obslist(obs_dict, True)
        xipp, xipm, ximm = obsbool[7], obsbool[8], obsbool[9]
    elif len(gauss) == 14:
        gauss = [gauss[0]+gauss[10], gauss[1], gauss[2], gauss[3],
                 gauss[4]+gauss[11], gauss[5], gauss[6],
                 gauss[7]+gauss[12], gauss[8],
                 gauss[9]+gauss[13]]
        obslist, obsbool, obslength = self.__get_obslist(obs_dict, True)
        xipp, xipm, ximm = obsbool[7], obsbool[8], obsbool[9]
    elif len(gauss) == obslength*3:
        mult = 3
    elif len(gauss) == 30:
        mult = 3
        obslist, obsbool, obslength = self.__get_obslist(obs_dict, True)
        xipp, xipm, ximm = obsbool[7], obsbool[8], obsbool[9]
    # NOTE(review): '... is not None or ... != 0' is always True for any
    # n_spec (None fails the first test but passes the second); 'and' was
    # presumably intended — verify against the 6x2pt code path.
    elif len(gauss) == 22 and (obs_dict['ELLspace']['n_spec'] is not None or obs_dict['ELLspace']['n_spec'] != 0):
        ...
    elif len(gauss) == 66 and (obs_dict['ELLspace']['n_spec'] is not None or obs_dict['ELLspace']['n_spec'] != 0):
        mult = 3
    else:
        raise Exception("OutputError: The gaussian covariance needs at " +
            "least 6 entries in the order ['gggg', 'gggm', 'ggmm', " +
            "'gmgm', 'mmgm', 'mmmm'] or ['gggg', 'gggm', 'ggxip', " +
            "'ggxim', 'gmgm', 'gmxip', 'gmxim', 'xipxip', 'xi_pm', " +
            "'xi_mm']. Replacing the respective inputs with 0 or None " +
            "is supported.")
    if len(nongauss) != obslength and (obs_dict['ELLspace']['n_spec'] is None or obs_dict['ELLspace']['n_spec'] == 0):
        raise Exception("OutputError: The nongaussian covariance needs " +
            "at least 6 entries in the order ['gggg', 'gggm', 'ggmm', " +
            "'gmgm', 'mmgm', 'mmmm'] or ['gggg', 'gggm', 'ggxip', " +
            "'ggxim', 'gmgm', 'gmxip', 'gmxim', 'xipxip', 'xi_pm', " +
            "'xi_mm']. Replacing the respective inputs with 0 or None " +
            "is supported.")
    if len(ssc) != obslength and (obs_dict['ELLspace']['n_spec'] is None or obs_dict['ELLspace']['n_spec'] == 0):
        raise Exception("OutputError: The super-sample covariance needs " +
            "at least 6 entries in the order ['gggg', 'gggm', 'ggmm', " +
            "'gmgm', 'mmgm', 'mmmm'] or ['gggg', 'gggm', 'ggxip', " +
            "'ggxim', 'gmgm', 'gmxip', 'gmxim', 'xipxip', 'xi_pm', " +
            "'xi_mm']. Replacing the respective inputs with 0 or None " +
            "is supported.")
    sampledim = self.__get_sampledim(gauss, nongauss, ssc)
    # Walk over all observable combinations; disable the ones for which
    # no covariance term carries any data, and sanitize the remaining
    # arrays. gaussidx tracks the (possibly mult-fold) Gaussian entries.
    gaussidx = 0
    for idx in range(obslength):
        gausstest = 0
        if n_tomo_lens is None and n_tomo_clust is None:
            # NOTE(review): 'shape' is computed here but never used in the
            # visible code — confirm whether this branch is vestigial.
            shape = [len(proj_quant)]*2 + [sampledim]*2
        else:
            tomodim = \
                self.__get_tomodim(gauss[gaussidx], nongauss[idx], ssc[idx])
            if tomodim[0] == -1:
                # No tomographic content at all for this combination:
                # consume the Gaussian entries and check whether every
                # term is a plain scalar placeholder (int).
                for _ in range(mult):
                    gausstest += gauss[gaussidx]
                    gaussidx += 1
                if type(gausstest+nongauss[idx]+ssc[idx]) == int and \
                        obsbool[idx]:
                    obsbool[idx] = False
                    gg = False if obslist[idx] == 'gggg' else gg
                    gm = False if obslist[idx] == 'gmgm' else gm
                    mm = False if obslist[idx] == 'mmmm' else mm
                    print("OutputWarning: According to the config file " +
                          "the covariance for " + obslist[idx] + " ('m' " +
                          "might be 'kappa' in your case) is supposed " +
                          "to be calculated but neither the Gaussian, " +
                          "non-Gaussian or super-sample covariance has " +
                          "any values. This term will be manually set " +
                          "to False.")
                continue
        # Replace any all-zero arrays by a well-defined empty input.
        for _ in range(mult):
            if isinstance(gauss[gaussidx], np.ndarray):
                gauss[gaussidx] = self.__check_for_empty_input(gauss[gaussidx], gauss[gaussidx].shape)
            gaussidx += 1
        if isinstance(nongauss[idx], np.ndarray):
            nongauss[idx] = self.__check_for_empty_input(nongauss[idx], nongauss[idx].shape)
        if isinstance(ssc[idx], np.ndarray):
            ssc[idx] = self.__check_for_empty_input(ssc[idx], ssc[idx].shape)
    # Dispatch to the requested writers (list-style and/or matrix-style).
    if ('terminal' in self.style or 'list' in self.style):
        fct_args = [obslist, obsbool]
        if self.list_style_spatial_first:
            self.__write_cov_list_cosmosis_style(cov_dict, obs_dict, n_tomo_clust,
                                                 n_tomo_lens, sampledim, proj_quant,
                                                 gauss, nongauss, ssc, fct_args)
        else:
            self.__write_cov_list(cov_dict, obs_dict, n_tomo_clust,
                                  n_tomo_lens, sampledim, proj_quant,
                                  gauss, nongauss, ssc, fct_args)
    if 'matrix' in self.style or self.plot:
        fct_args = [obslist, obsbool, obslength, mult,
                    gg, gm, mm, xipp, xipm, ximm]
        self.__write_cov_matrix_new(obs_dict, cov_dict, n_tomo_clust,
                                    n_tomo_lens, sampledim, proj_quant,
                                    gauss, nongauss, ssc, fct_args)
def plot_corrcoeff_matrix(self,
                          obs_dict,
                          covmatrix,
                          cov_diag,
                          proj_quant,
                          n_tomo_clust,
                          n_tomo_lens,
                          sampledim,
                          filename = None,
                          fct_args = None):
    """
    Plots the Pearson correlation coefficient of the covariance matrix
    to a file depending on the specifications in the config.ini.

    Parameters
    ----------
    obs_dict : dictionary
        with the usual observable keys (To be passed from the read_input
        method of the Input class); only the estimator entries of
        obs_dict['observables'] are read here, to label the panels.
    covmatrix : 2d array
        The full covariance matrix with all contributions
    cov_diag : 2d array
        The diagonal block part of the covariance matrix
    proj_quant : array
        Projected quantity (e.g. theta or ell) used to size the panels;
        overridden by self.projected_clust / self.projected_lens when
        those are set.
    n_tomo_clust : int
        Number of clustering (lens) bins
    n_tomo_lens : int
        Number of lensing (source) bins
    sampledim : int
        Number of sample bins
    filename : str
        Filename of the plot
    fct_args : list
        [obslist, obsbool, obslength, mult, gg, gm, mm, xipp, xipm,
        ximm] as assembled by the write_cov dispatcher.
    """
    obslist, obsbool, obslength, mult, gg, gm, mm, xipp, xipm, ximm = \
        fct_args
    # Scale factor used to place the title relative to the matrix size.
    ratio = len(covmatrix) / 140
    if self.tex:
        plt.rc('text', usetex=True)
        #plt.rc('image', interpolation='none')
    else:
        plt.rc('text', usetex=False)
        #plt.rc('image', interpolation='none')
    fig, ax = plt.subplots(1, 1, figsize=(12,12))
    corr_covmatrix = self.__correlation_matrix(covmatrix)
    # Symmetric color scale around zero so positive/negative
    # correlations get the same visual weight.
    limit = max(-min(corr_covmatrix.flatten()), max(corr_covmatrix.flatten()))
    cbar = ax.imshow(corr_covmatrix, cmap = 'seismic',
                     extent = [0, len(corr_covmatrix), 0, len(corr_covmatrix)],
                     vmin=-limit, vmax=limit)
    fig.colorbar(cbar, location='bottom', shrink=.775, aspect=30, pad=0.055).ax.tick_params(axis='x', direction='in')
    ax.text(len(covmatrix)/2, -6*ratio, 'Correlation coefficients', fontsize=16, ha='center', va='center')
    # Running bookkeeping for the panel separators and axis labels:
    # 'position' advances through the matrix as each observable block is
    # drawn; image rows count from the top, hence the len(covmatrix)-…
    # mirroring for all horizontal lines and y label positions.
    labels_position = []
    labels_position_y = []
    labels_text = []
    position = 0
    old_position = 0
    if gg:
        if np.any(self.projected_clust):
            proj_quant = self.projected_clust
        # Dashed separators per tomographic pair, dotted per sample bin.
        sub_position_tomo = 0
        for sub_tomo in range(int(n_tomo_clust*(n_tomo_clust + 1)/2)):
            sub_position_sample = sub_position_tomo
            sub_position_tomo += len(proj_quant)*sampledim
            ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
            ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
            for sub_sample in range(sampledim):
                sub_position_sample += len(proj_quant)
                ax.axhline(y=len(covmatrix)-sub_position_sample, color='black', linewidth=.15, ls = ":")
                ax.axvline(x=sub_position_sample, color='black', linewidth=.15, ls = ":")
        position += len(proj_quant)*sampledim*n_tomo_clust*(n_tomo_clust + 1)/2
        old_position = position
        labels_position.append(position/2)
        labels_position_y.append(len(covmatrix) - position/2)
        # Panel label depends on the clustering estimator in use.
        if obs_dict['observables']['est_clust'] == 'k_space':
            labels_text.append(r'$P_\mathrm{gg}(k)$')
        if obs_dict['observables']['est_clust'] == 'C_ell':
            labels_text.append(r'$C_\mathrm{gg}(\ell)$')
        if obs_dict['observables']['est_clust'] == 'w':
            labels_text.append(r'$w(\theta)$')
        if obs_dict['observables']['est_clust'] == 'cosebi':
            labels_text.append(r'$\Psi^\mathrm{gg}_n$')
        if obs_dict['observables']['est_clust'] == 'bandpowers':
            labels_text.append(r'$\mathcal{C}_\mathrm{gg}(L)$')
        # Solid line closing the gg block.
        ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
        ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
    if gm:
        if np.any(self.projected_clust):
            proj_quant = self.projected_clust
        sub_position_tomo = old_position
        for sub_tomo in range(int(n_tomo_clust*n_tomo_lens)):
            sub_position_sample = sub_position_tomo
            sub_position_tomo += len(proj_quant)*sampledim
            ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
            ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
            for sub_sample in range(sampledim):
                sub_position_sample += len(proj_quant)
                ax.axhline(y=len(covmatrix)-sub_position_sample, color='black', linewidth=.15, ls = ":")
                ax.axvline(x=sub_position_sample, color='black', linewidth=.15, ls = ":")
        position += len(proj_quant)*sampledim*n_tomo_clust*n_tomo_lens
        labels_position.append(old_position + (position- old_position)/2)
        labels_position_y.append(len(covmatrix) - old_position - (position- old_position)/2)
        old_position = position
        # Panel label depends on the GGL estimator in use.
        if obs_dict['observables']['est_ggl'] == 'k_space':
            labels_text.append(r'$P_\mathrm{gm}(k)$')
        if obs_dict['observables']['est_ggl'] == 'C_ell':
            labels_text.append(r'$C_\mathrm{gm}(\ell)$')
        if obs_dict['observables']['est_ggl'] == 'gamma_t':
            labels_text.append(r'$\gamma_\mathrm{t}(\theta)$')
        if obs_dict['observables']['est_ggl'] == 'cosebi':
            labels_text.append(r'$\Psi^\mathrm{gm}_n$')
        if obs_dict['observables']['est_ggl'] == 'bandpowers':
            labels_text.append(r'$\mathcal{C}_\mathrm{gm}(L)$')
        ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
        ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
    if mm:
        if np.any(self.projected_lens):
            proj_quant = self.projected_lens
        # First lensing panel (xi_+ / E-modes); no sample-bin split here.
        sub_position_tomo = old_position
        for sub_tomo in range(int(n_tomo_lens*(n_tomo_lens + 1)/2)):
            sub_position_sample = sub_position_tomo
            sub_position_tomo += len(proj_quant)
            ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
            ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
        position += len(proj_quant)*n_tomo_lens*(n_tomo_lens + 1)/2
        labels_position.append(old_position + (position- old_position)/2)
        labels_position_y.append(len(covmatrix) - old_position - (position- old_position)/2)
        old_position = position
        # Two-component estimators (xi_pm, COSEBIs, bandpowers) append
        # both panel labels here; the second panel is drawn below.
        if obs_dict['observables']['est_shear'] == 'k_space':
            labels_text.append(r'$P_\mathrm{mm}(k)$')
        if obs_dict['observables']['est_shear'] == 'C_ell':
            labels_text.append(r'$C_\mathrm{mm}(\ell)$')
        if obs_dict['observables']['est_shear'] == 'xi_pm':
            labels_text.append(r'$\xi_+(\theta)$')
            labels_text.append(r'$\xi_-(\theta)$')
        if obs_dict['observables']['est_shear'] == 'cosebi':
            labels_text.append(r'$E_n$')
            labels_text.append(r'$B_n$')
        if obs_dict['observables']['est_shear'] == 'bandpowers':
            labels_text.append(r'$\mathcal{C}_\mathrm{E}(L)$')
            labels_text.append(r'$\mathcal{C}_\mathrm{B}(L)$')
        ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
        ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
        # Second lensing panel (xi_- / B-modes) for every estimator that
        # carries two components, i.e. everything except C_ell.
        sub_position_tomo = old_position
        if obs_dict['observables']['est_shear'] != 'C_ell':
            for sub_tomo in range(int(n_tomo_lens*(n_tomo_lens + 1)/2)):
                sub_position_sample = sub_position_tomo
                sub_position_tomo += len(proj_quant)
                ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
                ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
            position += len(proj_quant)*n_tomo_lens*(n_tomo_lens + 1)/2
            labels_position.append(old_position + (position- old_position)/2)
            labels_position_y.append(len(covmatrix) - old_position - (position- old_position)/2)
            old_position = position
            ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
            ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
    ax.xaxis.tick_top()
    plt.yticks(labels_position_y, labels_text)
    plt.xticks(labels_position, labels_text)
    if filename is not None:
        fig.savefig(filename, bbox_inches='tight', pad_inches=0.1)
        print("Plotting correlation matrix")
def plot_corrcoeff_matrix_arbitrary(self,
                                    obs_dict,
                                    covmatrix,
                                    cov_diag,
                                    summary,
                                    n_tomo_clust,
                                    n_tomo_lens,
                                    sampledim,
                                    filename = None,
                                    fct_args = None):
    """
    Plots the Pearson correlation coefficient of the covariance matrix
    for arbitrary summary statistics to a file depending on the
    specifications in the config.ini.

    Parameters
    ----------
    obs_dict : dictionary
        with the usual observable keys (To be passed from the read_input
        method of the Input class).
    covmatrix : 2d array
        The full covariance matrix with all contributions
    cov_diag : 2d array
        The diagonal block part of the covariance matrix
    summary : dictionary
        Description of the arbitrary summary statistics; the entries
        'number_summary_gg/gm/mm', 'arb_number_first_summary_gg/gm/mm'
        and the weight tables 'WL_gg/gm/mmE' are used here to size the
        panels.
    n_tomo_clust : int
        Number of clustering (lens) bins
    n_tomo_lens : int
        Number of lensing (source) bins
    sampledim : int
        Number of sample bins
    filename : str
        Filename of the plot
    fct_args : list
        [obslist, obsbool, obslength, mult, gg, gm, mm, xipp, xipm,
        ximm] as assembled by the write_arbitrary_cov dispatcher.
    """
    obslist, obsbool, obslength, mult, gg, gm, mm, xipp, xipm, ximm = \
        fct_args
    # Scale factor used to place the title relative to the matrix size.
    ratio = len(covmatrix) / 140
    if self.tex:
        plt.rc('text', usetex=True)
        #plt.rc('image', interpolation='none')
    else:
        plt.rc('text', usetex=False)
        #plt.rc('image', interpolation='none')
    fig, ax = plt.subplots(1, 1, figsize=(12,12))
    corr_covmatrix = self.__correlation_matrix(covmatrix)
    # Symmetric color scale around zero.
    limit = max(-min(corr_covmatrix.flatten()), max(corr_covmatrix.flatten()))
    cbar = ax.imshow(corr_covmatrix, cmap = 'seismic',
                     extent = [0, len(corr_covmatrix), 0, len(corr_covmatrix)],
                     vmin=-limit, vmax=limit)
    fig.colorbar(cbar, location='bottom', shrink=.775, aspect=30, pad=0.055).ax.tick_params(axis='x', direction='in')
    ax.text(len(covmatrix)/2, -6*ratio, 'Correlation coefficients', fontsize=16, ha='center', va='center')
    # Running bookkeeping for separators and labels; image rows count
    # from the top, hence the len(covmatrix)-… mirroring below.
    labels_position = []
    labels_position_y = []
    labels_text = []
    position = 0
    old_position = 0
    if gg:
        # Number of data points per gg summary statistic (up to two).
        gg_summary_length = []
        gg_summary_length.append(int(summary['arb_number_first_summary_gg']))
        if summary['number_summary_gg'] > 1:
            gg_summary_length.append(int(len(summary['WL_gg']) - summary['arb_number_first_summary_gg']))
        sub_position_tomo = 0
        for i in range(summary['number_summary_gg']):
            for sub_tomo in range(int(n_tomo_clust*(n_tomo_clust + 1)/2)):
                sub_position_sample = sub_position_tomo
                sub_position_tomo += gg_summary_length[i]*sampledim
                ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
                ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
                for sub_sample in range(sampledim):
                    sub_position_sample += gg_summary_length[i]
                    ax.axhline(y=len(covmatrix)-sub_position_sample, color='black', linewidth=.15, ls = ":")
                    ax.axvline(x=sub_position_sample, color='black', linewidth=.15, ls = ":")
            position += gg_summary_length[i]*sampledim*n_tomo_clust*(n_tomo_clust + 1)/2
            labels_position.append(position - gg_summary_length[i]*sampledim*n_tomo_clust*(n_tomo_clust + 1)/2/2)
            old_position = position
            labels_position_y.append(len(covmatrix) - (position - gg_summary_length[i]*sampledim*n_tomo_clust*(n_tomo_clust + 1)/2/2))
            labels_text.append(r'$\mathcal{O}_{\mathrm{gg},p_'+str(i+1)+'}(L)$')
        ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
        ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
    if gm:
        # Number of data points per gm summary statistic (up to two).
        gm_summary_length = []
        gm_summary_length.append(int(summary['arb_number_first_summary_gm']))
        if summary['number_summary_gm'] > 1:
            gm_summary_length.append(int(len(summary['WL_gm']) - summary['arb_number_first_summary_gm']))
        sub_position_tomo = old_position
        for i in range(summary['number_summary_gm']):
            for sub_tomo in range(int(n_tomo_clust*n_tomo_lens)):
                sub_position_sample = sub_position_tomo
                sub_position_tomo += gm_summary_length[i]*sampledim
                ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
                ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
                for sub_sample in range(sampledim):
                    sub_position_sample += gm_summary_length[i]
                    ax.axhline(y=len(covmatrix)-sub_position_sample, color='black', linewidth=.15, ls = ":")
                    ax.axvline(x=sub_position_sample, color='black', linewidth=.15, ls = ":")
            position += gm_summary_length[i]*sampledim*n_tomo_clust*n_tomo_lens
            labels_position.append(position - gm_summary_length[i]*sampledim*n_tomo_clust*n_tomo_lens/2)
            labels_position_y.append(len(covmatrix) - (position - gm_summary_length[i]*sampledim*n_tomo_clust*n_tomo_lens/2))
            old_position = position
            labels_text.append(r'$\mathcal{O}_{\mathrm{gm},p_'+str(i+1)+'}(L)$')
        ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
        ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
    if mm:
        # Number of data points per mm summary statistic (up to two).
        mm_summary_length = []
        mm_summary_length.append(int(summary['arb_number_first_summary_mm']))
        if summary['number_summary_mm'] > 1:
            mm_summary_length.append(int(len(summary['WL_mmE']) - summary['arb_number_first_summary_mm']))
        # First lensing panel: E-mode part.
        sub_position_tomo = old_position
        for i in range(summary['number_summary_mm']):
            for sub_tomo in range(int(n_tomo_lens*(n_tomo_lens + 1)/2)):
                sub_position_sample = sub_position_tomo
                sub_position_tomo += mm_summary_length[i]
                ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
                ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
            position += mm_summary_length[i]*n_tomo_lens*(n_tomo_lens + 1)/2
            labels_position.append(position - mm_summary_length[i]*n_tomo_lens*(n_tomo_lens + 1)/2/2)
            labels_position_y.append(len(covmatrix) - (position - mm_summary_length[i]*n_tomo_lens*(n_tomo_lens + 1)/2/2))
            old_position = position
            labels_text.append(r'$\mathcal{O}_{\mathrm{mmE},p_'+str(i+1)+'}(L)$')
        ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
        ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
        # Second lensing panel: B-mode part (same block sizes).
        sub_position_tomo = old_position
        for i in range(summary['number_summary_mm']):
            for sub_tomo in range(int(n_tomo_lens*(n_tomo_lens + 1)/2)):
                sub_position_sample = sub_position_tomo
                sub_position_tomo += mm_summary_length[i]
                ax.axhline(y=len(covmatrix)-sub_position_tomo, color='black', linewidth=.3, ls = "--")
                ax.axvline(x=sub_position_tomo, color='black', linewidth=.3, ls = "--")
            position += mm_summary_length[i]*n_tomo_lens*(n_tomo_lens + 1)/2
            labels_text.append(r'$\mathcal{O}_{\mathrm{mmB},p_'+str(i+1)+'}(L)$')
            labels_position.append(position - mm_summary_length[i]*n_tomo_lens*(n_tomo_lens + 1)/2/2)
            labels_position_y.append(len(covmatrix) - (position - mm_summary_length[i]*n_tomo_lens*(n_tomo_lens + 1)/2/2))
            old_position = position
        ax.axhline(y=len(covmatrix)-position, color='black', linewidth=.5, ls = "-")
        ax.axvline(x=position, color='black', linewidth=.5, ls = "-")
    ax.xaxis.tick_top()
    plt.yticks(labels_position_y, labels_text)
    plt.xticks(labels_position, labels_text, rotation=90)
    if filename is not None:
        fig.savefig(filename, bbox_inches='tight', pad_inches=0.1)
        print("Plotting correlation matrix")
def __write_cov_list(self,
cov_dict,
obs_dict,
n_tomo_clust,
n_tomo_lens,
sampledim,
proj_quant,
gauss,
nongauss,
ssc,
fct_args):
obslist, obsbool = fct_args
proj_quant_str = 'x1\tx2\t'
for observables in obs_dict['observables'].values():
if np.any(observables == 'C_ell') or np.any(observables == 'bandpowers'):
proj_quant_str = 'ell1\tell2\t'
if (obs_dict['observables']['est_shear'] == 'xi_pm' or \
obs_dict['observables']['est_ggl'] == 'gamma_t' or \
obs_dict['observables']['est_clust'] == 'w'):
proj_quant_str = 'theta1\ttheta2\t'
for observables in obs_dict['observables'].values():
if np.any(observables == 'k_space'):
proj_quant_str = 'log10k1\t\tlog10k2'
for observables in obs_dict['observables'].values():
if np.any(observables == 'cosebi'):
proj_quant_str = 'n1\t\tn2'
if not cov_dict['split_gauss']:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e'
else:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
olist = []
splitidx = 0
write_header = True
if obs_dict['ELLspace']['n_spec'] is not None and obs_dict['ELLspace']['n_spec'] != 0:
obs_copy = ['gggg_ssss', 'gggg_sssp', 'gggg_sspp', \
'gggg_spsp', 'gggg_ppsp', 'gggg_pppp', \
'gggm_sssm', 'gggm_sspm', 'gggm_spsm', \
'gggm_sppm', 'gggm_ppsm', 'gggm_pppm', \
'ggmm_ssmm', 'ggmm_spmm', 'ggmm_ppmm', \
'gmgm_smsm', 'gmgm_smpm', 'gmgm_pmsm', \
'gmgm_pmpm', 'mmgm_mmsm', 'mmgm_mmpm', \
'mmmm_mmmm']
obs_type = ['gggg', 'gggg', 'gggg', 'gggg', 'gggg', 'gggg',
'gggm', 'gggm', 'gggm', 'gggm', 'gggm', 'gggm',
'ggmm', 'ggmm', 'ggmm',
'gmgm', 'gmgm', 'gmgm', 'gmgm', 'gmgm',
'mmgm', 'mmgm',
'mmmm']
if not cov_dict['split_gauss']:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg\t\tcovng\t\tcovssc')
write_header = False
for i_probe in range(22):
if not isinstance(gauss[i_probe], np.ndarray):
continue
if not isinstance(nongauss[i_probe], np.ndarray):
nongauss[i_probe] = np.zeros_like(gauss[i_probe])
if not isinstance(ssc[i_probe], np.ndarray):
ssc[i_probe] = np.zeros_like(gauss[i_probe])
r1 = gauss[i_probe].shape[0]
r2 = gauss[i_probe].shape[1]
tomo1 = gauss[i_probe].shape[4]
tomo2 = gauss[i_probe].shape[5]
tomo3 = gauss[i_probe].shape[6]
tomo4 = gauss[i_probe].shape[7]
sampledim1 = 1
sampledim2 = 1
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if obs_type[i_probe] == 'gggg' or obs_type[i_probe] == 'mmmm' or obs_type[i_probe] == 'ggmm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(t3, tomo4):
if obs_type[i_probe] == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmmm':
sampledim1 = 1
sampledim2 = 1
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
p2 = self.projected_lens[i_r2]
if obs_type[i_probe] == 'ggmm':
sampledim1 = sampledim
sampledim2 = 1
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
if np.any(self.projected_lens):
p2 = self.projected_lens[i_r2]
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[i_probe][idxs] \
+ nongauss[i_probe][idxs] \
+ ssc[i_probe][idxs]
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[i_probe][idxs],
nongauss[i_probe][idxs],
ssc[i_probe][idxs])
olist.append(ostr)
if obs_type[i_probe] == 'gggm' or obs_type[i_probe] == 'mmgm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
if obs_type[i_probe] == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmgm':
sampledim1 = 1
sampledim2 = sampledim
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
if np.any(self.projected_clust):
p2 = self.projected_clust[i_r2]
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[i_probe][idxs] \
+ nongauss[i_probe][idxs] \
+ ssc[i_probe][idxs]
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[i_probe][idxs],
nongauss[i_probe][idxs],
ssc[i_probe][idxs])
olist.append(ostr)
if obs_type[i_probe] == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[i_probe][idxs] \
+ nongauss[i_probe][idxs] \
+ ssc[i_probe][idxs]
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[i_probe][idxs],
nongauss[i_probe][idxs],
ssc[i_probe][idxs])
olist.append(ostr)
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
else:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg sva\tcovg mix' +
'\tcovg sn\t\tcovng\t\tcovssc')
write_header = False
for i_probe in range(22):
if not isinstance(gauss[3*i_probe], np.ndarray):
continue
if not isinstance(gauss[3*i_probe + 1], np.ndarray):
gauss[3*i_probe + 1] = np.zeros_like(gauss[3*i_probe])
if not isinstance(gauss[3*i_probe + 2], np.ndarray):
gauss[3*i_probe + 2] = np.zeros_like(gauss[3*i_probe])
if not isinstance(nongauss[i_probe], np.ndarray):
nongauss[i_probe] = np.zeros_like(gauss[3*i_probe])
if not isinstance(ssc[i_probe], np.ndarray):
ssc[i_probe] = np.zeros_like(gauss[3*i_probe])
r1 = gauss[3*i_probe].shape[0]
r2 = gauss[3*i_probe].shape[1]
tomo1 = gauss[3*i_probe].shape[4]
tomo2 = gauss[3*i_probe].shape[5]
tomo3 = gauss[3*i_probe].shape[6]
tomo4 = gauss[3*i_probe].shape[7]
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if obs_type[i_probe] == 'gggg' or obs_type[i_probe] == 'mmmm' or obs_type[i_probe] == 'ggmm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(t3, tomo4):
if obs_type[i_probe] == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmmm':
sampledim1 = 1
sampledim2 = 1
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
p2 = self.projected_lens[i_r2]
if obs_type[i_probe] == 'ggmm':
sampledim1 = sampledim
sampledim2 = 1
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
if np.any(self.projected_lens):
p2 = self.projected_lens[i_r2]
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ nongauss_aux
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
if obs_type[i_probe] == 'gggm' or obs_type[i_probe] == 'mmgm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
if obs_type[i_probe] == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmgm':
sampledim1 = 1
sampledim2 = sampledim
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
if np.any(self.projected_clust):
p2 = self.projected_clust[i_r2]
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ nongauss_aux
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
if obs_type[i_probe] == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ nongauss_aux
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
if not self.save_as_binary:
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
else:
olist = []
splitidx = 0
write_header = True
for oidx, obs in enumerate(obslist):
obs_copy = np.copy(obs)
if obs == 'xipxip' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_mmCE_mm'
if obs == 'xipxim' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_mmCB_mm'
if obs == 'ximxim' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CB_mmCB_mm'
if obs == 'gggg' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['clustering'] == True:
obs_copy = 'CE_ggCE_gg'
if obs == 'gmgm' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obs_copy = 'CE_gmCE_gm'
if obs == 'gggm' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obs_copy = 'CE_ggCE_gm'
if obs == 'ggxip' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_ggCE_mm'
if obs == 'ggxim' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_ggCB_mm'
if obs == 'gmxip' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obs_copy = 'CE_gmCE_mm'
if obs == 'gmxim' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_gmCB_mm'
if obs == 'xipxip' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'EmmEmm'
if obs == 'xipxim' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'EmmBmm'
if obs == 'ximxim' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'BmmBmm'
if obs == 'gggg' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['clustering'] == True:
obs_copy = 'PsiggPsigg'
if obs == 'gmgm' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['ggl'] == True:
obs_copy = 'PsigmPsigm'
if obs == 'gggm' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['ggl'] == True:
obs_copy = 'PsiggPsigm'
if obs == 'ggxip' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'PsiggEmm'
if obs == 'ggxim' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'PsiggBmm'
if obs == 'gmxip' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['ggl'] == True:
obs_copy = 'PsigmEmm'
if obs == 'gmxim' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'PsigmBmm'
if not obsbool[oidx]:
splitidx += 3
continue
sampledim1 = sampledim
sampledim2 = sampledim
if not cov_dict['split_gauss']:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg\t\tcovng\t\tcovssc')
write_header = False
if not isinstance(gauss[oidx], np.ndarray):
continue
if not isinstance(nongauss[oidx], np.ndarray):
nongauss[oidx] = np.zeros_like(gauss[oidx])
if not isinstance(ssc[oidx], np.ndarray):
ssc[oidx] = np.zeros_like(gauss[oidx])
r1 = gauss[oidx].shape[0]
r2 = gauss[oidx].shape[1]
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[oidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
if np.any(self.projected_lens):
ri = self.projected_lens[i_r1]
rj = self.projected_lens[i_r2]
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[5]
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[oidx].shape[4]
tomo3 = gauss[oidx].shape[6]
tomo4 = gauss[oidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
if np.any(self.projected_clust):
rj = self.projected_clust[i_r2]
if np.any(self.projected_lens):
ri = self.projected_lens[i_r1]
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
if np.any(self.projected_lens):
rj = self.projected_lens[i_r2]
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
else:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg sva\tcovg mix' +
'\tcovg sn\t\tcovng\t\tcovssc')
write_header = False
if not isinstance(gauss[3*oidx], np.ndarray):
continue
if not isinstance(gauss[3*oidx + 1], np.ndarray):
gauss[3*oidx + 1] = np.zeros_like(gauss[3*oidx])
if not isinstance(gauss[3*oidx + 2], np.ndarray):
gauss[3*oidx + 2] = np.zeros_like(gauss[3*oidx])
if not isinstance(nongauss[oidx], np.ndarray):
nongauss[oidx] = np.zeros_like(gauss[3*oidx])
if not isinstance(ssc[oidx], np.ndarray):
ssc[oidx] = np.zeros_like(gauss[3*oidx])
r1 = gauss[3*oidx].shape[0]
r2 = gauss[3*oidx].shape[1]
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[splitidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
rj = self.projected_clust[i_r2]
ri = self.projected_clust[i_r1]
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
if np.any(self.projected_lens):
rj = self.projected_lens[i_r2]
ri = self.projected_lens[i_r1]
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
rj = self.projected_clust[i_r2]
ri = self.projected_clust[i_r1]
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[5]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[splitidx].shape[4]
tomo3 = gauss[splitidx].shape[6]
tomo4 = gauss[splitidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
rj = self.projected_clust[i_r2]
ri = self.projected_clust[i_r1]
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
if np.any(self.projected_clust):
rj = self.projected_clust[i_r2]
if np.any(self.projected_lens):
ri = self.projected_lens[i_r1]
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
if np.any(self.projected_lens):
rj = self.projected_lens[i_r2]
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t1, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
splitidx += 3
if not self.save_as_binary:
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
return True
def __write_cov_list_cosmosis_style(self,
cov_dict,
obs_dict,
n_tomo_clust,
n_tomo_lens,
sampledim,
proj_quant,
gauss,
nongauss,
ssc,
fct_args):
obslist, obsbool = fct_args
proj_quant_str = 'x1\tx2\t'
for observables in obs_dict['observables'].values():
if np.any(observables == 'C_ell') or np.any(observables == 'bandpowers'):
proj_quant_str = 'ell1\tell2\t'
if (obs_dict['observables']['est_shear'] == 'xi_pm' or \
obs_dict['observables']['est_ggl'] == 'gamma_t' or \
obs_dict['observables']['est_clust'] == 'w'):
proj_quant_str = 'theta1\ttheta2\t'
for observables in obs_dict['observables'].values():
if np.any(observables == 'k_space'):
proj_quant_str = 'log10k1\t\tlog10k2'
for observables in obs_dict['observables'].values():
if np.any(observables == 'cosebi'):
proj_quant_str = 'n1\t\tn2'
if not cov_dict['split_gauss']:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e'
else:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%.2e\t%.2e\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
idxlist = self.__get_idxlist(proj_quant, sampledim)
olist = []
splitidx = 0
write_header = True
if obs_dict['ELLspace']['n_spec'] is not None and obs_dict['ELLspace']['n_spec'] != 0:
obs_copy = ['gggg_ssss', 'gggg_sssp', 'gggg_sspp', \
'gggg_spsp', 'gggg_ppsp', 'gggg_pppp', \
'gggm_sssm', 'gggm_sspm', 'gggm_spsm', \
'gggm_sppm', 'gggm_ppsm', 'gggm_pppm', \
'ggmm_ssmm', 'ggmm_spmm', 'ggmm_ppmm', \
'gmgm_smsm', 'gmgm_smpm', 'gmgm_pmsm', \
'gmgm_pmpm', 'mmgm_mmsm', 'mmgm_mmpm', \
'mmmm_mmmm']
obs_type = ['gggg', 'gggg', 'gggg', 'gggg', 'gggg', 'gggg',
'gggm', 'gggm', 'gggm', 'gggm', 'gggm', 'gggm',
'ggmm', 'ggmm', 'ggmm',
'gmgm', 'gmgm', 'gmgm', 'gmgm', 'gmgm',
'mmgm', 'mmgm',
'mmmm']
if not cov_dict['split_gauss']:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg\t\tcovng\t\tcovssc')
write_header = False
for i_probe in range(22):
if not isinstance(gauss[i_probe], np.ndarray):
continue
r1 = gauss[i_probe].shape[0]
r2 = gauss[i_probe].shape[1]
tomo1 = gauss[i_probe].shape[4]
tomo2 = gauss[i_probe].shape[5]
tomo3 = gauss[i_probe].shape[6]
tomo4 = gauss[i_probe].shape[7]
sampledim1 = 1
sampledim2 = 1
if obs_type[i_probe] == 'gggg' or obs_type[i_probe] == 'mmmm' or obs_type[i_probe] == 'ggmm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(t3, tomo4):
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if obs_type[i_probe] == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmmm':
sampledim1 = 1
sampledim2 = 1
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
p2 = self.projected_lens[i_r2]
if obs_type[i_probe] == 'ggmm':
sampledim1 = sampledim
sampledim2 = 1
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[i_probe][idxs] \
+ nongauss[i_probe][idxs] \
+ ssc[i_probe][idxs]
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[i_probe][idxs],
nongauss[i_probe][idxs],
ssc[i_probe][idxs])
olist.append(ostr)
if obs_type[i_probe] == 'gggm' or obs_type[i_probe] == 'mmgm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if obs_type[i_probe] == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmgm':
sampledim1 = 1
sampledim2 = sampledim
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[i_probe][idxs] \
+ nongauss[i_probe][idxs] \
+ ssc[i_probe][idxs]
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[i_probe][idxs],
nongauss[i_probe][idxs],
ssc[i_probe][idxs])
olist.append(ostr)
if obs_type[i_probe] == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[i_probe][idxs] \
+ nongauss[i_probe][idxs] \
+ ssc[i_probe][idxs]
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[i_probe][idxs],
nongauss[i_probe][idxs],
ssc[i_probe][idxs])
olist.append(ostr)
if not self.save_as_binary:
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
else:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg sva\tcovg mix' +
'\tcovg sn\t\tcovng\t\tcovssc')
write_header = False
for i_probe in range(22):
if not isinstance(gauss[3*i_probe], np.ndarray):
continue
r1 = gauss[3*i_probe].shape[0]
r2 = gauss[3*i_probe].shape[1]
tomo1 = gauss[3*i_probe].shape[4]
tomo2 = gauss[3*i_probe].shape[5]
tomo3 = gauss[3*i_probe].shape[6]
tomo4 = gauss[3*i_probe].shape[7]
if obs_type[i_probe] == 'gggg' or obs_type[i_probe] == 'mmmm' or obs_type[i_probe] == 'ggmm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(t3, tomo4):
if obs_type[i_probe] == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if obs_type[i_probe] == 'mmmm':
sampledim1 = 1
sampledim2 = 1
if obs_type[i_probe] == 'ggmm':
sampledim1 = sampledim
sampledim2 = 1
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if obs_type[i_probe] == 'gggg':
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmmm':
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
p2 = self.projected_lens[i_r2]
if obs_type[i_probe] == 'ggmm':
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ nongauss_aux
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
if obs_type[i_probe] == 'gggm' or obs_type[i_probe] == 'mmgm':
for t1 in range(tomo1):
for t2 in range(t1, tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
if obs_type[i_probe] == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if obs_type[i_probe] == 'mmgm':
sampledim1 = 1
sampledim2 = sampledim
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if obs_type[i_probe] == 'gggm':
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
if obs_type[i_probe] == 'mmgm':
if np.any(self.projected_lens):
p1 = self.projected_lens[i_r1]
if np.any(self.projected_clust):
p2 = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ nongauss_aux
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
if obs_type[i_probe] == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
p1 = proj_quant[i_r1]
p2 = proj_quant[i_r2]
if np.any(self.projected_clust):
p1 = self.projected_clust[i_r1]
p2 = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ nongauss_aux
ostr = ostr_format \
% (obs_copy[i_probe], p1, p2,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
if not self.save_as_binary:
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
else:
idxlist = self.__get_idxlist(proj_quant, sampledim)
olist = []
splitidx = 0
write_header = True
for oidx, obs in enumerate(obslist):
i_probe = oidx
obs_copy = obs
if obs == 'xipxip' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_mmCE_mm'
if obs == 'xipxim' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_mmCB_mm'
if obs == 'ximxim' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CB_mmCB_mm'
if obs == 'gggg' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['clustering'] == True:
obs_copy = 'CE_ggCE_gg'
if obs == 'gmgm' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obs_copy = 'CE_gmCE_gm'
if obs == 'gggm' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obs_copy = 'CE_ggCE_gm'
if obs == 'ggxip' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_ggCE_mm'
if obs == 'ggxim' and obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_ggCB_mm'
if obs == 'gmxip' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obs_copy = 'CE_gmCE_mm'
if obs == 'gmxim' and obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'CE_gmCB_mm'
if obs == 'xipxip' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'EmmEmm'
if obs == 'xipxim' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'EmmBmm'
if obs == 'ximxim' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'BmmBmm'
if obs == 'gggg' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['clustering'] == True:
obs_copy = 'PsiggPsigg'
if obs == 'gmgm' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['ggl'] == True:
obs_copy = 'PsigmPsigm'
if obs == 'gggm' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['ggl'] == True:
obs_copy = 'PsiggPsigm'
if obs == 'ggxip' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'PsiggEmm'
if obs == 'ggxim' and obs_dict['observables']['est_clust'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'PsiggBmm'
if obs == 'gmxip' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['ggl'] == True:
obs_copy = 'PsigmEmm'
if obs == 'gmxim' and obs_dict['observables']['est_ggl'] == 'cosebi' and obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
obs_copy = 'PsigmBmm'
if not obsbool[oidx]:
splitidx += 3
continue
sampledim1 = sampledim
sampledim2 = sampledim
if not cov_dict['split_gauss']:
if not isinstance(gauss[oidx], np.ndarray):
continue
r1 = gauss[oidx].shape[0]
r2 = gauss[oidx].shape[1]
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg\t\tcovng\t\tcovssc')
write_header = False
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[oidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if obs == 'gggg':
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
if np.any(self.projected_lens):
ri = self.projected_lens[i_r1]
rj = self.projected_lens[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
if isinstance(gauss[i_probe], int):
gauss_aux = 0.0
else:
gauss_aux = gauss[i_probe][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_aux + nongauss_aux + ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_aux,
nongauss_aux,
ssc_aux)
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[5]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
if isinstance(gauss[i_probe], int):
gauss_aux = 0.0
else:
gauss_aux = gauss[i_probe][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_aux + nongauss_aux + ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_aux,
nongauss_aux,
ssc_aux)
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[oidx].shape[4]
tomo3 = gauss[oidx].shape[6]
tomo4 = gauss[oidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if obs == 'gggm':
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
if obs in ['mmgm', 'gmxip', 'gmxim']:
if np.any(self.projected_lens):
ri = self.projected_lens[i_r1]
if np.any(self.projected_clust):
rj = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
if isinstance(gauss[i_probe], int):
gauss_aux = 0.0
else:
gauss_aux = gauss[i_probe][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_aux + nongauss_aux + ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_aux,
nongauss_aux,
ssc_aux)
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if np.any(self.projected_lens):
rj = self.projected_lens[i_r2]
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
if isinstance(gauss[i_probe], int):
gauss_aux = 0.0
else:
gauss_aux = gauss[i_probe][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_aux + nongauss_aux + ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_aux,
nongauss_aux,
ssc_aux)
olist.append(ostr)
else:
if not isinstance(gauss[3*oidx], np.ndarray):
continue
r1 = gauss[3*oidx].shape[0]
r2 = gauss[3*oidx].shape[1]
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg sva\tcovg mix' +
'\tcovg sn\t\tcovng\t\tcovssc')
write_header = False
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[splitidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
if obs == 'gggg':
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
if np.any(self.projected_lens):
rj = self.projected_lens[i_r2]
ri = self.projected_lens[i_r1]
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[5]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[splitidx].shape[4]
tomo3 = gauss[splitidx].shape[6]
tomo4 = gauss[splitidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if obs == 'gggm':
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
rj = self.projected_clust[i_r2]
if obs in ['mmgm', 'gmxip', 'gmxim']:
if np.any(self.projected_lens):
ri = self.projected_lens[i_r1]
if np.any(self.projected_clust):
rj = self.projected_clust[i_r2]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(r1):
for i_r2 in range(r2):
ri = proj_quant[i_r1]
rj = proj_quant[i_r2]
if np.any(self.projected_lens):
rj = self.projected_lens[i_r2]
if np.any(self.projected_clust):
ri = self.projected_clust[i_r1]
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
gauss_sva = gauss[3*i_probe]
if isinstance(gauss[3*i_probe], int):
gauss_sva = 0.0
else:
gauss_sva = gauss[3*i_probe][idxs]
if isinstance(gauss[3*i_probe+1], int):
gauss_mix = 0.0
else:
gauss_mix = gauss[3*i_probe+1][idxs]
if isinstance(gauss[3*i_probe+2], int):
gauss_sn = 0.0
else:
gauss_sn = gauss[3*i_probe+2][idxs]
if isinstance(nongauss[i_probe], int):
nongauss_aux = 0.0
else:
nongauss_aux = nongauss[i_probe][idxs]
if isinstance(ssc[i_probe], int):
ssc_aux = 0.0
else:
ssc_aux = ssc[i_probe][idxs]
cov = gauss_sva \
+ gauss_mix \
+ gauss_sn \
+ nongauss_aux \
+ ssc_aux
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss_sva,
gauss_mix,
gauss_sn,
nongauss_aux,
ssc_aux)
olist.append(ostr)
splitidx += 3
if not self.save_as_binary:
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
return True
def __write_cov_list_arbitrary(self,
cov_dict,
obs_dict,
n_tomo_clust,
n_tomo_lens,
sampledim,
read_in_tables,
gauss,
nongauss,
ssc,
fct_args):
obslist, obsbool = fct_args
proj_quant_str = 'n\tm\t'
if not cov_dict['split_gauss']:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%i\ti\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%i\t%i\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e'
else:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%i\t%i\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%i\t%i\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
olist = []
splitidx = 0
write_header = True
olist = []
splitidx = 0
write_header = True
summary = read_in_tables['arb_summary']
gg = True
if summary['number_summary_gg'] is None:
gg = False
gm = True
if summary['number_summary_gm'] is None:
gm = False
mm = True
if summary['number_summary_mm'] is None:
mm = False
for oidx, obs in enumerate(obslist):
if not obsbool[oidx]:
splitidx += 3
continue
sampledim1 = sampledim
sampledim2 = sampledim
if not cov_dict['split_gauss']:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg\t\tcovng\t\tcovssc')
write_header = False
if not isinstance(gauss[oidx], np.ndarray):
continue
else:
len_proj_quant1 = len(gauss[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(gauss[oidx][0,:,0,0,0,0,0,0])
if not isinstance(nongauss[oidx], np.ndarray):
nongauss[oidx] = np.zeros_like(gauss[oidx])
else:
len_proj_quant1 = len(nongauss[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(nongauss[oidx][0,:,0,0,0,0,0,0])
if not isinstance(ssc[oidx], np.ndarray):
ssc[oidx] = np.zeros_like(gauss[oidx])
else:
len_proj_quant1 = len(ssc[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(ssc[oidx][0,:,0,0,0,0,0,0])
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy = str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[oidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, int(ri + 1), int(rj + 1),
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[5]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, int(ri + 1), int(rj + 1),
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[oidx].shape[4]
tomo3 = gauss[oidx].shape[6]
tomo4 = gauss[oidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, int(ri + 1), int(rj + 1),
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
else:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg sva\tcovg mix' +
'\tcovg sn\t\tcovng\t\tcovssc')
write_header = False
if not isinstance(gauss[3*oidx], np.ndarray):
continue
else:
len_proj_quant1 = len(gauss[3*oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(gauss[3*oidx][0,:,0,0,0,0,0,0])
if not isinstance(gauss[3*oidx + 1], np.ndarray):
gauss[3*oidx + 1] = np.zeros_like(gauss[3*oidx])
if not isinstance(gauss[3*oidx + 2], np.ndarray):
gauss[3*oidx + 2] = np.zeros_like(gauss[3*oidx])
if not isinstance(nongauss[oidx], np.ndarray):
nongauss[oidx] = np.zeros_like(gauss[3*oidx])
else:
len_proj_quant1 = len(nongauss[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(nongauss[oidx][0,:,0,0,0,0,0,0])
if not isinstance(ssc[oidx], np.ndarray):
ssc[oidx] = np.zeros_like(gauss[3*oidx])
else:
len_proj_quant1 = len(ssc[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(ssc[oidx][0,:,0,0,0,0,0,0])
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[splitidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[5]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[splitidx].shape[4]
tomo3 = gauss[splitidx].shape[6]
tomo4 = gauss[splitidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t1, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
splitidx += 3
if not self.save_as_binary:
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
return True
def __write_cov_list_arbitrary_cosmosis_style(self,
cov_dict,
obs_dict,
n_tomo_clust,
n_tomo_lens,
sampledim,
read_in_tables,
gauss,
nongauss,
ssc,
fct_args):
obslist, obsbool = fct_args
proj_quant_str = 'n\tm\t'
if not cov_dict['split_gauss']:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%i\ti\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%i\t%i\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e'
else:
if n_tomo_clust is None and n_tomo_lens is None:
tomo_str = ''
ostr_format = '%s\t%i\t%i\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
else:
tomo_str = 'tomoi\ttomoj\ttomok\ttomol\t'
ostr_format = '%s\t%i\t%i\t%i\t%i\t%i\t\t%i\t\t%i\t\t%i\t\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e'
olist = []
splitidx = 0
write_header = True
olist = []
splitidx = 0
write_header = True
summary = read_in_tables['arb_summary']
gg = True
if summary['number_summary_gg'] is None:
gg = False
gm = True
if summary['number_summary_gm'] is None:
gm = False
mm = True
if summary['number_summary_mm'] is None:
mm = False
for oidx, obs in enumerate(obslist):
if not obsbool[oidx]:
splitidx += 3
continue
sampledim1 = sampledim
sampledim2 = sampledim
if not cov_dict['split_gauss']:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg\t\tcovng\t\tcovssc')
write_header = False
if not isinstance(gauss[oidx], np.ndarray):
continue
else:
len_proj_quant1 = len(gauss[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(gauss[oidx][0,:,0,0,0,0,0,0])
if not isinstance(nongauss[oidx], np.ndarray):
nongauss[oidx] = np.zeros_like(gauss[oidx])
else:
len_proj_quant1 = len(nongauss[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(nongauss[oidx][0,:,0,0,0,0,0,0])
if not isinstance(ssc[oidx], np.ndarray):
ssc[oidx] = np.zeros_like(gauss[oidx])
else:
len_proj_quant1 = len(ssc[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(ssc[oidx][0,:,0,0,0,0,0,0])
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[oidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, int(ri + 1), int(rj + 1),
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[5]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, int(ri + 1), int(rj + 1),
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[oidx].shape[4]
tomo3 = gauss[oidx].shape[6]
tomo4 = gauss[oidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1 + 1, i_s2 + 1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[oidx].shape[4]
tomo2 = gauss[oidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[oidx][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, int(ri + 1), int(rj + 1),
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[oidx][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
else:
if write_header:
olist.append('#obs\t' +proj_quant_str+ '\t\ts1\ts2\t' +
tomo_str + 'cov\t\t\tcovg sva\tcovg mix' +
'\tcovg sn\t\tcovng\t\tcovssc')
write_header = False
if not isinstance(gauss[3*oidx], np.ndarray):
continue
else:
len_proj_quant1 = len(gauss[3*oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(gauss[3*oidx][0,:,0,0,0,0,0,0])
if not isinstance(gauss[3*oidx + 1], np.ndarray):
gauss[3*oidx + 1] = np.zeros_like(gauss[3*oidx])
if not isinstance(gauss[3*oidx + 2], np.ndarray):
gauss[3*oidx + 2] = np.zeros_like(gauss[3*oidx])
if not isinstance(nongauss[oidx], np.ndarray):
nongauss[oidx] = np.zeros_like(gauss[3*oidx])
else:
len_proj_quant1 = len(nongauss[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(nongauss[oidx][0,:,0,0,0,0,0,0])
if not isinstance(ssc[oidx], np.ndarray):
ssc[oidx] = np.zeros_like(gauss[3*oidx])
else:
len_proj_quant1 = len(ssc[oidx][:,0,0,0,0,0,0,0])
len_proj_quant2 = len(ssc[oidx][0,:,0,0,0,0,0,0])
if obs in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
tomo1 = gauss[splitidx].shape[4]
if obs == 'gggg':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmmm', 'xipxip', 'xipxim', 'ximxim']:
sampledim1 = 1
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo1):
for t4 in range(t3, tomo1):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs == 'gmgm':
sampledim1 = sampledim
sampledim2 = sampledim
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[5]
for t1 in range(tomo1):
for t2 in range(tomo2):
for t3 in range(tomo1):
for t4 in range(tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
tomo1 = gauss[splitidx].shape[4]
tomo3 = gauss[splitidx].shape[6]
tomo4 = gauss[splitidx].shape[7]
if obs == 'gggm':
sampledim1 = sampledim
sampledim2 = sampledim
if obs in ['mmgm', 'gmxip', 'gmxim']:
sampledim1 = 1
sampledim2 = sampledim
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo3):
for t4 in range(tomo4):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t2, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
elif obs in ['ggmm', 'ggxip', 'ggxim']:
tomo1 = gauss[splitidx].shape[4]
tomo2 = gauss[splitidx].shape[6]
sampledim1 = sampledim
sampledim2 = 1
for t1 in range(tomo1):
for t2 in range(t1, tomo1):
for t3 in range(tomo2):
for t4 in range(t3, tomo2):
for i_s1 in range(sampledim1):
for i_s2 in range(sampledim2):
for i_r1 in range(len_proj_quant1):
for i_r2 in range(len_proj_quant2):
ri = int(np.copy(i_r1))
rj = int(np.copy(i_r2))
#label ri
if gg and obs in ['gggg', 'gggm', 'ggxip', 'ggxim']:
obs_copy = str(summary['gg_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gg']:
obs_copy = str(summary['gg_summary_name'][1])
ri -= summary['arb_number_first_summary_gg']
if gm and obs in ['gmgm']:
obs_copy = str(summary['gm_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_gm']:
obs_copy = str(summary['gm_summary_name'][1])
ri -= summary['arb_number_first_summary_gm']
if mm and obs in [ 'xipxip', 'xipxim', 'gmxip']:
obs_copy = str(summary['mmE_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
if mm and obs in ['gmxim', 'ximxim']:
obs_copy = str(summary['mmB_summary_name'][0])
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy = str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
#label rj
if gg and obs in ['gggg']:
if i_r2 >= summary['arb_number_first_summary_gg']:
obs_copy += str(summary['gg_summary_name'][1])
rj -= summary['arb_number_first_summary_gg']
else:
obs_copy += str(summary['gg_summary_name'][0])
if gm and obs in ['gmgm', 'gggm', 'gmxip', 'gmxim']:
if i_r2 >= summary['arb_number_first_summary_gm']:
obs_copy += str(summary['gm_summary_name'][1])
rj -= summary['arb_number_first_summary_gm']
else:
obs_copy += str(summary['gm_summary_name'][0])
if mm and obs in ['xipxip']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmE_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmE_summary_name'][0])
if mm and obs in ['xipxim', 'ximxim']:
if i_r1 >= summary['arb_number_first_summary_mm']:
obs_copy += str(summary['mmB_summary_name'][1])
ri -= summary['arb_number_first_summary_mm']
else:
obs_copy += str(summary['mmB_summary_name'][0])
idxs = (i_r1, i_r2, i_s1, i_s2, t1, t1, t3, t4)
cov = gauss[splitidx][idxs] \
+ gauss[splitidx+1][idxs] \
+ gauss[splitidx+2][idxs] \
+ nongauss[oidx][idxs] \
+ ssc[oidx][idxs]
ostr = ostr_format \
% (obs_copy, ri, rj,
i_s1+1, i_s2+1, t1+1, t2+1, t3+1, t4+1,
cov,
gauss[splitidx][idxs],
gauss[splitidx+1][idxs],
gauss[splitidx+2][idxs],
nongauss[oidx][idxs],
ssc[oidx][idxs])
olist.append(ostr)
splitidx += 3
if not self.save_as_binary:
if 'terminal' in self.style:
print("Writing result to terminal. (Brace yourself...).'")
for ostr in olist:
print(ostr)
elif 'list' in self.style:
fn = self.filename[self.style.index('list')]
with open(fn, 'w') as file:
print("Writing '" + fn + "'.")
for ostr in olist:
file.write("%s\n" % ostr)
return True
def __create_matrix(self,covlist, is_i_smaller_j, is_m_smaller_n):
if is_i_smaller_j and is_m_smaller_n:
data_size_ij = int(len(covlist[0,0,0,0,:,0,0,0]) * (len(covlist[0,0,0,0,0,:,0,0]) + 1)/2)*len(covlist[:,0,0,0,0,0,0,0])*len(covlist[0,0,:,0,0,0,0,0])
data_size_mn = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:]) + 1)/2)*len(covlist[0, :,0,0,0,0,0,0])*len(covlist[0,0,0,:,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
i = 0
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for j_tomo in range(i_tomo,len(covlist[0,0,0,0,0,:,0,0])):
for i_sample in range(len(covlist[0 ,0, :, 0,0,0,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for n_tomo in range(m_tomo, len(covlist[0,0,0,0,0,0,0,:])):
for j_sample in range(len(covlist[0,0,0,:,0,0,0,0])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,i_sample,j_sample,i_tomo,j_tomo,m_tomo,n_tomo]
j += 1
i += 1
if is_i_smaller_j and not is_m_smaller_n:
i = 0
data_size_ij = int(len(covlist[0,0,0,0,:,0,0,0]) * (len(covlist[0,0,0,0,0,:,0,0]) + 1)/2)*len(covlist[:,0,0,0,0,0,0,0])*len(covlist[0,0,:,0,0,0,0,0])
data_size_mn = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:])))*len(covlist[0, :,0,0,0,0,0,0])*len(covlist[0,0,0,:,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for j_tomo in range(i_tomo,len(covlist[0,0,0,0,0,:,0,0])):
for i_sample in range(len(covlist[0,0, :, 0,0,0,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for n_tomo in range(len(covlist[0,0,0,0,0,0,0,:])):
for j_sample in range(len(covlist[0,0,0,:,0,0,0,0])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,i_sample,j_sample,i_tomo,j_tomo,m_tomo,n_tomo]
j += 1
i += 1
if not is_i_smaller_j and is_m_smaller_n:
i = 0
data_size_ij = int(len(covlist[0,0,0,0,:,0,0,0]) * (len(covlist[0,0,0,0,0,:,0,0])))*len(covlist[:,0,0,0,0,0,0,0])*len(covlist[0,0,:,0,0,0,0,0])
data_size_mn = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:]) + 1)/2)*len(covlist[0, :,0,0,0,0,0,0])*len(covlist[0,0,:,0,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for j_tomo in range(len(covlist[0,0,0,0,0,:,0,0])):
for i_sample in range(len(covlist[0 ,0, :, 0,0,0,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for n_tomo in range(m_tomo, len(covlist[0,0,0,0,0,0,0,:])):
for j_sample in range(len(covlist[0,0,0,:,0,0,0,0])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,i_sample,j_sample,i_tomo,j_tomo,m_tomo,n_tomo]
j += 1
i += 1
if not is_i_smaller_j and not is_m_smaller_n:
i = 0
data_size_ij = int(len(covlist[0,0,0,0,:,0,0,0]) * (len(covlist[0,0,0,0,0,:,0,0])))*len(covlist[:,0,0,0,0,0,0,0])*len(covlist[0,0,:,0,0,0,0,0])
data_size_mn = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:])))*len(covlist[0, :,0,0,0,0,0,0])*len(covlist[0,0,0, :,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for j_tomo in range(len(covlist[0,0,0,0,0,:,0,0])):
for i_sample in range(len(covlist[0 ,0, :, 0,0,0,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for n_tomo in range(len(covlist[0,0,0,0,0,0,0,:])):
for j_sample in range(len(covlist[0,0,0,:,0,0,0,0])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,i_sample,j_sample,i_tomo,j_tomo,m_tomo,n_tomo]
j += 1
i += 1
return covariance
def __create_matrix_diagonal(self,covlist, diagonal_1, diagonal_2, is_i_smaller_j, is_m_smaller_n):
if diagonal_1 and diagonal_2:
data_size_ij = int(len(covlist[0,0,0,0,:,0,0,0]))*len(covlist[:,0,0,0,0,0,0,0])
data_size_mn = int(len(covlist[0,0,0,0,0,0,:,0]))*len(covlist[0,:,0,0,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
i = 0
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,0,0,i_tomo,i_tomo,m_tomo,m_tomo]
j += 1
i += 1
if diagonal_1 and not diagonal_2:
if is_m_smaller_n:
data_size_ij = int(len(covlist[0,0,0,0,:,0,0,0]))*len(covlist[:,0,0,0,0,0,0,0])
data_size_mn = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:]) + 1)/2)*len(covlist[0, :,0,0,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
i = 0
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for n_tomo in range(m_tomo, len(covlist[0,0,0,0,0,0,0,:])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,0,0,i_tomo,i_tomo,m_tomo,n_tomo]
j += 1
i += 1
else:
data_size_ij = int(len(covlist[0,0,0,0,:,0,0,0]))*len(covlist[:,0,0,0,0,0,0,0])
data_size_mn = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:])))*len(covlist[0, :,0,0,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
i = 0
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for n_tomo in range(len(covlist[0,0,0,0,0,0,0,:])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,0,0,i_tomo,i_tomo,m_tomo,n_tomo]
j += 1
i += 1
if diagonal_2 and not diagonal_1:
if is_i_smaller_j:
data_size_mn = int(len(covlist[0,0,0,0,:,0,0,0]))*len(covlist[:,0,0,0,0,0,0,0])
data_size_ij = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:]) + 1)/2)*len(covlist[0, :,0,0,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
i = 0
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for j_tomo in range(i_tomo, len(covlist[0,0,0,0,0,:,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,0,0,i_tomo,j_tomo,m_tomo,m_tomo]
j += 1
i += 1
else:
data_size_mn = int(len(covlist[0,0,0,0,:,0,0,0]))*len(covlist[:,0,0,0,0,0,0,0])
data_size_ij = int(len(covlist[0,0,0,0,0,0,:,0]) * (len(covlist[0,0,0,0,0,0,0,:])))*len(covlist[0, :,0,0,0,0,0,0])
covariance = np.zeros((data_size_ij,data_size_mn))
i = 0
for i_tomo in range(len(covlist[0,0,0,0,:,0,0,0])):
for j_tomo in range(len(covlist[0,0,0,0,0,:,0,0])):
for i_theta in range(len(covlist[:,0,0,0,0,0,0,0])):
j = 0
for m_tomo in range(len(covlist[0,0,0,0,0,0,:,0])):
for j_theta in range(len(covlist[0,:,0,0,0,0,0,0])):
covariance[i,j] = covlist[i_theta,j_theta,0,0,i_tomo,j_tomo,m_tomo,m_tomo]
j += 1
i += 1
return covariance
def __create_matrix_arbitrary(self,covlist, is_i_smaller_j, is_m_smaller_n, probe1, probe2, summary):
nprobes_probe_1 = summary['number_summary_'+probe1]
nprobes_probe_2 = summary['number_summary_'+probe2]
nprobes_firstprobe_probe_1 = summary['arb_number_first_summary_' +probe1]
nprobes_firstprobe_probe_2 = summary['arb_number_first_summary_' +probe2]
if nprobes_probe_1 is not None and nprobes_probe_2 is not None:
covp1p1 = self.__create_matrix(covlist[:nprobes_firstprobe_probe_1,:nprobes_firstprobe_probe_2], is_i_smaller_j, is_m_smaller_n)
result = covp1p1
if nprobes_probe_1 > 1:
covp2p1 = self.__create_matrix(covlist[nprobes_firstprobe_probe_1:,:nprobes_firstprobe_probe_2], is_i_smaller_j, is_m_smaller_n)
result = np.block([[covp1p1],
[covp2p1]])
if nprobes_probe_2 > 1:
covp2p2 = self.__create_matrix(covlist[nprobes_firstprobe_probe_1:,nprobes_firstprobe_probe_2:], is_i_smaller_j, is_m_smaller_n)
covp1p2 = self.__create_matrix(covlist[:nprobes_firstprobe_probe_1,nprobes_firstprobe_probe_2:], is_i_smaller_j, is_m_smaller_n)
result = np.block([[covp1p1, covp1p2],
[covp2p1, covp2p2]])
if nprobes_probe_2 > 1 and nprobes_probe_1 == 1 :
covp1p2 = self.__create_matrix(covlist[:nprobes_firstprobe_probe_1,nprobes_firstprobe_probe_2:], is_i_smaller_j, is_m_smaller_n)
result = np.block([[covp1p1, covp1p2]])
return result
def __write_cov_matrix_new(self,
obs_dict,
cov_dict,
n_tomo_clust,
n_tomo_lens,
sampledim,
proj_quant,
gauss,
nongauss,
ssc,
fct_args):
obslist, obsbool, obslength, mult, gg, gm, mm, xipp, xipm, ximm = \
fct_args
if obs_dict['ELLspace']['n_spec'] is None or obs_dict['ELLspace']['n_spec'] == 0:
if obslength == 6 and mult == 3:
gauss = [gauss[0]+gauss[1]+gauss[2],
gauss[3]+gauss[4]+gauss[5],
gauss[6]+gauss[7]+gauss[8],
gauss[9]+gauss[10]+gauss[11],
gauss[12]+gauss[13]+gauss[14],
gauss[15]+gauss[16]+gauss[17]]
elif obslength == 10 and mult == 3:
gauss = [gauss[0]+gauss[1]+gauss[2],
gauss[3]+gauss[4]+gauss[5],
gauss[6]+gauss[7]+gauss[8],
gauss[9]+gauss[10]+gauss[11],
gauss[12]+gauss[13]+gauss[14],
gauss[15]+gauss[16]+gauss[17],
gauss[18]+gauss[19]+gauss[20],
gauss[21]+gauss[22]+gauss[23],
gauss[24]+gauss[25]+gauss[26],
gauss[27]+gauss[28]+gauss[29]]
if obs_dict['observables']['est_shear'] == 'cosebi' and obs_dict['observables']['cosmic_shear'] == True:
xipm = True
ximm = True
xipp = True
cov = [gauss[idx]+nongauss[idx]+ssc[idx] for idx in range(obslength)]
cov_diag = []
if obslength == 6:
# 'gggg', 'gggm', 'ggmm', 'gmgm', 'mmgm', 'mmmm'
if gg:
covariance_gggg = self.__create_matrix(cov[0],True,True)
cov2d = covariance_gggg
cov_diag.append(covariance_gggg)
if gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
covariance_gggm = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm],
[covariance_gggm.T, covariance_gmgm]])
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm, covariance_ggmm],
[covariance_gggm.T, covariance_gmgm, covariance_mmgm.T],
[covariance_ggmm.T, covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_gggg, covariance_ggmm],
[covariance_ggmm.T, covariance_mmmm]])
elif gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
cov2d = covariance_gmgm
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gmgm, covariance_mmgm.T],
[covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
cov2d = covariance_mmmm
elif obslength == 10:
# 'ww', 'wgt', 'wxip', 'wxim', 'gtgt', 'xipgt',
# 'ximgt', 'xipxip', 'xipxim', 'ximxim'
if gg:
covariance_ww = self.__create_matrix(cov[0],True,True)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix(cov[3],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_wxim = self.__create_matrix(cov[3],True,True)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = covariance_xipxim
cov2d_total = np.copy(cov2d)
if cov_dict['split_gauss']:
cov = [gauss[idx] for idx in range(obslength)]
cov_diag = []
if obslength == 6:
# 'gggg', 'gggm', 'ggmm', 'gmgm', 'mmgm', 'mmmm'
if gg:
covariance_gggg = self.__create_matrix(cov[0],True,True)
cov2d = covariance_gggg
cov_diag.append(covariance_gggg)
if gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
covariance_gggm = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm],
[covariance_gggm.T, covariance_gmgm]])
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm, covariance_ggmm],
[covariance_gggm.T, covariance_gmgm, covariance_mmgm.T],
[covariance_ggmm.T, covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_gggg, covariance_ggmm],
[covariance_ggmm.T, covariance_mmmm]])
elif gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
cov2d = covariance_gmgm
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gmgm, covariance_mmgm.T],
[covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
cov2d = covariance_mmmm
elif obslength == 10:
# 'ww', 'wgt', 'wxip', 'wxim', 'gtgt', 'xipgt',
# 'ximgt', 'xipxip', 'xipxim', 'ximxim'
if gg:
covariance_ww = self.__create_matrix(cov[0],True,True)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix(cov[3],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_wxim = self.__create_matrix(cov[3],True,True)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = covariance_xipxim
cov2d_gauss = np.copy(cov2d)
if self.has_nongauss:
cov = [nongauss[idx] for idx in range(obslength)]
cov_diag = []
if obslength == 6:
# 'gggg', 'gggm', 'ggmm', 'gmgm', 'mmgm', 'mmmm'
if gg:
covariance_gggg = self.__create_matrix(cov[0],True,True)
cov2d = covariance_gggg
cov_diag.append(covariance_gggg)
if gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
covariance_gggm = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm],
[covariance_gggm.T, covariance_gmgm]])
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm, covariance_ggmm],
[covariance_gggm.T, covariance_gmgm, covariance_mmgm.T],
[covariance_ggmm.T, covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_gggg, covariance_ggmm],
[covariance_ggmm.T, covariance_mmmm]])
elif gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
cov2d = covariance_gmgm
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gmgm, covariance_mmgm.T],
[covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
cov2d = covariance_mmmm
elif obslength == 10:
# 'ww', 'wgt', 'wxip', 'wxim', 'gtgt', 'xipgt',
# 'ximgt', 'xipxip', 'xipxim', 'ximxim'
if gg:
covariance_ww = self.__create_matrix(cov[0],True,True)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix(cov[3],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_wxim = self.__create_matrix(cov[3],True,True)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = covariance_xipxim
cov2d_nongauss = np.copy(cov2d)
if self.has_ssc:
cov = [ssc[idx] for idx in range(obslength)]
cov_diag = []
if obslength == 6:
# 'gggg', 'gggm', 'ggmm', 'gmgm', 'mmgm', 'mmmm'
if gg:
covariance_gggg = self.__create_matrix(cov[0],True,True)
cov2d = covariance_gggg
cov_diag.append(covariance_gggg)
if gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
covariance_gggm = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm],
[covariance_gggm.T, covariance_gmgm]])
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gggg, covariance_gggm, covariance_ggmm],
[covariance_gggm.T, covariance_gmgm, covariance_mmgm.T],
[covariance_ggmm.T, covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_ggmm = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_gggg, covariance_ggmm],
[covariance_ggmm.T, covariance_mmmm]])
elif gm:
covariance_gmgm = self.__create_matrix(cov[3],False,False)
cov_diag.append(covariance_gmgm)
cov2d = covariance_gmgm
if mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
covariance_mmgm = self.__create_matrix(cov[4],True,False)
cov2d = np.block([[covariance_gmgm, covariance_mmgm.T],
[covariance_mmgm, covariance_mmmm]])
elif mm:
covariance_mmmm = self.__create_matrix(cov[5],True,True)
cov_diag.append(covariance_mmmm)
cov2d = covariance_mmmm
elif obslength == 10:
# 'ww', 'wgt', 'wxip', 'wxim', 'gtgt', 'xipgt',
# 'ximgt', 'xipxip', 'xipxim', 'ximxim'
if gg:
covariance_ww = self.__create_matrix(cov[0],True,True)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix(cov[1],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix(cov[3],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_wxip = self.__create_matrix(cov[2],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_wxim = self.__create_matrix(cov[3],True,True)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix(cov[4],False,False)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
covariance_xipgt = self.__create_matrix(cov[5],True,False)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
covariance_ximgt = self.__create_matrix(cov[6],True,False)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix(cov[7],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix(cov[9],True,True)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix(cov[8],True,True)
cov2d = covariance_xipxim
cov2d_ssc = np.copy(cov2d)
for i in range(len(cov2d[:,0])):
for j in range(len(cov2d[:,0])):
cov2d_total[j,i] = cov2d_total[i,j]
if cov_dict['split_gauss']:
cov2d_gauss[j,i] = cov2d_gauss[i,j]
if self.has_nongauss:
cov2d_nongauss[j,i] = cov2d_nongauss[i,j]
if self.has_ssc:
cov2d_ssc[j,i] = cov2d_ssc[i,j]
if len(np.where(np.linalg.eig(cov2d_total)[0] < 0)[0]) > 0:
print("ALARM: The resulting covariance matrix has negative eigenvalues")
print("Try to adjust the accuracy settings in the config file:")
print("For configuration space covariance reduce theta_accuracy and increase integration_intervals, usually a factor of 2 is enough.")
print("For bandpower covariance reduce bandpower_accuracy.")
print("For COSEBI covariance reduce En_accuracy.")
if self.plot:
self.plot_corrcoeff_matrix(
obs_dict, cov2d_total, cov_diag, proj_quant, n_tomo_clust,
n_tomo_lens, sampledim, self.plot ,fct_args)
if obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obslist[7] = 'CE_mmCE_mm'
obslist[9] = 'CB_mmCB_mm'
if obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['clustering'] == True:
obslist[0] = 'CE_ggCE_gg'
if obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obslist[4] = 'CE_gmCE_gm'
hdr_str = 'Covariance matrix with the diagonals in the order: '
hdr_str += obslist[0]+' ' if obsbool[0] else ''
if obslength == 6:
hdr_str += obslist[3]+' ' if obsbool[3] else ''
hdr_str += obslist[5]+' ' if obsbool[5] else ''
elif obslength == 10:
hdr_str += obslist[4]+' ' if obsbool[4] else ''
hdr_str += obslist[7]+' ' if obsbool[7] else ''
hdr_str += obslist[9]+' ' if obsbool[9] else ''
hdr_str += 'with '
if n_tomo_clust is not None:
hdr_str += str(n_tomo_clust) + ' tomographic clustering bins and '
if n_tomo_lens is not None:
hdr_str += str(n_tomo_lens) + ' tomographic lensing bins and '
alternative = False
if self.projected_clust is not None:
hdr_str += str(len(self.projected_clust)) + ' elements per tomographic clustering bin '
alternative = True
if self.projected_lens is not None:
if not alternative:
hdr_str += str(len(self.projected_lens)) + ' elements per tomographic lensing bin '
else:
hdr_str += str(len(self.projected_lens)) + ' elements per tomographic lensing bin and'
alternative = True
if not alternative:
hdr_str += str(len(proj_quant)) + ' elements per tomographic bin'
            # Write the assembled covariance to disk.  Without split_gauss a
            # single file with the total matrix is produced; with split_gauss
            # the Gaussian / non-Gaussian / SSC parts are written to separate
            # files in addition to the total.
            if 'matrix' in self.style:
                if not cov_dict['split_gauss']:
                    print("Writing matrix output file.")
                    if self.save_as_binary:
                        # np.save appends '.npy' itself, so strip the
                        # configured extension first.
                        fn = self.filename[self.style.index('matrix')]
                        name, extension = os.path.splitext(fn)
                        np.save(name, cov2d_total)
                    else:
                        fn = self.filename[self.style.index('matrix')]
                        np.savetxt(fn, cov2d_total, fmt='%.6e', delimiter=' ',
                                   newline='\n', header=hdr_str, comments='# ')
                else:
                    print("Writing matrix output file.")
                    if self.save_as_binary:
                        fn = self.filename[self.style.index('matrix')]
                        name, extension = os.path.splitext(fn)
                        np.save(name, cov2d_total)
                        fn_gauss = name + "_gauss"
                        fn_nongauss = name + "_nongauss"
                        fn_ssc = name + "_SSC"
                        np.save(fn_gauss, cov2d_gauss)
                        if self.has_nongauss:
                            np.save(fn_nongauss, cov2d_nongauss)
                        if self.has_ssc:
                            np.save(fn_ssc, cov2d_ssc)
                    else:
                        fn = self.filename[self.style.index('matrix')]
                        np.savetxt(fn, cov2d_total, fmt='%.6e', delimiter=' ',
                                   newline='\n', header=hdr_str, comments='# ')
                        name, extension = os.path.splitext(fn)
                        fn_gauss = name + "_gauss" + extension
                        fn_nongauss = name + "_nongauss" + extension
                        fn_ssc = name + "_SSC" + extension
                        np.savetxt(fn_gauss, cov2d_gauss, fmt='%.6e', delimiter=' ',
                                   newline='\n', header=hdr_str, comments='# ')
                        if self.has_nongauss:
                            np.savetxt(fn_nongauss, cov2d_nongauss, fmt='%.6e', delimiter=' ',
                                       newline='\n', header=hdr_str, comments='# ')
                        if self.has_ssc:
                            np.savetxt(fn_ssc, cov2d_ssc, fmt='%.6e', delimiter=' ',
                                       newline='\n', header=hdr_str, comments='# ')
        else:
            # Collapse the split Gaussian contribution (three consecutive
            # entries per covariance block -- presumably the sample-variance,
            # mix and shot-noise parts, confirm upstream) into one array per
            # block: 66 input entries -> 22 combined blocks.
            gauss = [gauss[0]+gauss[1]+gauss[2],
                     gauss[3]+gauss[4]+gauss[5],
                     gauss[6]+gauss[7]+gauss[8],
                     gauss[9]+gauss[10]+gauss[11],
                     gauss[12]+gauss[13]+gauss[14],
                     gauss[15]+gauss[16]+gauss[17],
                     gauss[18]+gauss[19]+gauss[20],
                     gauss[21]+gauss[22]+gauss[23],
                     gauss[24]+gauss[25]+gauss[26],
                     gauss[27]+gauss[28]+gauss[29],
                     gauss[30]+gauss[31]+gauss[32],
                     gauss[33]+gauss[34]+gauss[35],
                     gauss[36]+gauss[37]+gauss[38],
                     gauss[39]+gauss[40]+gauss[41],
                     gauss[42]+gauss[43]+gauss[44],
                     gauss[45]+gauss[46]+gauss[47],
                     gauss[48]+gauss[49]+gauss[50],
                     gauss[51]+gauss[52]+gauss[53],
                     gauss[54]+gauss[55]+gauss[56],
                     gauss[57]+gauss[58]+gauss[59],
                     gauss[60]+gauss[61]+gauss[62],
                     gauss[63]+gauss[64]+gauss[65]]
"""
gggg_ssss_new, gggg_sssp_new, gggg_sspp_new, \
gggg_spsp_new, gggg_ppsp_new, gggg_pppp_new, \
gggm_sssm_new, gggm_sspm_new, gggm_spsm_new, \
gggm_sppm_new, gggm_ppsm_new, gggm_pppm_new, \
ggmm_ssmm_new, ggmm_spmm_new, ggmm_ppmm_new, \
gmgm_smsm_new, gmgm_smpm_new, gmgm_pmsm_new, \
gmgm_pmpm_new, mmgm_mmsm_new, mmgm_mmpm_new, \
mmmm_mmmm_new
"""
            # Sum the requested contributions into the total covariance per
            # block; the has_* flags select which terms enter.
            if self.has_nongauss and self.has_ssc:
                cov = [gauss[idx]+nongauss[idx]+ssc[idx] for idx in range(len(gauss))]
            if self.has_nongauss and not self.has_ssc:
                cov = [gauss[idx]+nongauss[idx] for idx in range(len(gauss))]
            if not self.has_nongauss and self.has_ssc:
                cov = [gauss[idx]+ssc[idx] for idx in range(len(gauss))]
            if not self.has_nongauss and not self.has_ssc:
                cov = [gauss[idx] for idx in range(len(gauss))]
            # Assemble the full 2d matrix block by block.  The suffixes s/p
            # label the two clustering sub-samples (presumably spectroscopic
            # and photometric -- confirm upstream) and m the lensing sample;
            # gg/gm/mm select which probes are present.
            cov_diag = []
            if gg:
                covariance_gggg_ssss = self.__create_matrix_diagonal(cov[0], True, True, True, True)
                covariance_gggg_sssp = self.__create_matrix_diagonal(cov[1], True, False, True, False)
                covariance_gggg_sspp = self.__create_matrix_diagonal(cov[2], True, False, True, True)
                covariance_gggg_spsp = self.__create_matrix(cov[3],False, False)
                covariance_gggg_ppsp = self.__create_matrix(cov[4],True, False)
                covariance_gggg_pppp = self.__create_matrix(cov[5],True, True)
                cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp],
                                  [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T],
                                  [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp]])
                cov_diag.append(covariance_gggg_ssss)
                cov_diag.append(covariance_gggg_spsp)
                cov_diag.append(covariance_gggg_pppp)
                if gm:
                    covariance_gggm_sssm = self.__create_matrix_diagonal(cov[6], True, False, True, False)
                    covariance_gggm_sspm = self.__create_matrix_diagonal(cov[7], True, False, True, False)
                    covariance_gggm_spsm = self.__create_matrix(cov[8], False, False)
                    covariance_gggm_sppm = self.__create_matrix(cov[9],False,False)
                    covariance_gggm_ppsm = self.__create_matrix(cov[10],True,False)
                    covariance_gggm_pppm = self.__create_matrix(cov[11],True,False)
                    covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                    covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                    covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                    covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                    cov_diag.append(covariance_gmgm_smsm)
                    cov_diag.append(covariance_gmgm_pmpm)
                    cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm],
                                      [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm],
                                      [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm],
                                      [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm],
                                      [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                    if mm:
                        covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                        cov_diag.append(covariance_mmmm_mmmm)
                        covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                        covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                        covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                        covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                        covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                        cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm, covariance_ggmm_ssmm],
                                          [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm, covariance_ggmm_spmm],
                                          [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm, covariance_ggmm_ppmm],
                                          [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                          [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                          [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
                elif mm:
                    covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                    cov_diag.append(covariance_mmmm_mmmm)
                    covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                    covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                    covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                    cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_ggmm_ssmm],
                                      [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_ggmm_spmm],
                                      [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_ggmm_ppmm],
                                      [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmmm_mmmm]])
            elif gm:
                covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                cov_diag.append(covariance_gmgm_smsm)
                cov_diag.append(covariance_gmgm_pmpm)
                cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm],
                                  [covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                if mm:
                    covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                    cov_diag.append(covariance_mmmm_mmmm)
                    covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                    covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                    cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                      [covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                      [covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
            elif mm:
                covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                cov_diag.append(covariance_mmmm_mmmm)
                cov2d = covariance_mmmm_mmmm
            # Keep a copy: cov2d is re-used below for the split terms.
            cov2d_total = np.copy(cov2d)
            # If the Gaussian part is to be written separately, rebuild the
            # matrix from the Gaussian contribution alone (same block layout
            # as the total matrix above).
            if cov_dict['split_gauss']:
                cov = [gauss[idx] for idx in range(len(gauss))]
                cov_diag = []
                if gg:
                    covariance_gggg_ssss = self.__create_matrix_diagonal(cov[0], True, True, True, True)
                    covariance_gggg_sssp = self.__create_matrix_diagonal(cov[1], True, False, True, False)
                    covariance_gggg_sspp = self.__create_matrix_diagonal(cov[2], True, False, True, True)
                    covariance_gggg_spsp = self.__create_matrix(cov[3],False, False)
                    covariance_gggg_ppsp = self.__create_matrix(cov[4],True, False)
                    covariance_gggg_pppp = self.__create_matrix(cov[5],True, True)
                    cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp],
                                      [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T],
                                      [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp]])
                    cov_diag.append(covariance_gggg_ssss)
                    cov_diag.append(covariance_gggg_spsp)
                    cov_diag.append(covariance_gggg_pppp)
                    if gm:
                        covariance_gggm_sssm = self.__create_matrix_diagonal(cov[6], True, False, True, False)
                        covariance_gggm_sspm = self.__create_matrix_diagonal(cov[7], True, False, True, False)
                        covariance_gggm_spsm = self.__create_matrix(cov[8], False, False)
                        covariance_gggm_sppm = self.__create_matrix(cov[9],False,False)
                        covariance_gggm_ppsm = self.__create_matrix(cov[10],True,False)
                        covariance_gggm_pppm = self.__create_matrix(cov[11],True,False)
                        covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                        covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                        covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                        covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                        cov_diag.append(covariance_gmgm_smsm)
                        cov_diag.append(covariance_gmgm_pmpm)
                        cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm],
                                          [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm],
                                          [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm],
                                          [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm],
                                          [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                        if mm:
                            covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                            cov_diag.append(covariance_mmmm_mmmm)
                            covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                            covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                            covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                            covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                            covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                            cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm, covariance_ggmm_ssmm],
                                              [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm, covariance_ggmm_spmm],
                                              [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm, covariance_ggmm_ppmm],
                                              [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                              [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                              [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
                    elif mm:
                        covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                        cov_diag.append(covariance_mmmm_mmmm)
                        covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                        covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                        covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                        cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_ggmm_ssmm],
                                          [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_ggmm_spmm],
                                          [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_ggmm_ppmm],
                                          [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmmm_mmmm]])
                elif gm:
                    covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                    covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                    covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                    covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                    cov_diag.append(covariance_gmgm_smsm)
                    cov_diag.append(covariance_gmgm_pmpm)
                    cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm],
                                      [covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                    if mm:
                        covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                        cov_diag.append(covariance_mmmm_mmmm)
                        covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                        covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                        cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                          [covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                          [covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
                elif mm:
                    covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                    cov_diag.append(covariance_mmmm_mmmm)
                    cov2d = covariance_mmmm_mmmm
                cov2d_gauss = np.copy(cov2d)
            # Rebuild the matrix from the super-sample covariance (SSC)
            # contribution alone; identical block layout as the total matrix.
            if self.has_ssc:
                cov = [ssc[idx] for idx in range(len(gauss))]
                cov_diag = []
                if gg:
                    covariance_gggg_ssss = self.__create_matrix_diagonal(cov[0], True, True, True, True)
                    covariance_gggg_sssp = self.__create_matrix_diagonal(cov[1], True, False, True, False)
                    covariance_gggg_sspp = self.__create_matrix_diagonal(cov[2], True, False, True, True)
                    covariance_gggg_spsp = self.__create_matrix(cov[3],False, False)
                    covariance_gggg_ppsp = self.__create_matrix(cov[4],True, False)
                    covariance_gggg_pppp = self.__create_matrix(cov[5],True, True)
                    cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp],
                                      [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T],
                                      [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp]])
                    cov_diag.append(covariance_gggg_ssss)
                    cov_diag.append(covariance_gggg_spsp)
                    cov_diag.append(covariance_gggg_pppp)
                    if gm:
                        covariance_gggm_sssm = self.__create_matrix_diagonal(cov[6], True, False, True, False)
                        covariance_gggm_sspm = self.__create_matrix_diagonal(cov[7], True, False, True, False)
                        covariance_gggm_spsm = self.__create_matrix(cov[8], False, False)
                        covariance_gggm_sppm = self.__create_matrix(cov[9],False,False)
                        covariance_gggm_ppsm = self.__create_matrix(cov[10],True,False)
                        covariance_gggm_pppm = self.__create_matrix(cov[11],True,False)
                        covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                        covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                        covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                        covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                        cov_diag.append(covariance_gmgm_smsm)
                        cov_diag.append(covariance_gmgm_pmpm)
                        cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm],
                                          [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm],
                                          [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm],
                                          [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm],
                                          [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                        if mm:
                            covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                            cov_diag.append(covariance_mmmm_mmmm)
                            covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                            covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                            covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                            covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                            covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                            cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm, covariance_ggmm_ssmm],
                                              [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm, covariance_ggmm_spmm],
                                              [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm, covariance_ggmm_ppmm],
                                              [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                              [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                              [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
                    elif mm:
                        covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                        cov_diag.append(covariance_mmmm_mmmm)
                        covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                        covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                        covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                        cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_ggmm_ssmm],
                                          [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_ggmm_spmm],
                                          [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_ggmm_ppmm],
                                          [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmmm_mmmm]])
                elif gm:
                    covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                    covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                    covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                    covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                    cov_diag.append(covariance_gmgm_smsm)
                    cov_diag.append(covariance_gmgm_pmpm)
                    cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm],
                                      [covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                    if mm:
                        covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                        cov_diag.append(covariance_mmmm_mmmm)
                        covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                        covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                        cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                          [covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                          [covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
                elif mm:
                    covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                    cov_diag.append(covariance_mmmm_mmmm)
                    cov2d = covariance_mmmm_mmmm
                cov2d_ssc = np.copy(cov2d)
            # Rebuild the matrix from the non-Gaussian (connected trispectrum)
            # contribution alone; identical block layout as the total matrix.
            if self.has_nongauss:
                cov = [nongauss[idx] for idx in range(len(gauss))]
                cov_diag = []
                if gg:
                    covariance_gggg_ssss = self.__create_matrix_diagonal(cov[0], True, True, True, True)
                    covariance_gggg_sssp = self.__create_matrix_diagonal(cov[1], True, False, True, False)
                    covariance_gggg_sspp = self.__create_matrix_diagonal(cov[2], True, False, True, True)
                    covariance_gggg_spsp = self.__create_matrix(cov[3],False, False)
                    covariance_gggg_ppsp = self.__create_matrix(cov[4],True, False)
                    covariance_gggg_pppp = self.__create_matrix(cov[5],True, True)
                    cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp],
                                      [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T],
                                      [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp]])
                    cov_diag.append(covariance_gggg_ssss)
                    cov_diag.append(covariance_gggg_spsp)
                    cov_diag.append(covariance_gggg_pppp)
                    if gm:
                        covariance_gggm_sssm = self.__create_matrix_diagonal(cov[6], True, False, True, False)
                        covariance_gggm_sspm = self.__create_matrix_diagonal(cov[7], True, False, True, False)
                        covariance_gggm_spsm = self.__create_matrix(cov[8], False, False)
                        covariance_gggm_sppm = self.__create_matrix(cov[9],False,False)
                        covariance_gggm_ppsm = self.__create_matrix(cov[10],True,False)
                        covariance_gggm_pppm = self.__create_matrix(cov[11],True,False)
                        covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                        covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                        covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                        covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                        cov_diag.append(covariance_gmgm_smsm)
                        cov_diag.append(covariance_gmgm_pmpm)
                        cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm],
                                          [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm],
                                          [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm],
                                          [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm],
                                          [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                        if mm:
                            covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                            cov_diag.append(covariance_mmmm_mmmm)
                            covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                            covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                            covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                            covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                            covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                            cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_gggm_sssm, covariance_gggm_sspm, covariance_ggmm_ssmm],
                                              [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_gggm_spsm, covariance_gggm_sppm, covariance_ggmm_spmm],
                                              [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_gggm_ppsm, covariance_gggm_pppm, covariance_ggmm_ppmm],
                                              [covariance_gggm_sssm.T, covariance_gggm_spsm.T, covariance_gggm_ppsm.T, covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                              [covariance_gggm_sspm.T, covariance_gggm_sppm.T, covariance_gggm_pppm.T, covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                              [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
                    elif mm:
                        covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                        cov_diag.append(covariance_mmmm_mmmm)
                        covariance_ggmm_ssmm = self.__create_matrix_diagonal(cov[12],True,False,True,True)
                        covariance_ggmm_spmm = self.__create_matrix(cov[13],False,True)
                        covariance_ggmm_ppmm = self.__create_matrix(cov[14],True,True)
                        cov2d = np.block([[covariance_gggg_ssss, covariance_gggg_sssp, covariance_gggg_sspp, covariance_ggmm_ssmm],
                                          [covariance_gggg_sssp.T, covariance_gggg_spsp, covariance_gggg_ppsp.T, covariance_ggmm_spmm],
                                          [covariance_gggg_sspp.T, covariance_gggg_ppsp, covariance_gggg_pppp, covariance_ggmm_ppmm],
                                          [covariance_ggmm_ssmm.T, covariance_ggmm_spmm.T, covariance_ggmm_ppmm.T, covariance_mmmm_mmmm]])
                elif gm:
                    covariance_gmgm_smsm = self.__create_matrix(cov[15],False,False)
                    covariance_gmgm_smpm = self.__create_matrix(cov[16],False,False)
                    covariance_gmgm_pmsm = self.__create_matrix(cov[17],False,False)
                    covariance_gmgm_pmpm = self.__create_matrix(cov[18],False,False)
                    cov_diag.append(covariance_gmgm_smsm)
                    cov_diag.append(covariance_gmgm_pmpm)
                    cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm],
                                      [covariance_gmgm_pmsm, covariance_gmgm_pmpm]])
                    if mm:
                        covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                        cov_diag.append(covariance_mmmm_mmmm)
                        covariance_mmgm_mmsm = self.__create_matrix(cov[19],True,False)
                        covariance_mmgm_mmpm = self.__create_matrix(cov[20],True,False)
                        cov2d = np.block([[covariance_gmgm_smsm, covariance_gmgm_smpm, covariance_mmgm_mmsm.T],
                                          [covariance_gmgm_pmsm, covariance_gmgm_pmpm, covariance_mmgm_mmpm.T],
                                          [covariance_mmgm_mmsm, covariance_mmgm_mmpm, covariance_mmmm_mmmm]])
                elif mm:
                    covariance_mmmm_mmmm = self.__create_matrix(cov[21],True,True)
                    cov_diag.append(covariance_mmmm_mmmm)
                    cov2d = covariance_mmmm_mmmm
                cov2d_nongauss = np.copy(cov2d)
            # Enforce exact symmetry on the assembled matrices (the block
            # construction only guarantees symmetry up to transposed
            # off-diagonal blocks and floating-point round-off).
            for i in range(len(cov2d[:,0])):
                for j in range(len(cov2d[:,0])):
                    cov2d_total[j,i] = cov2d_total[i,j]
                    if cov_dict['split_gauss']:
                        cov2d_gauss[j,i] = cov2d_gauss[i,j]
                    if self.has_nongauss:
                        cov2d_nongauss[j,i] = cov2d_nongauss[i,j]
                    if self.has_ssc:
                        cov2d_ssc[j,i] = cov2d_ssc[i,j]
            # Warn if the total covariance is not positive semi-definite.
            # NOTE(review): np.linalg.eigvalsh would be cheaper here and
            # returns guaranteed-real eigenvalues for the symmetric matrix.
            if len(np.where(np.linalg.eig(cov2d_total)[0] < 0)[0]) > 0:
                print("ALARM: The resulting covariance matrix has negative eigenvalues")
                print("Try to adjust the accuracy settings in the config file:")
                print("For configuration space covariance reduce theta_accuracy and increase integration_intervals, usually a factor of 2 is enough.")
                print("For bandpower covariance reduce bandpower_accuracy.")
                print("For COSEBI covariance reduce En_accuracy.")
'''if obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obslist[7] = 'CE_mmCE_mm'
obslist[9] = 'CB_mmCB_mm'
if obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['clustering'] == True:
obslist[0] = 'CE_ggCE_gg'
if obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obslist[4] = 'CE_gmCE_gm
'''
hdr_str = 'Covariance matrix with the diagonals in the order: '
'''hdr_str += obslist[0]+' ' if obsbool[0] else ''
if obslength == 6:
hdr_str += obslist[3]+' ' if obsbool[3] else ''
hdr_str += obslist[5]+' ' if obsbool[5] else ''
elif obslength == 10:
hdr_str += obslist[4]+' ' if obsbool[4] else ''
hdr_str += obslist[7]+' ' if obsbool[7] else ''
hdr_str += obslist[9]+' ' if obsbool[9] else ''
hdr_str += 'with '
if n_tomo_clust is not None:
hdr_str += str(n_tomo_clust) + ' tomographic clustering bins and '
if n_tomo_lens is not None:
hdr_str += str(n_tomo_lens) + ' tomographic lensing bins and '
hdr_str += str(len(proj_quant)) + ' elements per tomographic bin'
'''
            # Write the matrices to disk; with split_gauss the Gaussian /
            # non-Gaussian / SSC parts are written to separate files as well.
            # NOTE(review): unlike the writer in the other branch above, this
            # one has no self.save_as_binary path -- confirm whether binary
            # output should be supported here too.
            if 'matrix' in self.style:
                if not cov_dict['split_gauss']:
                    print("Writing matrix output file.")
                    fn = self.filename[self.style.index('matrix')]
                    np.savetxt(fn, cov2d_total, fmt='%.6e', delimiter=' ',
                               newline='\n', header=hdr_str, comments='# ')
                else:
                    print("Writing matrix output file.")
                    fn = self.filename[self.style.index('matrix')]
                    np.savetxt(fn, cov2d_total, fmt='%.6e', delimiter=' ',
                               newline='\n', header=hdr_str, comments='# ')
                    name, extension = os.path.splitext(fn)
                    fn_gauss = name + "_gauss" + extension
                    fn_nongauss = name + "_nongauss" + extension
                    fn_ssc = name + "_SSC" + extension
                    np.savetxt(fn_gauss, cov2d_gauss, fmt='%.6e', delimiter=' ',
                               newline='\n', header=hdr_str, comments='# ')
                    if self.has_nongauss:
                        np.savetxt(fn_nongauss, cov2d_nongauss, fmt='%.6e', delimiter=' ',
                                   newline='\n', header=hdr_str, comments='# ')
                    if self.has_ssc:
                        np.savetxt(fn_ssc, cov2d_ssc, fmt='%.6e', delimiter=' ',
                                   newline='\n', header=hdr_str, comments='# ')
def __write_cov_matrix_arbitrary(self,
obs_dict,
cov_dict,
n_tomo_clust,
n_tomo_lens,
sampledim,
read_in_tables,
gauss,
nongauss,
ssc,
fct_args):
obslist, obsbool, obslength, mult, gg, gm, mm, xipp, xipm, ximm = \
fct_args
if obslength == 6 and mult == 3:
gauss = [gauss[0]+gauss[1]+gauss[2],
gauss[3]+gauss[4]+gauss[5],
gauss[6]+gauss[7]+gauss[8],
gauss[9]+gauss[10]+gauss[11],
gauss[12]+gauss[13]+gauss[14],
gauss[15]+gauss[16]+gauss[17]]
elif obslength == 10 and mult == 3:
gauss = [gauss[0]+gauss[1]+gauss[2],
gauss[3]+gauss[4]+gauss[5],
gauss[6]+gauss[7]+gauss[8],
gauss[9]+gauss[10]+gauss[11],
gauss[12]+gauss[13]+gauss[14],
gauss[15]+gauss[16]+gauss[17],
gauss[18]+gauss[19]+gauss[20],
gauss[21]+gauss[22]+gauss[23],
gauss[24]+gauss[25]+gauss[26],
gauss[27]+gauss[28]+gauss[29]]
if mm:
xipm = True
ximm = True
xipp = True
else:
xipm = False,
ximm = False
xipp = False
summary = read_in_tables['arb_summary']
# 'ww', 'wgt', 'wxip', 'wxim', 'gtgt', 'xipgt',
# 'ximgt', 'xipxip', 'xipxim', 'ximxim'
cov = [gauss[idx]+nongauss[idx]+ssc[idx] for idx in range(obslength)]
cov_diag = []
if gg:
covariance_ww = self.__create_matrix_arbitrary(cov[0],True,True,'gg','gg',summary)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False,'gm', 'gm', summary)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix_arbitrary(cov[1],True,False,'gg', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True,'gg', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True ,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True, 'gg', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False, 'gm', 'gm', summary)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True, 'mm', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxim
cov2d_total = np.copy(cov2d)
if cov_dict['split_gauss']:
cov = [gauss[idx] for idx in range(obslength)]
cov_diag = []
if gg:
covariance_ww = self.__create_matrix_arbitrary(cov[0],True,True,'gg','gg',summary)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False,'gm', 'gm', summary)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix_arbitrary(cov[1],True,False,'gg', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True,'gg', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True ,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True, 'gg', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False, 'gm', 'gm', summary)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True, 'mm', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxim
cov2d_gauss = np.copy(cov2d)
cov = [ssc[idx] for idx in range(obslength)]
cov_diag = []
if gg:
covariance_ww = self.__create_matrix_arbitrary(cov[0],True,True,'gg','gg',summary)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False,'gm', 'gm', summary)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix_arbitrary(cov[1],True,False,'gg', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True,'gg', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True ,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True, 'gg', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False, 'gm', 'gm', summary)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True, 'mm', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxim
cov2d_ssc = np.copy(cov2d)
cov = [nongauss[idx] for idx in range(obslength)]
cov_diag = []
if gg:
covariance_ww = self.__create_matrix_arbitrary(cov[0],True,True,'gg','gg',summary)
cov2d = covariance_ww
cov_diag.append(covariance_ww)
if gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False,'gm', 'gm', summary)
cov_diag.append(covariance_gtgt)
covariance_wgt = self.__create_matrix_arbitrary(cov[1],True,False,'gg', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt],
[covariance_wgt.T, covariance_gtgt]])
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True,'gg', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov_diag.append(covariance_ximxim)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, covariance_ximgt, np.zeros_like(covariance_ximxim), covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wgt, covariance_wxip, covariance_wxim],
[covariance_wgt.T, covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_wxip.T, covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True ,'mm', 'mm', summary)
covariance_wxip = self.__create_matrix_arbitrary(cov[2],True,True, 'gg', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip],
[covariance_wxip.T, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_wxim = self.__create_matrix_arbitrary(cov[3],True,True, 'gg', 'mm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_wxim.T, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_ww, covariance_wxip,covariance_wxim],
[covariance_wxip.T, covariance_xipxip, covariance_xipxim],
[covariance_wxim.T, covariance_xipxim.T, covariance_ximxim]])
elif gm:
covariance_gtgt = self.__create_matrix_arbitrary(cov[4],False,False, 'gm', 'gm', summary)
cov2d = covariance_gtgt
cov_diag.append(covariance_gtgt)
if xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True, 'mm', 'mm', summary)
covariance_xipgt = self.__create_matrix_arbitrary(cov[5],True,False, 'mm', 'gm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T],
[covariance_xipgt, covariance_xipxip]])
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
covariance_ximgt = self.__create_matrix_arbitrary(cov[6],True,False, 'mm', 'gm', summary)
cov_diag.append(covariance_ximxim)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, np.zeros_like(covariance_ximxim)],
[covariance_ximgt, np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_gtgt, covariance_xipgt.T, covariance_ximgt.T],
[covariance_xipgt, covariance_xipxip, covariance_xipxim],
[covariance_ximgt, covariance_xipxim.T, covariance_ximxim]])
elif xipp:
covariance_xipxip = self.__create_matrix_arbitrary(cov[7],True,True,'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_xipxip)
if ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, np.zeros_like(covariance_ximxim)],
[np.zeros_like(covariance_ximxim).T, covariance_ximxim]])
cov_diag.append(covariance_ximxim)
if xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = np.block([[covariance_xipxip, covariance_xipxim],
[covariance_xipxim.T, covariance_ximxim]])
elif ximm:
covariance_ximxim = self.__create_matrix_arbitrary(cov[9],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxip
cov_diag.append(covariance_ximxim)
elif xipm:
covariance_xipxim = self.__create_matrix_arbitrary(cov[8],True,True, 'mm', 'mm', summary)
cov2d = covariance_xipxim
cov2d_nongauss = np.copy(cov2d)
for i in range(len(cov2d[:,0])):
for j in range(len(cov2d[:,0])):
cov2d_total[j,i] = cov2d_total[i,j]
if cov_dict['split_gauss']:
cov2d_gauss[j,i] = cov2d_gauss[i,j]
cov2d_nongauss[j,i] = cov2d_nongauss[i,j]
cov2d_ssc[j,i] = cov2d_ssc[i,j]
if len(np.where(np.linalg.eig(cov2d_total)[0] < 0)[0]) > 0:
print("ALARM: The resulting covariance matrix has negative eigenvalues")
print("Try to adjust the accuracy settings in the config file:")
print("For configuration space covariance reduce theta_accuracy and increase integration_intervals, usually a factor of 2 is enough.")
print("For bandpower covariance reduce bandpower_accuracy.")
print("For COSEBI covariance reduce En_accuracy.")
if self.plot:
self.plot_corrcoeff_matrix_arbitrary(
obs_dict, cov2d_total, cov_diag, summary, n_tomo_clust,
n_tomo_lens, sampledim, self.plot ,fct_args)
if obs_dict['observables']['est_shear'] == 'bandpowers' and obs_dict['observables']['cosmic_shear'] == True:
obslist[7] = 'CE_mmCE_mm'
obslist[9] = 'CB_mmCB_mm'
if obs_dict['observables']['est_clust'] == 'bandpowers' and obs_dict['observables']['clustering'] == True:
obslist[0] = 'CE_ggCE_gg'
if obs_dict['observables']['est_ggl'] == 'bandpowers' and obs_dict['observables']['ggl'] == True:
obslist[4] = 'CE_gmCE_gm'
hdr_str = 'Covariance matrix with the diagonals in the order: '
hdr_str += obslist[0]+' ' if obsbool[0] else ''
hdr_str += obslist[4]+' ' if obsbool[4] else ''
hdr_str += obslist[7]+' ' if obsbool[7] else ''
hdr_str += obslist[9]+' ' if obsbool[9] else ''
hdr_str += 'with '
if n_tomo_clust is not None:
hdr_str += str(n_tomo_clust) + ' tomographic clustering bins and '
if n_tomo_lens is not None:
hdr_str += str(n_tomo_lens) + ' tomographic lensing bins and '
if obs_dict['observables']['clustering']:
hdr_str += str(len(summary['WL_gg'])) + ' elements per tomographic bin in gg, '
hdr_str += str(int(summary['arb_number_first_summary_gg'])) + ' spatial indices for probe 1 '
if summary['number_summary_gg'] > 1:
hdr_str += 'and ' + str(int( len(summary['WL_gg']) - summary['arb_number_first_summary_gg'])) + ' spatial indices for probe 2. '
if obs_dict['observables']['ggl']:
hdr_str += str(len(summary['WL_gm'])) + ' elements per tomographic bin in gm '
hdr_str += str(int(summary['arb_number_first_summary_gm'])) + ' spatial indices for probe 1 '
if summary['number_summary_gm'] > 1:
hdr_str += 'and ' + str(int( len(summary['WL_gm']) - summary['arb_number_first_summary_gm'])) + ' spatial indices for probe 2. '
if obs_dict['observables']['cosmic_shear']:
hdr_str += str(len(summary['WL_mmE'])) + ' elements per tomographic bin in mm'
hdr_str += str(int(summary['arb_number_first_summary_mm'])) + ' spatial indices for probe 1 '
if summary['number_summary_mm'] > 1:
hdr_str += 'and ' + str(int( len(summary['WL_mmE']) - summary['arb_number_first_summary_mm'])) + ' spatial indices for probe 2, both for E and B mode.'
if 'matrix' in self.style:
if not cov_dict['split_gauss']:
print("Writing matrix output file.")
fn = self.filename[self.style.index('matrix')]
if self.save_as_binary:
name, extension = os.path.splitext(fn)
np.save(name, cov2d_total)
else:
np.savetxt(fn, cov2d_total, fmt='%.6e', delimiter=' ',
newline='\n', header=hdr_str, comments='# ')
else:
print("Writing matrix output file.")
if self.save_as_binary:
fn = self.filename[self.style.index('matrix')]
name, extension = os.path.splitext(fn)
np.save(name, cov2d_total)
fn_gauss = name + "_gauss"
fn_nongauss = name + "_nongauss"
fn_ssc = name + "_SSC"
np.save(fn_gauss, cov2d_gauss)
if self.has_nongauss:
np.save(fn_nongauss, cov2d_nongauss)
if self.has_ssc:
np.save(fn_ssc, cov2d_ssc)
else:
fn = self.filename[self.style.index('matrix')]
np.savetxt(fn, cov2d_total, fmt='%.6e', delimiter=' ',
newline='\n', header=hdr_str, comments='# ')
name, extension = os.path.splitext(fn)
fn_gauss = name + "_gauss" + extension
fn_nongauss = name + "_nongauss" + extension
fn_ssc = name + "_SSC" + extension
np.savetxt(fn_gauss, cov2d_gauss, fmt='%.6e', delimiter=' ',
newline='\n', header=hdr_str, comments='# ')
if self.has_nongauss:
np.savetxt(fn_nongauss, cov2d_nongauss, fmt='%.6e', delimiter=' ',
newline='\n', header=hdr_str, comments='# ')
if self.has_ssc:
np.savetxt(fn_ssc, cov2d_ssc, fmt='%.6e', delimiter=' ',
newline='\n', header=hdr_str, comments='# ')
def __get_obslist(self,
obs_dict,
xipm = False):
if not xipm:
mm = obs_dict['observables']['cosmic_shear']
gm = obs_dict['observables']['ggl']
gg = obs_dict['observables']['clustering']
cross = obs_dict['observables']['cross_terms']
obslist = ['gggg', 'gggm', 'ggmm', 'gmgm', 'mmgm', 'mmmm']
obsbool = [gg, gg and gm and cross, gg and mm and cross, gm,
mm and gm and cross, mm]
else:
if obs_dict['observables']['cosmic_shear']:
xipp = obs_dict['THETAspace']['xi_pp']
xipm = obs_dict['THETAspace']['xi_pm']
ximm = obs_dict['THETAspace']['xi_mm']
if obs_dict['observables']['est_shear'] == "cosebi":
xipp = True
xipm = True
ximm = True
else:
xipp = False
xipm = False
ximm = False
gm = obs_dict['observables']['ggl']
gg = obs_dict['observables']['clustering']
cross = obs_dict['observables']['cross_terms']
obslist = ['gggg', 'gggm', 'ggxip', 'ggxim', 'gmgm', 'gmxip',
'gmxim', 'xipxip', 'xipxim', 'ximxim']
obsbool = [gg, gg and gm and cross, gg and xipp and cross,
gg and ximm and cross, gm, gm and xipp and cross,
gm and ximm and cross, xipp, xipm, ximm]
return obslist, obsbool, len(obslist)
def __none_to_zero(self,
gauss,
nongauss,
ssc):
for idx in range(len(gauss)):
if gauss[idx] is None:
gauss[idx] = 0
for idx in range(len(nongauss)):
if nongauss[idx] is None:
nongauss[idx] = 0
if ssc[idx] is None:
ssc[idx] = 0
return gauss, nongauss, ssc
def __check_for_empty_input(self,
out,
shape):
if out is None or type(out) is int:
out = np.zeros(shape)
return out
def __get_sampledim(self,
gauss,
nongauss,
ssc):
if self.has_gauss:
sampledim_save = 0
for idx in range(len(gauss)):
try:
sampledim = (gauss[idx].shape)[2]
if(sampledim < sampledim_save):
sampledim = sampledim_save
sampledim_save = (gauss[idx].shape)[2]
break
except (AttributeError,TypeError):
sampledim = -1
if self.has_nongauss:
sample_dim_save = 0
for idx in range(len(nongauss)):
try:
sampledim = (nongauss[idx].shape)[2]
if(sampledim < sampledim_save):
sampledim = sampledim_save
sampledim_save = (nongauss[idx].shape)[2]
break
except (AttributeError,TypeError):
sampledim = -1
if self.has_ssc:
sampledim_save = 0
for idx in range(len(ssc)):
try:
sampledim = (ssc[idx].shape)[2]
if(sampledim < sampledim_save):
sampledim = sampledim_save
sampledim_save = (ssc[idx].shape)[2]
break
except (AttributeError,TypeError):
sampledim = -1
if sampledim == -1:
raise Exception("InputError: Neither of the covariance terms " +
"seems to have any values. Please check.")
return sampledim
def __get_tomodim(self,
gauss,
nongauss,
ssc):
tomodim = [-1]
try:
tomodim = (gauss.shape)[4:]
except (AttributeError,TypeError):
...
try:
tomodim = (nongauss.shape)[4:]
except (AttributeError,TypeError):
...
try:
tomodim = (ssc.shape)[4:]
except (AttributeError,TypeError):
...
return list(tomodim)
def __get_idxlist(self,
proj_quant,
sampledim):
idxlist = []
for idxi, ri in enumerate(proj_quant):
for idxj in range(len(proj_quant)):
rj = proj_quant[idxj]
for s1 in range(sampledim):
for s2 in range(sampledim):
idxlist.append((idxi, ri, idxj, rj, s1, s2))
return idxlist
def project_to_2d_notomo(self,
covmatrix):
knum = covmatrix.shape[0]
sampledim = covmatrix.shape[2]
len2d = knum*sampledim
cov2d = np.zeros((len2d,len2d))
idx1, idx2 = 0, 0
for s1 in range(sampledim):
for s2 in range(sampledim):
cov2d[idx1:idx1+knum, idx2:idx2+knum] = \
covmatrix[:,:,s1, s2]
idx1 += knum
if idx1 == len(cov2d):
idx1 = 0
idx2 += knum
return cov2d
    def project_to_2d(self,
                      mode,
                      covmatrix,
                      tomo1 = None,
                      tomo2 = None):
        """Flatten a per-probe covariance tensor into a 2d matrix.

        Parameters
        ----------
        mode : str
            Probe combination; one of 'gggg', 'gggm', 'ggmm', 'ggxip',
            'ggxim', 'gmgm', 'mmgm', 'gmxip', 'gmxim', 'mmmm', 'xipxip',
            'xipxim', 'ximxim'. It determines which tomographic bin pairs
            are unique: auto-probes (gg, mm/xi) use only t <= t' pairs,
            the cross-probe gm uses all pairs.
        covmatrix : array
            Covariance with axes (ell, ell, sample, sample) plus up to
            four tomographic axes; if it has no tomographic axes the
            no-tomography projection is used instead.
        tomo1, tomo2 : int, optional
            Number of tomographic bins; inferred from the array shape when
            omitted (and partly re-read from the shape per mode below).

        Returns
        -------
        cov2d : 2d array
            The flattened covariance block.
        """
        ellnum = covmatrix.shape[0]
        sampledim = covmatrix.shape[2]
        if tomo1 is None or tomo2 is None:
            try:
                # infer bin counts from the trailing (tomographic) axes
                tomo1, tomo2 = \
                    min(covmatrix.shape[4:]), max(covmatrix.shape[4:])
            except ValueError:
                # no tomographic axes at all -> simple (k, sample) flattening
                return self.project_to_2d_notomo(covmatrix)
        # NOTE(review): len2d below only accounts for the tomographic bin
        # pairs, while the sample loops also advance the write position --
        # this appears to assume sampledim == 1; verify for multi-sample runs.
        if mode in ['gggg', 'mmmm', 'xipxip', 'xipxim', 'ximxim']:
            # auto-probe x auto-probe: unique bin pairs (t<=t') on both sides
            tomo1 = covmatrix.shape[4]
            len2d = int(tomo1*(tomo1+1)/2)*ellnum
            cov2d = np.zeros((len2d,len2d))
            idx1, idx2 = 0, 0
            for s1 in range(sampledim):
                for s2 in range(s1, sampledim):
                    for t1 in range(tomo1):
                        for t3 in range(t1, tomo1):
                            for t2 in range(tomo1):
                                for t4 in range(t2, tomo1):
                                    # fill row-blocks left to right, then wrap
                                    cov2d[idx1:idx1+ellnum, idx2:idx2+ellnum] = \
                                        covmatrix[:,:,s1, s2, t1, t3, t2, t4]
                                    idx2 += ellnum
                                    if idx2 == len2d:
                                        idx2 = 0
                                        idx1 += ellnum
        elif mode == 'gmgm':
            # cross-probe auto-correlation: all (lens, source) bin pairs
            tomo1 = covmatrix.shape[4]
            tomo2 = covmatrix.shape[5]
            len2d = tomo1*tomo2*ellnum
            cov2d = np.zeros((len2d,len2d))
            idx1, idx2 = 0, 0
            for s1 in range(sampledim):
                for s2 in range(s1, sampledim):
                    for t1 in range(tomo1):
                        for t2 in range(tomo2):
                            for t3 in range(tomo1):
                                for t4 in range(tomo2):
                                    cov2d[idx1:idx1+ellnum, idx2:idx2+ellnum] = \
                                        covmatrix[:,:,s1, s2, t1, t3, t2, t4]
                                    idx2 += ellnum
                                    if idx2 == len2d:
                                        idx2 = 0
                                        idx1 += ellnum
        elif mode in ['gggm', 'mmgm', 'gmxip', 'gmxim']:
            # mixed auto-probe x cross-probe blocks
            if mode in ['gmxip', 'gmxim']:
                # reorder so the auto-probe (xi) side comes first,
                # mirroring the gggm/mmgm axis convention
                covmatrix = covmatrix.transpose(0,1,2,3,6,7,4,5)
            tomo1 = covmatrix.shape[4]
            tomo2 = covmatrix.shape[5]
            tomo3 = covmatrix.shape[6]
            tomo4 = covmatrix.shape[7]
            if mode == 'gggm' or mode == 'mmgm':
                # rows: unique auto-probe pairs; columns: all gm pairs
                len2d2 = tomo3*tomo4*ellnum
                len2d1 = int(tomo1*(tomo2+1)/2)*ellnum
                cov2d = np.zeros((len2d1,len2d2))
                idx1, idx2 = 0, 0
                for s1 in range(sampledim):
                    for s2 in range(s1, sampledim):
                        for t1 in range(tomo1):
                            for t3 in range(t1,tomo1):
                                for t2 in range(tomo3):
                                    for t4 in range(tomo4):
                                        cov2d[idx1:idx1+ellnum, idx2:idx2+ellnum] = \
                                            covmatrix[:,:,s1, s2, t1, t3, t2, t4]
                                        idx2 += ellnum
                                        if idx2 == len2d2:
                                            idx2 = 0
                                            idx1 += ellnum
            else:
                # rows: all gm pairs; columns: unique auto-probe pairs
                len2d1 = tomo1*tomo2*ellnum
                len2d2 = int(tomo3*(tomo4+1)/2)*ellnum
                cov2d = np.zeros((len2d1,len2d2))
                idx1, idx2 = 0, 0
                for s1 in range(sampledim):
                    for s2 in range(s1, sampledim):
                        for t1 in range(tomo1):
                            for t3 in range(tomo1):
                                for t2 in range(tomo3):
                                    for t4 in range(t2,tomo4):
                                        cov2d[idx1:idx1+ellnum, idx2:idx2+ellnum] = \
                                            covmatrix[:,:,s1, s2, t1, t3, t2, t4]
                                        idx2 += ellnum
                                        if idx2 == len2d2:
                                            idx2 = 0
                                            idx1 += ellnum
        elif mode in ['ggmm', 'ggxip', 'ggxim']:
            # two different auto-probes: unique bin pairs on both sides
            # (tomo1/tomo2 here are the inferred min/max bin counts from
            # above, or the caller-supplied values)
            len2d1 = int(tomo1*(tomo1+1)/2)*ellnum
            len2d2 = int(tomo2*(tomo2+1)/2)*ellnum
            cov2d = np.zeros((len2d1,len2d2))
            idx1, idx2 = 0, 0
            for s1 in range(sampledim):
                for s2 in range(s1, sampledim):
                    for t1 in range(tomo1):
                        for t3 in range(t1, tomo1):
                            for t2 in range(tomo2):
                                for t4 in range(t2, tomo2):
                                    cov2d[idx1:idx1+ellnum, idx2:idx2+ellnum] = \
                                        covmatrix[:,:,s1, s2, t1, t3, t2, t4]
                                    idx2 += ellnum
                                    if idx2 == len2d2:
                                        idx2 = 0
                                        idx1 += ellnum
        else:
            raise Exception("mode '" + mode + "' wrong, either gggg, gggm, " +
                            "ggmm, ggxip, ggxim, gmgm, mmgm, gmxip, gmxim, mmmm, " +
                            "xipxip, xipxim, ximxim")
        return cov2d
def __mesh_2d_matrix_together(self,
                              covdiag,
                              covoff):
    """
    Assemble the final square covariance matrix from individual blocks.

    Parameters
    ----------
    covdiag : list of 2-D arrays
        Square auto-covariance blocks placed on the block diagonal,
        e.g. 'gggg', 'gmgm', 'mmmm' (or ..., 'xi_pp', 'xi_mm' in real
        space).  Up to four diagonal blocks are supported.
    covoff : list of 2-D arrays
        Cross-covariance blocks for the off-diagonal positions.  With
        three diagonal blocks the expected order is 'gggm', 'ggmm',
        'mmgm'; with four it is 'gggm', 'gg&xipp', 'gg&ximm',
        'gm&xipp', 'gm&ximm', 'xipxim'.

    Returns
    -------
    mesh2d : 2-D array
        The assembled symmetric covariance matrix.
    """
    # Total side length is the sum of the diagonal block sizes.
    len2d = sum([len(covdiag[x]) for x in range(len(covdiag))])
    mesh2d = np.zeros((len2d, len2d))
    # block 11 (upper-left corner)
    idx1_s = 0
    idx1_e = idx1_s + len(covdiag[0])
    mesh2d[idx1_s:idx1_e,idx1_s:idx1_e] = covdiag[0]
    # block 22
    if len(covdiag) > 1:
        idx1_s = len(covdiag[0])
        idx1_e = idx1_s + len(covdiag[1])
        mesh2d[idx1_s:idx1_e,idx1_s:idx1_e] = covdiag[1]
        # block 12 and 21: the try/except ValueError places the stored
        # block and its transpose whichever way round the shapes fit.
        if len(covoff) > 0:
            idx2_s = 0
            idx2_e = idx2_s + len(covdiag[0])
            try:
                mesh2d[idx1_s:idx1_e,idx2_s:idx2_e] = covoff[0].T
                mesh2d[idx2_s:idx2_e,idx1_s:idx1_e] = covoff[0]
            except ValueError:
                mesh2d[idx1_s:idx1_e,idx2_s:idx2_e] = covoff[0]
                mesh2d[idx2_s:idx2_e,idx1_s:idx1_e] = covoff[0].T
    if len(covdiag) > 2:
        # block 33
        idx1_s = len(covdiag[0]) + len(covdiag[1])
        idx1_e = idx1_s + len(covdiag[2])
        mesh2d[idx1_s:idx1_e, idx1_s:idx1_e] = covdiag[2]
        # block 13 and 31
        if len(covoff) > 1:
            idx2_s = 0
            idx2_e = idx2_s + len(covdiag[0])
            try:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[1].T
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[1]
            except ValueError:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[1]
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[1].T
        # block 23 and 32
        if len(covoff) > 2:
            idx2_s = len(covdiag[0])
            idx2_e = idx2_s + len(covdiag[1])
            # With 3 off-diagonal blocks the 23 block is covoff[2];
            # with the 6-block layout it is covoff[3] (covoff[2] is
            # then the 14 block used below).
            add_idx = 0 if len(covoff) == 3 else 1
            try:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[2+add_idx].T
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[2+add_idx]
            except ValueError:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[2+add_idx]
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[2+add_idx].T
    if len(covdiag) > 3:
        # block 44
        idx1_s = len(covdiag[0]) + len(covdiag[1]) + len(covdiag[2])
        idx1_e = idx1_s + len(covdiag[3])
        mesh2d[idx1_s:idx1_e, idx1_s:idx1_e] = covdiag[3]
        # block 14 and 41
        if len(covoff) > 2:
            idx2_s = 0
            idx2_e = idx2_s + len(covdiag[0])
            try:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[2].T
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[2]
            except ValueError:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[2]
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[2].T
        # block 24 and 42
        if len(covoff) > 4:
            idx2_s = len(covdiag[0])
            idx2_e = idx2_s + len(covdiag[1])
            try:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[4].T
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[4]
            except ValueError:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[4]
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[4].T
        # block 34 and 43
        if len(covoff) > 5:
            idx2_s = len(covdiag[0]) + len(covdiag[1])
            idx2_e = idx2_s + len(covdiag[2])
            try:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[5].T
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[5]
            except ValueError:
                mesh2d[idx1_s:idx1_e, idx2_s:idx2_e] = covoff[5]
                mesh2d[idx2_s:idx2_e, idx1_s:idx1_e] = covoff[5].T
    return mesh2d
def __correlation_matrix(self, cov):
    """Convert a covariance matrix into the corresponding correlation
    matrix, r_ij = C_ij / sqrt(C_ii * C_jj)."""
    variances = np.diag(cov)
    return cov / np.sqrt(np.outer(variances, variances))
def write_Cells(self,
                ellrange,
                n_tomo_clust,
                n_tomo_lens,
                Cells):
    """
    Write the angular power spectra to tab-separated ASCII files.

    One file is written per probe combination that is present in
    ``Cells``: clustering ('gg'), galaxy-galaxy lensing ('gkappa') and
    cosmic shear ('kappakappa').  File names are derived from
    ``self.Cellfile`` via ``self.__add_string_to_filename``.

    Parameters
    ----------
    ellrange : array
        Multipoles at which the spectra are tabulated.
    n_tomo_clust : int
        Number of clustering (lens) tomographic bins.
    n_tomo_lens : int
        Number of lensing (source) tomographic bins.
    Cells : tuple
        (Cell_gg, Cell_gm, Cell_mm); entries may be None or an int
        placeholder, in which case the corresponding file is skipped.

    Returns
    -------
    bool
        Always True on completion.
    """
    Cell_gg, Cell_gm, Cell_mm = Cells
    # --- clustering spectra -------------------------------------------
    if Cell_gg is not None and type(Cell_gg) is not int:
        sampledim = Cell_gg.shape[1]
        ostr_format = '%.10e\t%i\t\t%i\t\t'
        if sampledim == 1:
            Cell_str = 'Cell_gg'
        else:
            # One column per galaxy-sample pair.
            Cell_str = ''
            for sm in range(sampledim):
                for sn in range(sampledim):
                    Cell_str += 'Cell_g_'+ str(sm+1) +'g_' + str(sn+1) + '\t\t\t\t'
        olist_gg = []
        olist_gg.append("#ell\t\ttomo_i\ttomo_j\t"+Cell_str)
        for ellidx, ell in enumerate(ellrange):
            for ti in range(n_tomo_clust):
                for tj in range(n_tomo_clust):
                    ostr = ostr_format \
                        % (ell, ti+1, tj+1)
                    for i_sample in range(sampledim):
                        for j_sample in range(sampledim):
                            # NOTE(review): '%10e' pads to width 10; if
                            # 10-digit precision was intended this should
                            # probably be '%.10e' — confirm before
                            # changing the output format.
                            ostr += '%10e\t\t\t' % Cell_gg[ellidx, i_sample, j_sample, ti, tj]
                    olist_gg.append(ostr)
        fname = self.__add_string_to_filename('gg', self.Cellfile)
        with open(fname, 'w') as file:
            print("Writing '" + fname + "'.")
            for ostr in olist_gg:
                file.write("%s\n" % ostr)
    # --- galaxy-galaxy lensing spectra --------------------------------
    if Cell_gm is not None and type(Cell_gm) is not int:
        sampledim = Cell_gm.shape[1]
        ostr_format = '%.10e\t%i\t\t%i\t\t'
        if sampledim == 1:
            Cell_str = 'Cell_gkappa'
        else:
            Cell_str = ''
            for sm in range(sampledim):
                Cell_str += 'Cell_g_'+ str(sm+1) +'kappa_' + '\t\t\t'
        olist_gm = []
        olist_gm.append("#ell\t\ttomo_i\ttomo_j\t"+Cell_str)
        for ellidx, ell in enumerate(ellrange):
            for ti in range(n_tomo_clust):
                for tj in range(n_tomo_lens):
                    ostr = ostr_format \
                        % (ell, ti+1, tj+1)
                    for i_sample in range(sampledim):
                        ostr += '%10e\t\t\t' % Cell_gm[ellidx, i_sample, ti, tj]
                    olist_gm.append(ostr)
        fname = self.__add_string_to_filename('gkappa', self.Cellfile)
        with open(fname, 'w') as file:
            print("Writing '" + fname + "'.")
            for ostr in olist_gm:
                file.write("%s\n" % ostr)
    # --- cosmic shear spectra (single sample dimension) ---------------
    if Cell_mm is not None and type(Cell_mm) is not int:
        sampledim = Cell_mm.shape[1]
        ostr_format = '%.10e\t%i\t\t%i\t\t'
        Cell_str = 'Cell_kappakappa'
        olist_mm = []
        olist_mm.append("#ell\t\ttomo_i\ttomo_j\t"+Cell_str)
        for ellidx, ell in enumerate(ellrange):
            for ti in range(n_tomo_lens):
                for tj in range(n_tomo_lens):
                    ostr = ostr_format \
                        % (ell, ti+1, tj+1)
                    ostr += '%10e\t\t\t' % Cell_mm[ellidx, 0, ti, tj]
                    olist_mm.append(ostr)
        fname = self.__add_string_to_filename('kappakappa', self.Cellfile)
        with open(fname, 'w') as file:
            print("Writing '" + fname + "'.")
            for ostr in olist_mm:
                file.write("%s\n" % ostr)
    return True
def write_trispectra(self,
                     zet,
                     krange,
                     sampledim,
                     trispec,
                     tri_bool):
    """
    Write the six trispectrum combinations at one redshift to an
    ASCII file derived from ``self.trispecfile``.

    Parameters
    ----------
    zet : value appended to the output file name (redshift label).
    krange : array of wavenumbers; ki/kj columns are log10(k).
    sampledim : int, number of galaxy sample bins.
    trispec : list of six 4-D arrays ordered gggg, gggm, ggmm, gmgm,
        mmgm, mmmm.  NOTE: entries whose ``tri_bool`` flag is False
        are overwritten in place with zero arrays (caller's list is
        mutated).
    tri_bool : list of six booleans selecting which combinations were
        actually computed.

    Returns
    -------
    bool
        Always True on completion.
    """
    # Replace uncomputed combinations with zeros so every row can be
    # formatted uniformly below.
    for idx,tbool in enumerate(tri_bool):
        if not tbool:
            trispec[idx] = \
                np.zeros((len(krange), len(krange), sampledim, sampledim))
    idxlist = self.__get_idxlist_covk(krange, sampledim)
    olist = []
    olist.append("#log10ki\tlog10kj\t\ts1\ts2\t" +
                 "gggg\t\tgggm\t\tggmm\t\tgmgm\t\tmmgm\t\tmmmm")
    for kdxi, ki, kdxj, kj, m, n in idxlist:
        ostr = "%.4f\t\t%.4f\t\t%i\t%i\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e\t%.4e" \
            % (ki, kj, m, n,
               trispec[0][kdxi, kdxj, m, n],
               trispec[1][kdxi, kdxj, m, n],
               trispec[2][kdxi, kdxj, m, n],
               trispec[3][kdxi, kdxj, m, n],
               trispec[4][kdxi, kdxj, m, n],
               trispec[5][kdxi, kdxj, m, n])
        olist.append(ostr)
    fname = self.__add_string_to_filename(zet, self.trispecfile)
    with open(fname, 'w') as file:
        print("Writing '" + fname + "'.")
        for ostr in olist:
            file.write("%s\n" % ostr)
    return True
|
rreischkeREPO_NAMEOneCovariancePATH_START.@OneCovariance_extracted@OneCovariance-main@onecov@cov_output.py@.PATH_END.py
|
{
"filename": "download_test_data.py",
"repo_name": "kbwestfall/NIRVANA",
"repo_path": "NIRVANA_extracted/NIRVANA-master/download_test_data.py",
"type": "Python"
}
|
from pathlib import Path
import os
import warnings
import tqdm
import requests
import netrc
from nirvana.util.download import download_file
from nirvana.tests.util import remote_data_file, remote_drp_test_files, remote_drp_test_images
from nirvana.tests.util import remote_dap_test_files
from nirvana.tests.util import drp_test_version, dap_test_version, dap_test_daptype
def _fetch(url, local_file):
    """Download *url* to *local_file*, skipping files that already exist."""
    if local_file.exists():
        warnings.warn(f'{local_file.name} exists. Skipping...')
        return
    download_file(url, local_file)


def main():
    """Download the MaNGA DRP/DAP files needed by the nirvana test suite.

    Files are placed under the remote-data directory reported by
    ``remote_data_file()``; anything already present is skipped with a
    warning.
    """
    # Removed an unused debug import (`from IPython import embed`).
    local_root = Path(remote_data_file()).resolve()
    if not local_root.exists():
        local_root.mkdir(parents=True)

    dr = 'DR17'
    url_root = f'https://data.sdss.org/sas/{dr.lower()}/manga/spectro'

    # DRP datacubes and their PNG images; the plate id is the second
    # dash-separated field of the file name.
    drp_files = remote_drp_test_files()
    drp_images = remote_drp_test_images()
    plates = [f.split('-')[1] for f in drp_files]
    for plate, fcube, fimg in zip(plates, drp_files, drp_images):
        _fetch(f'{url_root}/redux/{drp_test_version}/{plate}/stack/{fcube}',
               local_root / fcube)
        _fetch(f'{url_root}/redux/{drp_test_version}/{plate}/images/{fimg}',
               local_root / fimg)

    # DAP files; plate and ifu are the second and third fields.
    dap_files = remote_dap_test_files(daptype=dap_test_daptype)
    plates = [f.split('-')[1] for f in dap_files]
    ifus = [f.split('-')[2] for f in dap_files]
    for plate, ifu, f in zip(plates, ifus, dap_files):
        _fetch(f'{url_root}/analysis/{drp_test_version}/{dap_test_version}/{dap_test_daptype}'
               f'/{plate}/{ifu}/{f}',
               local_root / f)

    # Summary catalogs.
    drpall = local_root / f'drpall-{drp_test_version}.fits'
    _fetch(f'{url_root}/redux/{drp_test_version}/{drpall.name}', drpall)

    dapall = local_root / f'dapall-{drp_test_version}-{dap_test_version}.fits'
    _fetch(f'{url_root}/analysis/{drp_test_version}/{dap_test_version}/{dapall.name}', dapall)


if __name__ == '__main__':
    main()
|
kbwestfallREPO_NAMENIRVANAPATH_START.@NIRVANA_extracted@NIRVANA-master@download_test_data.py@.PATH_END.py
|
{
"filename": "connect.py",
"repo_name": "mhammond/pywin32",
"repo_path": "pywin32_extracted/pywin32-main/com/win32com/client/connect.py",
"type": "Python"
}
|
"""Utilities for working with Connections"""
import pythoncom
import win32com.server.policy
import win32com.server.util
class SimpleConnection:
    """A simple, single COM connection-point (event sink) manager.

    Wraps the FindConnectionPoint/Advise/Unadvise dance so a Python
    event handler can be attached to a COM object's event source.
    """

    def __init__(self, coInstance=None, eventInstance=None, eventCLSID=None, debug=0):
        # cp and cookie are populated by Connect() and cleared by
        # Disconnect().
        self.cp = None
        self.cookie = None
        self.debug = debug
        # Connect immediately when a COM instance is supplied.
        if coInstance is not None:  # was `not coInstance is None` (PEP 8 E714)
            self.Connect(coInstance, eventInstance, eventCLSID)

    def __del__(self):
        try:
            self.Disconnect()
        except pythoncom.error:
            # Ignore disconnection errors as we are torn down.
            pass

    def _wrap(self, obj):
        """Wrap *obj* as a COM server, with call tracing in debug mode."""
        useDispatcher = (
            win32com.server.policy.DispatcherWin32trace if self.debug else None
        )
        return win32com.server.util.wrap(obj, useDispatcher=useDispatcher)

    def Connect(self, coInstance, eventInstance, eventCLSID=None):
        """Advise *eventInstance* as the event sink for *coInstance*.

        *coInstance* may be a win32com Dispatch wrapper (its raw
        ``_oleobj_`` is used) or a bare PyIUnknown.  If *eventCLSID* is
        omitted, the event instance's ``CLSID`` attribute is used.
        """
        try:
            oleobj = coInstance._oleobj_
        except AttributeError:
            oleobj = coInstance
        cpc = oleobj.QueryInterface(pythoncom.IID_IConnectionPointContainer)
        if eventCLSID is None:
            eventCLSID = eventInstance.CLSID
        comEventInstance = self._wrap(eventInstance)
        self.cp = cpc.FindConnectionPoint(eventCLSID)
        self.cookie = self.cp.Advise(comEventInstance)

    def Disconnect(self):
        """Unadvise and release the connection point; safe to call twice."""
        if self.cp is not None:  # was `not self.cp is None` (PEP 8 E714)
            if self.cookie:
                self.cp.Unadvise(self.cookie)
                self.cookie = None
            self.cp = None
|
mhammondREPO_NAMEpywin32PATH_START.@pywin32_extracted@pywin32-main@com@win32com@client@connect.py@.PATH_END.py
|
{
"filename": "generate_python_docs.py",
"repo_name": "hannorein/REBOUND",
"repo_path": "REBOUND_extracted/REBOUND-main/docs/generate_python_docs.py",
"type": "Python"
}
|
import rebound
import inspect
import docstring_to_markdown
def convert_code_blocks(doc):
    """Convert doctest-style '>>>' lines in *doc* into fenced markdown
    python code blocks.

    Consecutive '>>>' lines become one ```python block (with the
    '>>>' prefix stripped); an open block is closed by the first
    non-prompt line or by the end of the document.  Every emitted line
    is newline-terminated.
    """
    parts = []
    in_block = False
    for line in doc.split("\n"):
        is_prompt = line.startswith(">>>")
        if not in_block:
            if is_prompt:
                in_block = True
                parts.append("```python\n")
                parts.append(line[3:] + "\n")
            else:
                parts.append(line + "\n")
        elif is_prompt:
            parts.append(line[3:] + "\n")
        else:
            parts.append("```\n")
            parts.append(line + "\n")
            in_block = False
    if in_block:
        # Document ended while still inside a code block.
        parts.append("```\n")
    return "".join(parts)
def render_class(cls, functions=None):
    """Render markdown documentation for the class named *cls*.

    Parameters
    ----------
    cls : str
        Dotted name of the class, evaluated in this module's namespace
        (e.g. "rebound.Simulation").  Only pass trusted, hard-coded
        names: the lookup uses eval().
    functions : list of str, optional
        Method names to document after the class docstring.  Defaults
        to no methods.  (Previously a missing argument crashed with
        TypeError because None was iterated.)
    """
    obj = eval(cls)  # hoisted: was re-evaluated for every method
    d = "## Class `"+cls+"`\n"
    d += convert_code_blocks(inspect.cleandoc(obj.__doc__))
    for function in functions or ():
        f = getattr(obj, function)
        d += "## Function `"+cls+"."+function+"`\n"
        d += convert_code_blocks(inspect.cleandoc(f.__doc__))
    return d
# Script output: markdown documentation for rebound.Simulation and its copy().
print(render_class("rebound.Simulation",["copy"]))
|
hannoreinREPO_NAMEREBOUNDPATH_START.@REBOUND_extracted@REBOUND-main@docs@generate_python_docs.py@.PATH_END.py
|
{
"filename": "test_config.py",
"repo_name": "realfastvla/realfast",
"repo_path": "realfast_extracted/realfast-main/tests/test_config.py",
"type": "Python"
}
|
import rfpipe
import pytest
import evla_mcast
import os.path
import os
# Absolute path of this test module's directory; used to locate the
# bundled data files below.
_install_dir = os.path.abspath(os.path.dirname(__file__))
@pytest.fixture(scope="module")
def config():
    """Module-scoped scan configuration built from the bundled XML files."""
    cfg = evla_mcast.scan_config.ScanConfig(vci=os.path.join(_install_dir, 'data/vci.xml'),
                                            obs=os.path.join(_install_dir, 'data/obs.xml'),
                                            ant=os.path.join(_install_dir, 'data/antprop.xml'),
                                            requires=['ant', 'vci', 'obs'])
    # Give the scan a finite 100-second duration.
    cfg.stopTime = cfg.startTime + 100 / (24 * 3600.)
    return cfg
@pytest.fixture(scope="module")
def config2():
    """Same as ``config`` but built from the alternative vci2.xml."""
    cfg = evla_mcast.scan_config.ScanConfig(vci=os.path.join(_install_dir, 'data/vci2.xml'),
                                            obs=os.path.join(_install_dir, 'data/obs.xml'),
                                            ant=os.path.join(_install_dir, 'data/antprop.xml'),
                                            requires=['ant', 'vci', 'obs'])
    # Give the scan a finite 100-second duration.
    cfg.stopTime = cfg.startTime + 100 / (24 * 3600.)
    return cfg
# Parametrized preference dictionaries; each test using this fixture
# runs once per parameter set.
@pytest.fixture(scope="module", params=[{'npix_max': 128},
                                        {'memory_limit': 1., 'maxdm': 100},
                                        {'maxdm': 100}])
def inprefs(request):
    """Return the current preference-override dictionary."""
    return request.param
def test_configstate(config, inprefs):
    """A State built directly from a scan config has valid timing metadata."""
    state = rfpipe.state.State(config=config, inprefs=inprefs, preffile=None)
    assert state.nints
    assert state.metadata.nints
    assert state.metadata.endtime_mjd
    assert len(state.segmenttimes)
def test_configstate2(config2, inprefs):
    """Same as test_configstate but for the alternative configuration."""
    state = rfpipe.state.State(config=config2, inprefs=inprefs, preffile=None)
    assert state.nints
    assert state.metadata.nints
    assert state.metadata.endtime_mjd
    assert len(state.segmenttimes)
def test_metastate(config, inprefs):
    """A State built from config-derived metadata also has valid timing."""
    meta = rfpipe.metadata.config_metadata(config, datasource='sim')
    state = rfpipe.state.State(inmeta=meta, inprefs=inprefs, preffile=None)
    assert state.nints
    assert state.metadata.nints
    assert state.metadata.endtime_mjd
    assert len(state.segmenttimes)
def test_sim(config, inprefs):
    """Simulated data read for segment 0 matches the state's expected shape."""
    meta = rfpipe.metadata.config_metadata(config, datasource='sim')
    state = rfpipe.state.State(inmeta=meta, inprefs=inprefs,
                               preffile=os.path.join(_install_dir,
                                                     'data/realfast.yml'))
    data = rfpipe.source.read_segment(state, 0)
    assert data.shape == state.datashape_orig
|
realfastvlaREPO_NAMErealfastPATH_START.@realfast_extracted@realfast-main@tests@test_config.py@.PATH_END.py
|
{
"filename": "_range.py",
"repo_name": "plotly/plotly.py",
"repo_path": "plotly.py_extracted/plotly.py-master/packages/python/plotly/plotly/validators/layout/xaxis/rangeslider/_range.py",
"type": "Python"
}
|
import _plotly_utils.basevalidators
class RangeValidator(_plotly_utils.basevalidators.InfoArrayValidator):
def __init__(
self, plotly_name="range", parent_name="layout.xaxis.rangeslider", **kwargs
):
super(RangeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
implied_edits=kwargs.pop("implied_edits", {"autorange": False}),
items=kwargs.pop(
"items",
[
{
"editType": "calc",
"impliedEdits": {"^autorange": False},
"valType": "any",
},
{
"editType": "calc",
"impliedEdits": {"^autorange": False},
"valType": "any",
},
],
),
**kwargs,
)
|
plotlyREPO_NAMEplotly.pyPATH_START.@plotly.py_extracted@plotly.py-master@packages@python@plotly@plotly@validators@layout@xaxis@rangeslider@_range.py@.PATH_END.py
|
{
"filename": "reid14_rotcurve.py",
"repo_name": "tvwenger/kd",
"repo_path": "kd_extracted/kd-master/kd/reid14_rotcurve.py",
"type": "Python"
}
|
#!/usr/bin/env python
"""
reid14_rotcurve.py
Utilities involving the Universal Rotation Curve (Persic+1996) from
Reid+2014.
Copyright(C) 2017-2021 by
Trey V. Wenger; tvwenger@gmail.com
GNU General Public License v3 (GNU GPLv3)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation, either version 3 of the License,
or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
2017-04-12 Trey V. Wenger
2020-02-19 Trey V. Wenger updates for v2.0
"""
import numpy as np
from kd import kd_utils
#
# Reid+2014 rotation curve parameters and uncertainties
#
__a1 = 241.0  # km/s V(R_opt)
__a1_err = 8.0
__a2 = 0.90  # R_opt/ R0
__a2_err = 0.06
__a3 = 1.46  # 1.5*(L/L*)^0.2
__a3_err = 0.16
__R0 = 8.34  # kpc
__R0_err = 0.16


def nominal_params():
    """
    Return a dictionary of the nominal Reid+2014 rotation curve
    parameters.

    Parameters: Nothing

    Returns: params
      params :: dictionary
        params['a1'], etc. : scalar
          The nominal rotation curve parameter
    """
    return {"a1": __a1, "a2": __a2, "a3": __a3, "R0": __R0}
def resample_params(size=None):
    """
    Draw the Reid+2014 rotation curve parameters from Gaussian
    distributions centered on the published values, with the published
    uncertainties as widths.

    Parameters:
      size :: integer
        The number of random samples to generate.  If None, each
        entry is a single scalar draw.

    Returns: params
      params :: dictionary
        params['a1'], etc. : scalar or array of scalars
          The re-sampled parameters
    """
    draw = np.random.normal
    # Dict-literal evaluation keeps the a1, a2, a3, R0 draw order, so
    # the RNG stream matches the original implementation.
    return {
        "a1": draw(loc=__a1, scale=__a1_err, size=size),
        "a2": draw(loc=__a2, scale=__a2_err, size=size),
        "a3": draw(loc=__a3, scale=__a3_err, size=size),
        "R0": draw(loc=__R0, scale=__R0_err, size=size),
    }
def calc_theta(R, a1=__a1, a2=__a2, a3=__a3, R0=__R0):
    """
    Return the circular orbit speed at a given Galactocentric radius,
    using the Persic+1996 Universal Rotation Curve with the Reid+2014
    parameter values.

    Parameters:
      R :: scalar or array of scalars
        Galactocentric radius (kpc)

      a1, a2, a3 :: scalars (optional)
        Reid+2014 rotation curve parameters

      R0 :: scalar (optional)
        Solar Galactocentric radius (kpc)

    Returns: theta
      theta :: scalar or array of scalars
        circular orbit speed at R (km/s)
    """
    # Work with arrays internally; restore a scalar on return if the
    # caller passed one.
    input_scalar = np.isscalar(R)
    R = np.atleast_1d(R)
    #
    # Equations 8, 9, 10, 11a, 11b in Persic+1996
    #
    x = R / (a2 * R0)
    LLstar = (a3 / 1.5) ** 5.0
    beta = 0.72 + 0.44 * np.log10(LLstar)
    # Disk component Vd^2 / V(R_opt)^2
    Vd2 = beta * 1.97 * x ** 1.22 / (x ** 2.0 + 0.78 ** 2.0) ** 1.43
    # Halo component Vh^2 / V(R_opt)^2
    Vh2 = (1.0 - beta) * (1.0 + a3 ** 2.0) * x ** 2.0 / (x ** 2.0 + a3 ** 2.0)
    #
    # Catch non-physical case where Vd2 + Vh2 < 0 (possible for
    # re-sampled parameter values): flag as NaN rather than raising.
    #
    Vtot = Vd2 + Vh2
    Vtot[Vtot < 0.0] = np.nan
    #
    # Circular velocity
    #
    theta = a1 * np.sqrt(Vtot)
    if input_scalar:
        return theta[0]
    return theta
def calc_vlsr(glong, glat, dist, a1=__a1, a2=__a2, a3=__a3, R0=__R0):
    """
    Return the LSR velocity at a given Galactic longitude and
    line-of-sight distance.

    Parameters:
      glong, glat :: scalar or array of scalars
        Galactic longitude and latitude (deg).

      dist :: scalar or array of scalars
        line-of-sight distance (kpc).

      a1, a2, a3 :: scalars (optional)
        Reid+2014 rotation curve parameters

      R0 :: scalar (optional)
        Solar Galactocentric radius (kpc)

    Returns: vlsr
      vlsr :: scalar or array of scalars
        LSR velocity (km/s).
    """
    # Work with arrays internally; restore a scalar on return if every
    # input was scalar.
    input_scalar = np.isscalar(glong) and np.isscalar(glat) and np.isscalar(dist)
    glong, glat, dist = np.atleast_1d(glong, glat, dist)
    #
    # Convert distance to Galactocentric radius; clamp tiny radii to
    # avoid dividing by ~zero in the projection below.
    #
    Rgal = kd_utils.calc_Rgal(glong, glat, dist, R0=R0)
    Rgal[Rgal < 1.0e-6] = 1.0e-6
    #
    # Rotation curve circular velocity at the source and at the Sun
    #
    theta = calc_theta(Rgal, a1=a1, a2=a2, a3=a3, R0=R0)
    theta0 = calc_theta(R0, a1=a1, a2=a2, a3=a3, R0=R0)
    #
    # Project the difference in angular speed onto the line of sight
    # to get the LSR velocity
    #
    vlsr = R0 * np.sin(np.deg2rad(glong))
    vlsr = vlsr * ((theta / Rgal) - (theta0 / R0))
    if input_scalar:
        return vlsr[0]
    return vlsr
|
tvwengerREPO_NAMEkdPATH_START.@kd_extracted@kd-master@kd@reid14_rotcurve.py@.PATH_END.py
|
{
"filename": "recipes_FLAT_LS_SPECT.py",
"repo_name": "GeminiDRSoftware/DRAGONS",
"repo_path": "DRAGONS_extracted/DRAGONS-master/geminidr/gmos/recipes/ql/recipes_FLAT_LS_SPECT.py",
"type": "Python"
}
|
"""
Recipes available to data with tags ['GMOS', 'SPECT', 'LS', 'FLAT'].
These are GMOS longslit observations.
Default is "reduce".
"""
# Dataset tags this recipe module serves.
recipe_tags = {'GMOS', 'SPECT', 'LS', 'FLAT'}

# The quicklook (ql) mode reuses the science-quality (sq) flat recipes.
# NOTE(review): makeProcessedFlatStack is imported but not referenced
# below — presumably re-exported so it is selectable by name; confirm.
from geminidr.gmos.recipes.sq.recipes_FLAT_LS_SPECT import (makeProcessedFlatStack,
                                                            makeProcessedFlatNoStack)

# Recipe used when the caller does not request one explicitly.
_default = makeProcessedFlatNoStack
|
GeminiDRSoftwareREPO_NAMEDRAGONSPATH_START.@DRAGONS_extracted@DRAGONS-master@geminidr@gmos@recipes@ql@recipes_FLAT_LS_SPECT.py@.PATH_END.py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.